85 files changed, 1481 insertions, 300 deletions
diff --git a/.coveragerc b/.coveragerc index 00f46b61b..ad7893b91 100644 --- a/.coveragerc +++ b/.coveragerc @@ -14,7 +14,7 @@ omit = */test/* [report] -fail_under = 29 +fail_under = 28 [html] directory = cover diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md index f62fc2ab9..35e057af3 100644 --- a/README_CONTAINER_IMAGE.md +++ b/README_CONTAINER_IMAGE.md @@ -2,7 +2,7 @@ The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks. -**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation. +**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes (i.e. run one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation. ## Build @@ -11,7 +11,7 @@ To build a container image of `openshift-ansible`: 1. Using standalone **Docker**: cd openshift-ansible - docker build -t openshift-ansible . + docker build -t openshift/openshift-ansible . 1. Using an **OpenShift** build: @@ -20,15 +20,15 @@ To build a container image of `openshift-ansible`: ## Usage -The base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation. +The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation. At the very least, when running a container using an image built this way you must specify: -1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. 1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it. 1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh` +1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collecting and show facts about your OpenShift environment. -Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](../../roles/openshift_certificate_expiry). 
The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image: +Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image: docker run -u `id -u` \ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \ @@ -36,6 +36,6 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th -e INVENTORY_FILE=/tmp/inventory \ -e OPTS="-v" \ -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \ - openshift-ansible + openshift/openshift-ansible -The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use a built image. +The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use an image built from it like this one. diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index ed6923687..a619f9ccb 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1,26 +1,32 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 -# pylint: disable=no-name-in-module, import-error, wrong-import-order, ungrouped-imports """ Custom filters for use in openshift-ansible """ +import json import os import pdb -import pkg_resources -import re -import json -import yaml import random +import re -from ansible import errors from collections import Mapping -from distutils.util import strtobool -from distutils.version import LooseVersion +# pylint no-name-in-module and import-error disabled here because pylint +# fails to properly detect the packages when installed in a virtualenv +from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error +from distutils.version import LooseVersion # pylint:disable=no-name-in-module,import-error from operator import itemgetter -from ansible.module_utils.six.moves.urllib.parse import urlparse + +import pkg_resources +import yaml + +from ansible import errors +# pylint no-name-in-module and import-error disabled here because pylint +# fails to properly detect the packages when installed in a virtualenv +from ansible.compat.six import string_types # pylint:disable=no-name-in-module,import-error +from ansible.compat.six.moves.urllib.parse import urlparse # pylint:disable=no-name-in-module,import-error +from ansible.module_utils._text import to_text from ansible.parsing.yaml.dumper import AnsibleDumper -from six import string_types HAS_OPENSSL = False try: @@ -29,15 +35,6 @@ try: except ImportError: pass -try: - # ansible-2.2 - # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2, - # ansible.module_utils._text.to_text should be used instead. 
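Around this point the oo_filters.py diff drops a 2.1/2.2 compatibility shim: with Ansible 2.2 or newer assumed, `to_text` is imported directly from `ansible.module_utils._text`. For reference, a minimal sketch of the fallback pattern the removed block implemented (it assumes an Ansible installation is available):

    # The version-tolerant import the old code carried: Ansible >= 2.2 takes the
    # first branch, Ansible 2.1 fell back to the deprecated to_unicode helper.
    try:
        from ansible.module_utils._text import to_text
    except ImportError:
        from ansible.utils.unicode import to_unicode as to_text

    print(to_text(b'registry.example.com'))  # bytes in, text (unicode) out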
- from ansible.module_utils._text import to_text -except ImportError: - # ansible-2.1 - from ansible.utils.unicode import to_unicode as to_text - def oo_pdb(arg): """ This pops you into a pdb instance where arg is the data passed in @@ -117,8 +114,7 @@ def oo_merge_hostvars(hostvars, variables, inventory_hostname): raise errors.AnsibleFilterError("|failed expects variables is a dictionary") if not isinstance(inventory_hostname, string_types): raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string") - # pylint: disable=no-member - ansible_version = pkg_resources.get_distribution("ansible").version + ansible_version = pkg_resources.get_distribution("ansible").version # pylint: disable=maybe-no-member merged_hostvars = {} if LooseVersion(ansible_version) >= LooseVersion('2.0.0'): merged_hostvars = oo_merge_dicts( diff --git a/hack/build-images.sh b/hack/build-images.sh new file mode 100755 index 000000000..f6210e239 --- /dev/null +++ b/hack/build-images.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +STARTTIME=$(date +%s) +source_root=$(dirname "${0}")/.. + +prefix="openshift/openshift-ansible" +version="latest" +verbose=false +options="" +help=false + +for args in "$@" +do + case $args in + --prefix=*) + prefix="${args#*=}" + ;; + --version=*) + version="${args#*=}" + ;; + --no-cache) + options="${options} --no-cache" + ;; + --verbose) + verbose=true + ;; + --help) + help=true + ;; + esac +done + +# allow ENV to take precedent over switches +prefix="${PREFIX:-$prefix}" +version="${OS_TAG:-$version}" + +if [ "$help" = true ]; then + echo "Builds the docker images for openshift-ansible" + echo + echo "Options: " + echo " --prefix=PREFIX" + echo " The prefix to use for the image names." + echo " default: openshift/openshift-ansible" + echo + echo " --version=VERSION" + echo " The version used to tag the image" + echo " default: latest" + echo + echo " --no-cache" + echo " If set will perform the build without a cache." + echo + echo " --verbose" + echo " Enables printing of the commands as they run." + echo + echo " --help" + echo " Prints this help message" + echo + exit 0 +fi + +if [ "$verbose" = true ]; then + set -x +fi + +BUILD_STARTTIME=$(date +%s) +comp_path=$source_root/ +docker_tag=${prefix}:${version} +echo +echo +echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---" +docker build ${options} -t $docker_tag $comp_path +BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---" +echo +echo + +echo +echo +echo "++ Active images" +docker images | grep ${prefix} | grep ${version} | sort +echo + + +ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret" diff --git a/hack/push-release.sh b/hack/push-release.sh new file mode 100755 index 000000000..8639143af --- /dev/null +++ b/hack/push-release.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# This script pushes all of the built images to a registry. +# +# Set OS_PUSH_BASE_REGISTRY to prefix the destination images +# + +set -o errexit +set -o nounset +set -o pipefail + +STARTTIME=$(date +%s) +OS_ROOT=$(dirname "${BASH_SOURCE}")/.. + +PREFIX="${PREFIX:-openshift/openshift-ansible}" + +# Go to the top of the tree. 
+cd "${OS_ROOT}" + +# Allow a release to be repushed with a tag +tag="${OS_PUSH_TAG:-}" +if [[ -n "${tag}" ]]; then + tag=":${tag}" +else + tag=":latest" +fi + +# Source tag +source_tag="${OS_TAG:-}" +if [[ -z "${source_tag}" ]]; then + source_tag="latest" +fi + +images=( + ${PREFIX} +) + +PUSH_OPTS="" +if docker push --help | grep -q force; then + PUSH_OPTS="--force" +fi + +if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then + set -e + for image in "${images[@]}"; do + docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}" + done + set +e +fi + +for image in "${images[@]}"; do + docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}" +done + +ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret" diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index c2ca6743d..bb9f4706a 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -89,6 +89,8 @@ openshift_release=v1.4 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. # docker_upgrade=False +# Specify exact version of etcd to configure or upgrade to. +# etcd_version="3.1.0" # Upgrade Hooks # @@ -640,7 +642,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure nodeIP in the node config # This is needed in cases where node traffic is desired to go over an # interface other than the default network interface. -#openshift_node_set_node_ip=True +#openshift_set_node_ip=True # Force setting of system hostname when configuring OpenShift # This works around issues related to installations that do not have valid dns diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index a6d613766..12a1b3991 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -89,6 +89,8 @@ openshift_release=v3.4 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. # docker_upgrade=False +# Specify exact version of etcd to configure or upgrade to. +# etcd_version="3.1.0" # Upgrade Hooks # @@ -641,7 +643,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure nodeIP in the node config # This is needed in cases where node traffic is desired to go over an # interface other than the default network interface. -#openshift_node_set_node_ip=True +#openshift_set_node_ip=True # Force setting of system hostname when configuring OpenShift # This works around issues related to installations that do not have valid dns diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index e4db65b02..86f5a36ca 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -50,6 +50,8 @@ tags: - pre_upgrade +# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. +# So it is necassary to run the play after running disable_excluder.yml. 
- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 82f711f40..ff4c4b0d7 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -60,3 +60,7 @@ - include: openshift_hosted.yml tags: - hosted + +- include: reset_excluder.yml + tags: + - always diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 7f37c606f..1f74e929f 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -18,12 +18,17 @@ msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" +# TODO(jchaloup): find a different way how to make repoquery --qf '%version` atomic-openshift work without disabling the excluders - include: disable_excluder.yml vars: # the excluders needs to be disabled no matter what status says with_status_check: false + # Only openshift excluder needs to be temporarily disabled + # So ignore the docker one + enable_docker_excluder: false tags: - always + when: openshift_upgrade_target is not defined - name: Determine openshift_version to configure on first master hosts: oo_first_master @@ -39,3 +44,13 @@ openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" roles: - openshift_version + + # Re-enable excluders if they are meant to be enabled (and only during installation, upgrade disables the excluders before this play) +- include: reset_excluder.yml + vars: + # Only openshift excluder needs to be re-enabled + # So ignore the docker one + enable_docker_excluder: false + tags: + - always + when: openshift_upgrade_target is not defined diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 06cda36a5..5db71b857 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -53,6 +53,8 @@ pre_tasks: - set_fact: openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + - set_fact: + openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" tasks: - block: @@ -60,3 +62,9 @@ name: openshift_logging tasks_from: update_master_config when: openshift_hosted_logging_deploy | default(false) | bool + + - block: + - include_role: + name: openshift_metrics + tasks_from: update_master_config + when: openshift_hosted_metrics_deploy | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml index 2af699209..cbb4a2434 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml @@ -31,7 +31,7 @@ - name: Generate new etcd CA hosts: oo_first_etcd roles: - - role: etcd_ca + - role: openshift_etcd_ca etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml index 5078638b7..38d1cd0f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml @@ -3,20 +3,23 @@ # - repoquery_cmd # - excluder # - openshift_upgrade_target -- name: Get available excluder version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" - register: excluder_version - failed_when: false - changed_when: false +- block: + - name: Get available excluder version + command: > + {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" + register: excluder_version + failed_when: false + changed_when: false -- name: Docker excluder version detected - debug: - msg: "{{ excluder }}: {{ excluder_version.stdout }}" + - name: Docker excluder version detected + debug: + msg: "{{ excluder }}: {{ excluder_version.stdout }}" -- name: Check the available {{ excluder }} version is at most of the upgrade target version - fail: - msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version {{ openshift_upgrade_target }}" - when: + - name: Check the available {{ excluder }} version is at most of the upgrade target version + fail: + msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version {{ openshift_upgrade_target }}" + when: - "{{ excluder_version.stdout != '' }}" - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target, '>', strict=True) }}" + when: + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml index 9c126033c..ae63c9ca9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml @@ -35,7 +35,7 @@ kind: petsets register: l_do_petsets_exist - - name: FAIL ON Resource migration 'PetSets' unsupported + - name: Fail on unsupported resource migration 'PetSets' fail: msg: > PetSet objects were detected in your cluster. 
These are an @@ -59,9 +59,9 @@ migrating to StatefulSets, run this command as a user with cluster-admin privileges: - $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascale=false + $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false when: # Search did not fail, valid resource type found - - l_do_petsets_exist.results.returncode == "0" + - l_do_petsets_exist.results.returncode == 0 # Items do exist in the search results - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 7a334e771..68b9db03a 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -127,6 +127,8 @@ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + - role: nuage_master + when: openshift.common.use_nuage | bool post_tasks: - name: Create group for deployment type diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index c2b72339c..1ccae61f2 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -2,11 +2,18 @@ # The version of Contiv binaries to use contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC +# The version of cni binaries +cni_version: v0.4.0 + contiv_default_subnet: "20.1.1.1/24" contiv_default_gw: "20.1.1.254" # TCP port that Netmaster listens for network connections netmaster_port: 9999 +# Default for contiv_role +contiv_role: netmaster + + # TCP port that Netplugin listens for network connections netplugin_port: 6640 contiv_rpc_port1: 9001 @@ -33,6 +40,14 @@ bin_dir: /usr/bin # Path to the contivk8s cni binary cni_bin_dir: /opt/cni/bin +# Path to cni archive download directory +cni_download_dir: /tmp + +# URL for cni binaries +cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/" +cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2" + + # Contiv config directory contiv_config_dir: /opt/contiv/config diff --git a/roles/contiv/files/loopback b/roles/contiv/files/loopback Binary files differdeleted file mode 100644 index f02b0b1fb..000000000 --- a/roles/contiv/files/loopback +++ /dev/null diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml index 28ed50fae..319fce46c 100644 --- a/roles/contiv/tasks/download_bins.yml +++ b/roles/contiv/tasks/download_bins.yml @@ -25,3 +25,22 @@ src: "{{ contiv_current_release_directory }}/netplugin-{{ contiv_version }}.tar.bz2" dest: "{{ contiv_current_release_directory }}" copy: no + +- name: Download Bins | Download cni tar file + get_url: + url: "{{ cni_bin_url }}" + dest: "{{ cni_download_dir }}" + mode: 0755 + validate_certs: False + environment: + http_proxy: "{{ http_proxy|default('') }}" + https_proxy: "{{ https_proxy|default('') }}" + no_proxy: "{{ no_proxy|default('') }}" + register: download_file + +- name: Download Bins | Extract cni tar file + unarchive: + src: "{{ download_file.dest }}" + dest: "{{ cni_download_dir }}" + copy: no + when: download_file.changed diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index ec6c72fe9..97b9762df 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -43,8 +43,9 @@ - name: Netplugin | Copy CNI loopback bin copy: - src: loopback + src: "{{ cni_download_dir }}/loopback" dest: "{{ cni_bin_dir 
}}/loopback" + remote_src: True mode: 0755 - name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml index bb6fabf64..649ad23c1 100644 --- a/roles/etcd/tasks/etcdctl.yml +++ b/roles/etcd/tasks/etcdctl.yml @@ -1,6 +1,6 @@ --- - name: Install etcd for etcdctl - package: name=etcd state=present + package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not openshift.common.is_atomic | bool - name: Configure etcd profile.d alises diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index b4ffc99e3..c09da3b61 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -7,7 +7,7 @@ etcd_ip: "{{ etcd_ip }}" - name: Install etcd - package: name=etcd state=present + package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool - name: Pull etcd container diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml index b453f2bd8..98c913dba 100644 --- a/roles/etcd_server_certificates/meta/main.yml +++ b/roles/etcd_server_certificates/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info: - cloud - system dependencies: -- role: etcd_ca +- role: openshift_etcd_ca diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml index 242c1e997..4ae9b79c4 100644 --- a/roles/etcd_server_certificates/tasks/main.yml +++ b/roles/etcd_server_certificates/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Install etcd - package: name=etcd state=present + package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool - name: Check status of etcd certificates diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oadm_manage_node.py index 8c0a29ac7..8bb0538c0 100644 --- a/roles/lib_openshift/library/oadm_manage_node.py +++ b/roles/lib_openshift/library/oadm_manage_node.py @@ -1358,10 +1358,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index bbcd9d0c5..4ecfd2bff 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1366,10 +1366,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 0ab1c8d49..49ff22584 100644 --- 
a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1344,10 +1344,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval @@ -1907,6 +1908,28 @@ class PolicyGroup(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None + self._cluster_policy_bindings = None + self._policy_bindings = None + + @property + def policybindings(self): + if self._policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve policybindings') + self._policy_bindings = results['results'][0]['items'][0] + + return self._policy_bindings + + @property + def clusterpolicybindings(self): + if self._cluster_policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') + self._cluster_policy_bindings = results['results'][0]['items'][0] + + return self._cluster_policy_bindings @property def role_binding(self): @@ -1947,18 +1970,24 @@ class PolicyGroup(OpenShiftCLI): def exists_role_binding(self): ''' return whether role_binding exists ''' - results = self.get() - if results['returncode'] == 0: - self.role_binding = RoleBinding(results['results'][0]) - if self.role_binding.find_group_name(self.config.config_options['group']['value']) != None: - return True + bindings = None + if self.config.config_options['resource_kind']['value'] == 'cluster-role': + bindings = self.clusterpolicybindings + else: + bindings = self.policybindings + if bindings is None: return False - elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: - return False + for binding in bindings['roleBindings']: + _rb = binding['roleBinding'] + if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ + _rb['groupNames'] is not None and \ + self.config.config_options['group']['value'] in _rb['groupNames']: + self.role_binding = binding + return True - return results + return False def exists_scc(self): ''' return whether scc exists ''' diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 91bd85122..bed05044c 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1344,10 +1344,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval @@ -1906,6 +1907,28 @@ class PolicyUser(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None + self._cluster_policy_bindings = None + self._policy_bindings = None + 
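The rewritten exists_role_binding above no longer issues a per-name `oc get` and parses stderr for "not found"; it walks the roleBindings entries of the (cluster)policybinding document and checks membership directly. A standalone sketch of that matching logic against hand-written illustrative data (not taken from a real cluster):

    # Mirrors the new exists_role_binding logic for groups: match the roleRef
    # name, then check membership, treating a null groupNames list as "no match".
    bindings = {
        'roleBindings': [
            {'roleBinding': {'roleRef': {'name': 'edit'},
                             'groupNames': ['developers', 'testers']}},
            {'roleBinding': {'roleRef': {'name': 'registry-viewer'},
                             'groupNames': None}},
        ]
    }

    def group_has_role(bindings, role, group):
        for binding in bindings['roleBindings']:
            _rb = binding['roleBinding']
            if _rb['roleRef']['name'] == role and \
               _rb['groupNames'] is not None and \
               group in _rb['groupNames']:
                return True
        return False

    print(group_has_role(bindings, 'edit', 'developers'))         # True
    print(group_has_role(bindings, 'registry-viewer', 'admins'))  # False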
+ @property + def policybindings(self): + if self._policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve policybindings') + self._policy_bindings = results['results'][0]['items'][0] + + return self._policy_bindings + + @property + def clusterpolicybindings(self): + if self._cluster_policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') + self._cluster_policy_bindings = results['results'][0]['items'][0] + + return self._cluster_policy_bindings @property def role_binding(self): @@ -1928,36 +1951,37 @@ class PolicyUser(OpenShiftCLI): self._scc = scc def get(self): - '''fetch the desired kind''' + '''fetch the desired kind + + This is only used for scc objects. + The {cluster}rolebindings happen in exists. + ''' resource_name = self.config.config_options['name']['value'] if resource_name == 'cluster-reader': resource_name += 's' - # oc adm policy add-... creates policy bindings with the name - # "[resource_name]-binding", however some bindings in the system - # simply use "[resource_name]". So try both. - - results = self._get(self.config.kind, resource_name) - if results['returncode'] == 0: - return results - - # Now try -binding naming convention - return self._get(self.config.kind, resource_name + "-binding") + return self._get(self.config.kind, resource_name) def exists_role_binding(self): ''' return whether role_binding exists ''' - results = self.get() - if results['returncode'] == 0: - self.role_binding = RoleBinding(results['results'][0]) - if self.role_binding.find_user_name(self.config.config_options['user']['value']) != None: - return True + bindings = None + if self.config.config_options['resource_kind']['value'] == 'cluster-role': + bindings = self.clusterpolicybindings + else: + bindings = self.policybindings + if bindings is None: return False - elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: - return False + for binding in bindings['roleBindings']: + _rb = binding['roleBinding'] + if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ + _rb['userNames'] is not None and \ + self.config.config_options['user']['value'] in _rb['userNames']: + self.role_binding = binding + return True - return results + return False def exists_scc(self): ''' return whether scc exists ''' diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index dcfc326a9..c398c5551 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1462,10 +1462,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval @@ -2266,7 +2267,6 @@ class Registry(OpenShiftCLI): def exists(self): '''does the object exist?''' - self.get() if self.deploymentconfig and self.service: return True @@ -2293,7 +2293,7 @@ class Registry(OpenShiftCLI): ''' prepare a registry for instantiation ''' options 
= self.config.to_option_list() - cmd = ['registry', '-n', self.config.namespace] + cmd = ['registry'] cmd.extend(options) cmd.extend(['--dry-run=True', '-o', 'json']) @@ -2327,7 +2327,8 @@ class Registry(OpenShiftCLI): service.put('spec.portalIP', self.portal_ip) # the dry-run doesn't apply the selector correctly - service.put('spec.selector', self.service.get_selector()) + if self.service: + service.put('spec.selector', self.service.get_selector()) # need to create the service and the deploymentconfig service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 68b797577..ab06a5141 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1487,10 +1487,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval @@ -2729,7 +2730,7 @@ class Router(OpenShiftCLI): options = self.config.to_option_list() - cmd = ['router', self.config.name, '-n', self.config.namespace] + cmd = ['router', self.config.name] cmd.extend(options) cmd.extend(['--dry-run=True', '-o', 'json']) diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 0347644eb..7a7eaf40a 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1386,10 +1386,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index e343c70df..a1994b0f1 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1353,10 +1353,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index c4ed42bbe..109a78184 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1362,10 +1362,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and 
(data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 863443517..bd6e77c2a 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1365,10 +1365,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 5a966fa93..1d0e4c876 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1297,10 +1297,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 03c9d2044..14d519e52 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1354,10 +1354,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 812c67de5..4f82abcfe 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1351,10 +1351,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 2ce3824e9..97dd310bc 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -1396,10 +1396,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): 
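The same stringify() change recurs across all of the lib_openshift wrappers: keys are iterated in sorted order and formatted with str.format, so the generated `--flag=value` list is deterministic from run to run. A small self-contained sketch of the new behavior, using made-up option data:

    # Sorted iteration gives a stable flag order; options without a value are skipped.
    config_options = {
        'replicas': {'value': 1, 'include': True},
        'selector': {'value': 'type=infra', 'include': True},
        'images':   {'value': None, 'include': True},   # falsy and not an int, so dropped
    }

    rval = []
    for key in sorted(config_options.keys()):
        data = config_options[key]
        if data['include'] and (data['value'] or isinstance(data['value'], int)):
            rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))

    print(rval)  # ['--replicas=1', '--selector=type=infra']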
+ for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index c9e9b1790..56e4e38f7 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1340,10 +1340,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 6058f0ee2..ad32d4900 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -1386,10 +1386,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 8f6303a66..a4d0ca3f3 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1392,10 +1392,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 7d78c96d0..b6586fca9 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1338,10 +1338,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index c058c555b..925a5a088 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1338,10 +1338,11 
@@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 4e65b8a0a..8f59d4d7e 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1310,10 +1310,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py index afb066c77..1e51913e0 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_group.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py @@ -41,6 +41,28 @@ class PolicyGroup(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None + self._cluster_policy_bindings = None + self._policy_bindings = None + + @property + def policybindings(self): + if self._policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve policybindings') + self._policy_bindings = results['results'][0]['items'][0] + + return self._policy_bindings + + @property + def clusterpolicybindings(self): + if self._cluster_policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') + self._cluster_policy_bindings = results['results'][0]['items'][0] + + return self._cluster_policy_bindings @property def role_binding(self): @@ -81,18 +103,24 @@ class PolicyGroup(OpenShiftCLI): def exists_role_binding(self): ''' return whether role_binding exists ''' - results = self.get() - if results['returncode'] == 0: - self.role_binding = RoleBinding(results['results'][0]) - if self.role_binding.find_group_name(self.config.config_options['group']['value']) != None: - return True + bindings = None + if self.config.config_options['resource_kind']['value'] == 'cluster-role': + bindings = self.clusterpolicybindings + else: + bindings = self.policybindings + if bindings is None: return False - elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: - return False + for binding in bindings['roleBindings']: + _rb = binding['roleBinding'] + if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ + _rb['groupNames'] is not None and \ + self.config.config_options['group']['value'] in _rb['groupNames']: + self.role_binding = binding + return True - return results + return False def exists_scc(self): ''' return whether scc exists ''' diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py 
b/roles/lib_openshift/src/class/oc_adm_policy_user.py index c9d53acfa..88fcc1ddc 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -40,6 +40,28 @@ class PolicyUser(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None + self._cluster_policy_bindings = None + self._policy_bindings = None + + @property + def policybindings(self): + if self._policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve policybindings') + self._policy_bindings = results['results'][0]['items'][0] + + return self._policy_bindings + + @property + def clusterpolicybindings(self): + if self._cluster_policy_bindings is None: + results = self._get('clusterpolicybindings', None) + if results['returncode'] != 0: + raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') + self._cluster_policy_bindings = results['results'][0]['items'][0] + + return self._cluster_policy_bindings @property def role_binding(self): @@ -62,36 +84,37 @@ class PolicyUser(OpenShiftCLI): self._scc = scc def get(self): - '''fetch the desired kind''' + '''fetch the desired kind + + This is only used for scc objects. + The {cluster}rolebindings happen in exists. + ''' resource_name = self.config.config_options['name']['value'] if resource_name == 'cluster-reader': resource_name += 's' - # oc adm policy add-... creates policy bindings with the name - # "[resource_name]-binding", however some bindings in the system - # simply use "[resource_name]". So try both. - - results = self._get(self.config.kind, resource_name) - if results['returncode'] == 0: - return results - - # Now try -binding naming convention - return self._get(self.config.kind, resource_name + "-binding") + return self._get(self.config.kind, resource_name) def exists_role_binding(self): ''' return whether role_binding exists ''' - results = self.get() - if results['returncode'] == 0: - self.role_binding = RoleBinding(results['results'][0]) - if self.role_binding.find_user_name(self.config.config_options['user']['value']) != None: - return True + bindings = None + if self.config.config_options['resource_kind']['value'] == 'cluster-role': + bindings = self.clusterpolicybindings + else: + bindings = self.policybindings + if bindings is None: return False - elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: - return False + for binding in bindings['roleBindings']: + _rb = binding['roleBinding'] + if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ + _rb['userNames'] is not None and \ + self.config.config_options['user']['value'] in _rb['userNames']: + self.role_binding = binding + return True - return results + return False def exists_scc(self): ''' return whether scc exists ''' diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py index 37904c43f..c083cd179 100644 --- a/roles/lib_openshift/src/class/oc_adm_registry.py +++ b/roles/lib_openshift/src/class/oc_adm_registry.py @@ -119,7 +119,6 @@ class Registry(OpenShiftCLI): def exists(self): '''does the object exist?''' - self.get() if self.deploymentconfig and self.service: return True @@ -146,7 +145,7 @@ class Registry(OpenShiftCLI): ''' prepare a registry for instantiation ''' options = self.config.to_option_list() - cmd = ['registry', '-n', self.config.namespace] + cmd = ['registry'] 
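Both policy classes also gain cached policybindings/clusterpolicybindings properties (added in the hunks above): the first access performs the lookup, later accesses reuse the stored result. A minimal sketch of that fetch-once pattern, with a purely illustrative stand-in client in place of the real OpenShiftCLI._get call:

    class FakeClient(object):
        def get(self, kind):
            # shape mirrors the results the module code indexes into
            return {'returncode': 0,
                    'results': [{'items': [{'roleBindings': []}]}]}

    class PolicyView(object):
        def __init__(self, client):
            self._client = client
            self._cluster_policy_bindings = None

        @property
        def clusterpolicybindings(self):
            # first access performs the lookup; later accesses reuse the cached copy
            if self._cluster_policy_bindings is None:
                result = self._client.get('clusterpolicybindings')
                if result['returncode'] != 0:
                    raise RuntimeError('Could not retrieve clusterpolicybindings')
                self._cluster_policy_bindings = result['results'][0]['items'][0]
            return self._cluster_policy_bindings

    view = PolicyView(FakeClient())
    print(view.clusterpolicybindings)  # {'roleBindings': []}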
cmd.extend(options) cmd.extend(['--dry-run=True', '-o', 'json']) @@ -180,7 +179,8 @@ class Registry(OpenShiftCLI): service.put('spec.portalIP', self.portal_ip) # the dry-run doesn't apply the selector correctly - service.put('spec.selector', self.service.get_selector()) + if self.service: + service.put('spec.selector', self.service.get_selector()) # need to create the service and the deploymentconfig service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py index 7b163b120..356d06fdf 100644 --- a/roles/lib_openshift/src/class/oc_adm_router.py +++ b/roles/lib_openshift/src/class/oc_adm_router.py @@ -224,7 +224,7 @@ class Router(OpenShiftCLI): options = self.config.to_option_list() - cmd = ['router', self.config.name, '-n', self.config.namespace] + cmd = ['router', self.config.name] cmd.extend(options) cmd.extend(['--dry-run=True', '-o', 'json']) diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index d037074a5..334542b97 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -591,10 +591,11 @@ class OpenShiftCLIConfig(object): def stringify(self): ''' return the options hash as cli params in a string ''' rval = [] - for key, data in self.config_options.items(): + for key in sorted(self.config_options.keys()): + data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): - rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) return rval diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py new file mode 100755 index 000000000..bab36fddc --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py @@ -0,0 +1,369 @@ +#!/usr/bin/env python +''' + Unit tests for oc adm registry +''' + +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501 +sys.path.insert(0, module_path) +from oc_adm_registry import Registry, locate_oc_binary # noqa: E402 + + +# pylint: disable=too-many-public-methods +class RegistryTest(unittest.TestCase): + ''' + Test class for Registry + ''' + dry_run = '''{ + "kind": "List", + "apiVersion": "v1", + "metadata": {}, + "items": [ + { + "kind": "ServiceAccount", + "apiVersion": "v1", + "metadata": { + "name": "registry", + "creationTimestamp": null + } + }, + { + "kind": "ClusterRoleBinding", + "apiVersion": "v1", + "metadata": { + "name": "registry-registry-role", + "creationTimestamp": null + }, + "userNames": [ + "system:serviceaccount:default:registry" + ], + "groupNames": null, + "subjects": [ + { + "kind": "ServiceAccount", + "namespace": "default", + "name": "registry" + } + ], + "roleRef": { + "kind": "ClusterRole", + "name": "system:registry" + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "docker-registry", + "creationTimestamp": null, + "labels": { + "docker-registry": "default" + } + }, + "spec": { + "strategy": { + "resources": 
{} + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "test": false, + "selector": { + "docker-registry": "default" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "docker-registry": "default" + } + }, + "spec": { + "volumes": [ + { + "name": "registry-storage", + "emptyDir": {} + } + ], + "containers": [ + { + "name": "registry", + "image": "openshift3/ose-docker-registry:v3.5.0.39", + "ports": [ + { + "containerPort": 5000 + } + ], + "env": [ + { + "name": "REGISTRY_HTTP_ADDR", + "value": ":5000" + }, + { + "name": "REGISTRY_HTTP_NET", + "value": "tcp" + }, + { + "name": "REGISTRY_HTTP_SECRET", + "value": "WQjSGeUu5KFZRTwGeIXgwIjyraNDLmdJblsFbtzZdF8=" + }, + { + "name": "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA", + "value": "false" + } + ], + "resources": { + "requests": { + "cpu": "100m", + "memory": "256Mi" + } + }, + "volumeMounts": [ + { + "name": "registry-storage", + "mountPath": "/registry" + } + ], + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": 5000 + }, + "initialDelaySeconds": 10, + "timeoutSeconds": 5 + }, + "readinessProbe": { + "httpGet": { + "path": "/healthz", + "port": 5000 + }, + "timeoutSeconds": 5 + }, + "securityContext": { + "privileged": false + } + } + ], + "nodeSelector": { + "type": "infra" + }, + "serviceAccountName": "registry", + "serviceAccount": "registry" + } + } + }, + "status": { + "latestVersion": 0, + "observedGeneration": 0, + "replicas": 0, + "updatedReplicas": 0, + "availableReplicas": 0, + "unavailableReplicas": 0 + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "docker-registry", + "creationTimestamp": null, + "labels": { + "docker-registry": "default" + } + }, + "spec": { + "ports": [ + { + "name": "5000-tcp", + "port": 5000, + "targetPort": 5000 + } + ], + "selector": { + "docker-registry": "default" + }, + "clusterIP": "172.30.119.110", + "sessionAffinity": "ClientIP" + }, + "status": { + "loadBalancer": {} + } + } + ]}''' + + @mock.patch('oc_adm_registry.Utils._write') + @mock.patch('oc_adm_registry.Utils.create_tmpfile_copy') + @mock.patch('oc_adm_registry.Registry._run') + def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): + ''' Testing state present ''' + params = {'state': 'present', + 'debug': False, + 'namespace': 'default', + 'name': 'docker-registry', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'images': None, + 'latest_images': None, + 'labels': None, + 'ports': ['5000'], + 'replicas': 1, + 'selector': 'type=infra', + 'service_account': 'registry', + 'mount_host': None, + 'volume_mounts': None, + 'env_vars': {}, + 'enforce_quota': False, + 'force': False, + 'daemonset': False, + 'tls_key': None, + 'tls_certificate': None, + 'edits': []} + + mock_cmd.side_effect = [ + (1, '', 'Error from server (NotFound): deploymentconfigs "docker-registry" not found'), + (1, '', 'Error from server (NotFound): service "docker-registry" not found'), + (0, RegistryTest.dry_run, ''), + (0, '', ''), + (0, '', ''), + ] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + '/tmp/mocked_kubeconfig', + ] + + results = Registry.run_ansible(params, False) + + self.assertTrue(results['changed']) + for result in results['results']['results']: + self.assertEqual(result['returncode'], 0) + + mock_cmd.assert_has_calls([ + mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], 
None), + mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False', + '--ports=5000', '--replicas=1', '--selector=type=infra', + '--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), + mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), ]) + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup fallback ''' + + mock_env_get.side_effect = lambda _v, _d: '' + + mock_path_exists.side_effect = lambda _: False + + self.assertEqual(locate_oc_binary(), 'oc') + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup in path ''' + + oc_bin = '/usr/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_path_exists.side_effect = lambda f: f == oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup in /usr/local/bin ''' + + oc_bin = '/usr/local/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_path_exists.side_effect = lambda f: f == oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup in ~/bin ''' + + oc_bin = os.path.expanduser('~/bin/oc') + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_path_exists.side_effect = lambda f: f == oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup fallback ''' + + mock_env_get.side_effect = lambda _v, _d: '' + + mock_shutil_which.side_effect = lambda _f, path=None: None + + self.assertEqual(locate_oc_binary(), 'oc') + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup in path ''' + + oc_bin = '/usr/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup in /usr/local/bin ''' + + oc_bin = '/usr/local/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup in ~/bin ''' + + oc_bin = 
os.path.expanduser('~/bin/oc') + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py new file mode 100755 index 000000000..51393dbaf --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python +''' + Unit tests for oc adm router +''' + +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501 +sys.path.insert(0, module_path) +from oc_adm_router import Router, locate_oc_binary # noqa: E402 + + +# pylint: disable=too-many-public-methods +class RouterTest(unittest.TestCase): + ''' + Test class for Router + ''' + dry_run = '''{ + "kind": "List", + "apiVersion": "v1", + "metadata": {}, + "items": [ + { + "kind": "ServiceAccount", + "apiVersion": "v1", + "metadata": { + "name": "router", + "creationTimestamp": null + } + }, + { + "kind": "ClusterRoleBinding", + "apiVersion": "v1", + "metadata": { + "name": "router-router-role", + "creationTimestamp": null + }, + "userNames": [ + "system:serviceaccount:default:router" + ], + "groupNames": null, + "subjects": [ + { + "kind": "ServiceAccount", + "namespace": "default", + "name": "router" + } + ], + "roleRef": { + "kind": "ClusterRole", + "name": "system:router" + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "creationTimestamp": null, + "labels": { + "router": "router" + } + }, + "spec": { + "strategy": { + "type": "Rolling", + "rollingParams": { + "maxUnavailable": "25%", + "maxSurge": 0 + }, + "resources": {} + }, + "triggers": [ + { + "type": "ConfigChange" + } + ], + "replicas": 2, + "test": false, + "selector": { + "router": "router" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "router": "router" + } + }, + "spec": { + "volumes": [ + { + "name": "server-certificate", + "secret": { + "secretName": "router-certs" + } + } + ], + "containers": [ + { + "name": "router", + "image": "openshift3/ose-haproxy-router:v3.5.0.39", + "ports": [ + { + "containerPort": 80 + }, + { + "containerPort": 443 + }, + { + "name": "stats", + "containerPort": 1936, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DEFAULT_CERTIFICATE_DIR", + "value": "/etc/pki/tls/private" + }, + { + "name": "ROUTER_EXTERNAL_HOST_HOSTNAME" + }, + { + "name": "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER" + }, + { + "name": "ROUTER_EXTERNAL_HOST_HTTP_VSERVER" + }, + { + "name": "ROUTER_EXTERNAL_HOST_INSECURE", + "value": "false" + }, + { + "name": "ROUTER_EXTERNAL_HOST_INTERNAL_ADDRESS" + }, + { + "name": "ROUTER_EXTERNAL_HOST_PARTITION_PATH" + }, + { + "name": "ROUTER_EXTERNAL_HOST_PASSWORD" + }, + { + "name": "ROUTER_EXTERNAL_HOST_PRIVKEY", + "value": "/etc/secret-volume/router.pem" + }, + { + "name": "ROUTER_EXTERNAL_HOST_USERNAME" + }, + { + "name": "ROUTER_EXTERNAL_HOST_VXLAN_GW_CIDR" + }, + { + "name": "ROUTER_SERVICE_HTTPS_PORT", + "value": "443" + }, + { + "name": "ROUTER_SERVICE_HTTP_PORT", + "value": "80" + }, + { + 
"name": "ROUTER_SERVICE_NAME", + "value": "router" + }, + { + "name": "ROUTER_SERVICE_NAMESPACE", + "value": "default" + }, + { + "name": "ROUTER_SUBDOMAIN" + }, + { + "name": "STATS_PASSWORD", + "value": "eSfUICQyyr" + }, + { + "name": "STATS_PORT", + "value": "1936" + }, + { + "name": "STATS_USERNAME", + "value": "admin" + } + ], + "resources": { + "requests": { + "cpu": "100m", + "memory": "256Mi" + } + }, + "volumeMounts": [ + { + "name": "server-certificate", + "readOnly": true, + "mountPath": "/etc/pki/tls/private" + } + ], + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": 1936, + "host": "localhost" + }, + "initialDelaySeconds": 10 + }, + "readinessProbe": { + "httpGet": { + "path": "/healthz", + "port": 1936, + "host": "localhost" + }, + "initialDelaySeconds": 10 + }, + "imagePullPolicy": "IfNotPresent" + } + ], + "nodeSelector": { + "type": "infra" + }, + "serviceAccountName": "router", + "serviceAccount": "router", + "hostNetwork": true, + "securityContext": {} + } + } + }, + "status": { + "latestVersion": 0, + "observedGeneration": 0, + "replicas": 0, + "updatedReplicas": 0, + "availableReplicas": 0, + "unavailableReplicas": 0 + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "router", + "creationTimestamp": null, + "labels": { + "router": "router" + }, + "annotations": { + "service.alpha.openshift.io/serving-cert-secret-name": "router-certs" + } + }, + "spec": { + "ports": [ + { + "name": "80-tcp", + "port": 80, + "targetPort": 80 + }, + { + "name": "443-tcp", + "port": 443, + "targetPort": 443 + }, + { + "name": "1936-tcp", + "protocol": "TCP", + "port": 1936, + "targetPort": 1936 + } + ], + "selector": { + "router": "router" + } + }, + "status": { + "loadBalancer": {} + } + } + ] +}''' + + @mock.patch('oc_adm_router.Utils._write') + @mock.patch('oc_adm_router.Utils.create_tmpfile_copy') + @mock.patch('oc_adm_router.Router._run') + def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): + ''' Testing a create ''' + params = {'state': 'present', + 'debug': False, + 'namespace': 'default', + 'name': 'router', + 'default_cert': None, + 'cert_file': None, + 'key_file': None, + 'cacert_file': None, + 'labels': None, + 'ports': ['80:80', '443:443'], + 'images': None, + 'latest_images': None, + 'clusterip': None, + 'portalip': None, + 'session_affinity': None, + 'service_type': None, + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'replicas': 2, + 'selector': 'type=infra', + 'service_account': 'router', + 'router_type': None, + 'host_network': None, + 'external_host': None, + 'external_host_vserver': None, + 'external_host_insecure': False, + 'external_host_partition_path': None, + 'external_host_username': None, + 'external_host_password': None, + 'external_host_private_key': None, + 'expose_metrics': False, + 'metrics_image': None, + 'stats_user': None, + 'stats_password': None, + 'stats_port': 1936, + 'edits': []} + + mock_cmd.side_effect = [ + (1, '', 'Error from server (NotFound): deploymentconfigs "router" not found'), + (1, '', 'Error from server (NotFound): service "router" not found'), + (1, '', 'Error from server (NotFound): serviceaccount "router" not found'), + (1, '', 'Error from server (NotFound): secret "router-certs" not found'), + (1, '', 'Error from server (NotFound): clsuterrolebinding "router-router-role" not found'), + (0, RouterTest.dry_run, ''), + (0, '', ''), + (0, '', ''), + (0, '', ''), + (0, '', ''), + (0, '', ''), + ] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', 
+ ] + + results = Router.run_ansible(params, False) + + self.assertTrue(results['changed']) + for result in results['results']['results']: + self.assertEqual(result['returncode'], 0) + + mock_cmd.assert_has_calls([ + mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'get', 'svc', 'router', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'get', 'sa', 'router', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'adm', 'router', 'router', '--expose-metrics=False', '--external-host-insecure=False', + '--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router', + '--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None), + mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), + mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), + mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), + mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None)]) + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup fallback ''' + + mock_env_get.side_effect = lambda _v, _d: '' + + mock_path_exists.side_effect = lambda _: False + + self.assertEqual(locate_oc_binary(), 'oc') + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup in path ''' + + oc_bin = '/usr/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_path_exists.side_effect = lambda f: f == oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup in /usr/local/bin ''' + + oc_bin = '/usr/local/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_path_exists.side_effect = lambda f: f == oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY3, 'py2 test only') + @mock.patch('os.path.exists') + @mock.patch('os.environ.get') + def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): + ''' Testing binary lookup in ~/bin ''' + + oc_bin = os.path.expanduser('~/bin/oc') + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_path_exists.side_effect = lambda f: f == oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup fallback ''' + + mock_env_get.side_effect = lambda _v, _d: '' + + mock_shutil_which.side_effect = lambda _f, path=None: None + + self.assertEqual(locate_oc_binary(), 'oc') + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup in path ''' + + oc_bin = '/usr/bin/oc' + + 
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup in /usr/local/bin ''' + + oc_bin = '/usr/local/bin/oc' + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) + + @unittest.skipIf(six.PY2, 'py3 test only') + @mock.patch('shutil.which') + @mock.patch('os.environ.get') + def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): + ''' Testing binary lookup in ~/bin ''' + + oc_bin = os.path.expanduser('~/bin/oc') + + mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + + mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + + self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py index 33a4faf3e..c204b5341 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py @@ -10,8 +10,9 @@ import os import subprocess import yaml -from six.moves import configparser - +# pylint import-error disabled because pylint cannot find the package +# when installed in a virtualenv +from ansible.module_utils.six.moves import configparser # pylint: disable=import-error from ansible.module_utils.basic import AnsibleModule try: diff --git a/roles/openshift_serviceaccounts/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml index 7a30c220f..d73d27356 100644 --- a/roles/openshift_serviceaccounts/meta/main.yml +++ b/roles/openshift_etcd_ca/meta/main.yml @@ -1,16 +1,17 @@ --- galaxy_info: - author: OpenShift Operations - description: OpenShift Service Accounts + author: Tim Bielawa + description: Meta role around the etcd_ca role company: Red Hat, Inc. license: Apache License, Version 2.0 - min_ansible_version: 1.9 + min_ansible_version: 2.2 platforms: - name: EL versions: - 7 categories: - cloud + - system dependencies: -- { role: openshift_facts } -- { role: lib_openshift } +- role: openshift_etcd_facts +- role: etcd_ca diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_etcd_ca/tasks/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml index 0d275e954..7c3ae2a86 100644 --- a/roles/openshift_excluder/defaults/main.yml +++ b/roles/openshift_excluder/defaults/main.yml @@ -2,3 +2,5 @@ # keep the 'current' package or update to 'latest' if available? 
openshift_excluder_package_state: present docker_excluder_package_state: present + +enable_excluders: true diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml index 8bca38e77..4d1c1efca 100644 --- a/roles/openshift_excluder/meta/main.yml +++ b/roles/openshift_excluder/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - { role: openshift_facts } +- { role: openshift_repos } diff --git a/roles/openshift_excluder/tasks/adjust.yml b/roles/openshift_excluder/tasks/adjust.yml index 6f4070c3d..cbdd7785b 100644 --- a/roles/openshift_excluder/tasks/adjust.yml +++ b/roles/openshift_excluder/tasks/adjust.yml @@ -8,16 +8,18 @@ - include: exclude.yml vars: # Enable the docker excluder only if it is overrided - enable_docker_excluder: "{{ enable_docker_excluder_override | default(false) | bool }}" + # BZ #1430612: docker excluders should be enabled even during installation and upgrade + exclude_docker_excluder: "{{ enable_docker_excluder | default(true) | bool }}" # excluder is to be disabled by default - enable_openshift_excluder: false + exclude_openshift_excluder: false # All excluders that are to be disabled are disabled - include: unexclude.yml vars: # If the docker override is not set, default to the generic behaviour - disable_docker_excluder: "{{ not enable_docker_excluder_override | default(not docker_excluder_on) | bool }}" + # BZ #1430612: docker excluders should be enabled even during installation and upgrade + unexclude_docker_excluder: false # disable openshift excluder is never overrided to be enabled # disable it if the docker excluder is enabled - disable_openshift_excluder: "{{ openshift_excluder_on | bool }}" + unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}" when: - - not openshift.common.is_containerized | bool + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml index ef6fc4a01..9122c9aeb 100644 --- a/roles/openshift_excluder/tasks/enable.yml +++ b/roles/openshift_excluder/tasks/enable.yml @@ -13,9 +13,9 @@ - include: exclude.yml vars: # Enable the docker excluder only if it is overrided, resp. enabled by default (in that order) - enable_docker_excluder: "{{ enable_docker_excluder_override | default(docker_excluder_on) | bool }}" + exclude_docker_excluder: "{{ enable_docker_excluder_override | default(docker_excluder_on) | bool }}" # Enable the openshift excluder only if it is not overrided, resp. 
enabled by default (in that order) - enable_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}" + exclude_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}" when: - - not openshift.common.is_containerized | bool + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml index ee0ad8a0b..d31351aea 100644 --- a/roles/openshift_excluder/tasks/exclude.yml +++ b/roles/openshift_excluder/tasks/exclude.yml @@ -1,20 +1,20 @@ --- # input variables: -# - enable_docker_excluder -# - enable_openshift_excluder +# - exclude_docker_excluder +# - exclude_openshift_excluder - block: - name: Enable docker excluder command: "{{ openshift.common.service_type }}-docker-excluder exclude" # if the docker override is set, it means the docker excluder needs to be enabled no matter what # if the docker override is not set, the excluder is set based on enable_docker_excluder when: - - enable_docker_excluder | default(false) | bool + - exclude_docker_excluder | default(false) | bool - name: Enable openshift excluder command: "{{ openshift.common.service_type }}-excluder exclude" # if the openshift override is set, it means the openshift excluder is disabled no matter what # if the openshift override is not set, the excluder is set based on enable_openshift_excluder when: - - enable_openshift_excluder | default(false) | bool + - exclude_openshift_excluder | default(false) | bool when: - - not openshift.common.is_containerized | bool + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/init.yml b/roles/openshift_excluder/tasks/init.yml index dee779925..1ea18f363 100644 --- a/roles/openshift_excluder/tasks/init.yml +++ b/roles/openshift_excluder/tasks/init.yml @@ -1,12 +1,12 @@ --- - name: Evalute if docker excluder is to be enabled set_fact: - docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders | default(false)) | bool }}" + docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders) | bool }}" - debug: var=docker_excluder_on - name: Evalute if openshift excluder is to be enabled set_fact: - openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders | default(false)) | bool }}" + openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders) | bool }}" - debug: var=openshift_excluder_on diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index 01fe5da55..dcc8df0cb 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -18,4 +18,4 @@ when: - install_openshift_excluder | default(true) | bool when: - - not openshift.common.is_containerized | bool + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml index 3b6821244..363ccdbea 100644 --- a/roles/openshift_excluder/tasks/status.yml +++ b/roles/openshift_excluder/tasks/status.yml @@ -81,4 +81,4 @@ - "{{ docker_excluder_on }}" when: - - not openshift.common.is_containerized | bool + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml index 4df92bc65..9112adbac 100644 --- a/roles/openshift_excluder/tasks/unexclude.yml +++ b/roles/openshift_excluder/tasks/unexclude.yml @@ -1,19 +1,17 @@ --- # 
input variables: -# - disable_docker_excluder -# - disable_openshift_excluder +# - unexclude_docker_excluder +# - unexclude_openshift_excluder - block: - - include: init.yml - - name: disable docker excluder command: "{{ openshift.common.service_type }}-docker-excluder unexclude" when: - - disable_docker_excluder | default(false) | bool + - unexclude_docker_excluder | default(false) | bool - name: disable openshift excluder command: "{{ openshift.common.service_type }}-excluder unexclude" when: - - disable_openshift_excluder | default(false) | bool + - unexclude_openshift_excluder | default(false) | bool when: - - not openshift.common.is_containerized | bool + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 8ea900e21..eeab8a99c 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -19,8 +19,8 @@ import struct import socket from distutils.util import strtobool from distutils.version import LooseVersion -from six import string_types, text_type -from six.moves import configparser +from ansible.module_utils.six import string_types, text_type +from ansible.module_utils.six.moves import configparser # ignore pylint errors related to the module_utils import # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index 2c70438c9..8433923ed 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -2,13 +2,17 @@ Health checks for OpenShift clusters. 
""" +import operator import os + from abc import ABCMeta, abstractmethod, abstractproperty from importlib import import_module -import operator -import six -from six.moves import reduce +# add_metaclass is not available in the embedded six from module_utils in Ansible 2.2.1 +from six import add_metaclass +# pylint import-error disabled because pylint cannot find the package +# when installed in a virtualenv +from ansible.module_utils.six.moves import reduce # pylint: disable=import-error, redefined-builtin class OpenShiftCheckException(Exception): @@ -16,7 +20,7 @@ class OpenShiftCheckException(Exception): pass -@six.add_metaclass(ABCMeta) +@add_metaclass(ABCMeta) class OpenShiftCheck(object): """A base class for defining checks for an OpenShift cluster environment.""" diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index e9b590550..bbbb76414 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -17,19 +17,3 @@ dependencies: - role: lib_openshift - role: openshift_projects openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}" -- role: openshift_serviceaccounts - openshift_serviceaccounts_names: - - router - - registry - openshift_serviceaccounts_namespace: default - openshift_serviceaccounts_sccs: - - hostnetwork - when: openshift.common.version_gte_3_2_or_1_2 -- role: openshift_serviceaccounts - openshift_serviceaccounts_names: - - router - - registry - openshift_serviceaccounts_namespace: default - openshift_serviceaccounts_sccs: - - privileged - when: not openshift.common.version_gte_3_2_or_1_2 diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index d89ce855a..0b8042473 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -56,12 +56,24 @@ openshift_hosted_registry_force: - False +- name: Create the registry service account + oc_serviceaccount: + name: "{{ openshift_hosted_registry_serviceaccount }}" + namespace: "{{ openshift_hosted_registry_namespace }}" + +- name: Grant the registry serivce account access to the appropriate scc + oc_adm_policy_user: + user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}" + namespace: "{{ openshift_hosted_registry_namespace }}" + resource_kind: scc + resource_name: hostnetwork + - name: oc adm policy add-cluster-role-to-user system:registry system:serviceaccount:default:registry oc_adm_policy_user: - user: system:serviceaccount:default:registry + user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}" + namespace: "{{ openshift_hosted_registry_namespace }}" resource_kind: cluster-role resource_name: system:registry - state: present - name: create the default registry service oc_service: diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 3b7021eae..969fb27a9 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -22,6 +22,21 @@ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" +- name: Create the router service account(s) + 
oc_serviceaccount: + name: "{{ item.serviceaccount }}" + namespace: "{{ item.namespace }}" + state: present + with_items: "{{ openshift_hosted_routers }}" + +- name: Grant the router serivce account(s) access to the appropriate scc + oc_adm_policy_user: + user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}" + namespace: "{{ item.namespace }}" + resource_kind: scc + resource_name: hostnetwork + with_items: "{{ openshift_hosted_routers }}" + - name: Create OpenShift router oc_adm_router: name: "{{ item.name }}" diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index 9beffaef7..44b0b2d48 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -5,6 +5,18 @@ import random +def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'): + '''Return a hash with the desired storage for the given ES instance''' + deploy_config = os_logging_facts[root]['deploymentconfigs'].get(dc_name) + if deploy_config: + storage = deploy_config['volumes']['elasticsearch-storage'] + if storage.get('hostPath'): + return dict(kind='hostpath', path=storage.get('hostPath').get('path')) + if len(pvc_claim.strip()) > 0: + return dict(kind='pvc', pvc_claim=pvc_claim) + return dict(kind='emptydir') + + def random_word(source_alpha, length): ''' Returns a random word given the source of characters to pick from and resulting length ''' return ''.join(random.choice(source_alpha) for i in range(length)) @@ -44,4 +56,5 @@ class FilterModule(object): 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, 'map_from_pairs': map_from_pairs, + 'es_storage': es_storage } diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml index ab57242c8..9c480f73a 100644 --- a/roles/openshift_logging/meta/main.yaml +++ b/roles/openshift_logging/meta/main.yaml @@ -13,5 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift -- role: openshift_master_facts - role: openshift_facts diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml index 81fac8b5e..0f8e7ae58 100644 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -64,7 +64,7 @@ admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml vars: secret_name: logging-elasticsearch - secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"] + secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"] register: logging_es_secret when: secret_name not in openshift_logging_facts.elasticsearch.secrets or secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0 diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index a0ad12d94..086f9e33f 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -2,6 +2,8 @@ - name: Getting current ES deployment size set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }} +- set_fact: es_pvc_pool={{[]}} + - name: Generate PersistentVolumeClaims include: "{{ 
role_path}}/tasks/generate_pvcs.yaml" vars: @@ -42,10 +44,10 @@ es_cluster_name: "{{component}}" es_cpu_limit: "{{openshift_logging_es_cpu_limit }}" es_memory_limit: "{{openshift_logging_es_memory_limit}}" - volume_names: "{{es_pvc_pool | default([])}}" - pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}" + pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}" deploy_name: "{{item.1}}" es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}" + es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}" with_indexed_items: - "{{ es_dc_pool }}" check_mode: no @@ -111,8 +113,7 @@ logging_component: elasticsearch deploy_name_prefix: "logging-{{component}}" image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" - volume_names: "{{es_pvc_pool | default([])}}" - pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}" + pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}" deploy_name: "{{item.1}}" es_cluster_name: "{{component}}" es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}" @@ -121,7 +122,8 @@ es_recover_after_nodes: "{{es_ops_recover_after_nodes}}" es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}" openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}" - es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) | map_from_pairs }}" + es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}" + es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}" with_indexed_items: - "{{ es_ops_dc_pool | default([]) }}" when: diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial new file mode 100644 index 000000000..ccd01a816 --- /dev/null +++ b/roles/openshift_logging/templates/es-storage-emptydir.partial @@ -0,0 +1 @@ + emptyDir: {} diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial new file mode 100644 index 000000000..07ddad9ba --- /dev/null +++ b/roles/openshift_logging/templates/es-storage-hostpath.partial @@ -0,0 +1,2 @@ + hostPath: + path: {{es_storage['path']}} diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial new file mode 100644 index 000000000..fcbff68de --- /dev/null +++ b/roles/openshift_logging/templates/es-storage-pvc.partial @@ -0,0 +1,2 @@ + persistentVolumeClaim: + claimName: {{es_storage['pvc_claim']}} diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2 index 81ae070be..16185fc1d 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging/templates/es.j2 @@ -103,9 +103,4 @@ spec: configMap: name: logging-elasticsearch - name: elasticsearch-storage -{% if pvc_claim is defined and pvc_claim | trim | length > 0 %} - persistentVolumeClaim: - claimName: {{pvc_claim}} -{% else %} - emptyDir: {} -{% endif %} +{% include 'es-storage-'+ es_storage['kind'] + '.partial' %} diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index af3e7eeec..18e1b3a54 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -40,8 +40,6 @@ dependencies: port: 4001/tcp when: 
groups.oo_etcd_to_config | default([]) | length == 0 - role: nickhammond.logrotate -- role: nuage_master - when: openshift.common.use_nuage | bool - role: contiv contiv_role: netmaster when: openshift.common.use_contiv | bool diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 2ef61cddf..98e0da1a2 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -249,7 +249,7 @@ # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. command: > - curl --silent + curl --silent --tlsv1.2 {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt {% else %} diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index db24028cd..01806c97f 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -7,12 +7,16 @@ Custom filters for use in openshift-master import copy import sys +# pylint import-error disabled because pylint cannot find the package +# when installed in a virtualenv from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error from ansible import errors from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.filter.core import to_bool as ansible_bool -from six import string_types +# pylint import-error disabled because pylint cannot find the package +# when installed in a virtualenv +from ansible.compat.six import string_types # pylint: disable=no-name-in-module,import-error import yaml diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml new file mode 100644 index 000000000..ffb812271 --- /dev/null +++ b/roles/openshift_metrics/handlers/main.yml @@ -0,0 +1,26 @@ +--- +- name: restart master + systemd: name={{ openshift.common.service_type }}-master state=restarted + when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) + notify: Verify API Server + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl --silent --tlsv1.2 + {% if openshift.common.version_gte_3_2_or_1_2 | bool %} + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {% else %} + --cacert {{ openshift.common.config_base }}/master/ca.crt + {% endif %} + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 66a3abdbd..ffe6f63a2 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -34,6 +34,8 @@ file_content: "{{ item.content | b64decode | from_yaml }}" with_items: "{{ object_defs.results }}" +- include: update_master_config.yaml + - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml new file mode 100644 index 000000000..20fc45fd4 --- /dev/null +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -0,0 +1,9 @@ +--- +- name: Adding metrics route information to metricsPublicURL + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: assetConfig.metricsPublicURL + yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" + notify: restart master + tags: + - update_master_config diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 691227915..626248306 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -153,7 +153,7 @@ # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. command: > - curl --silent --cacert {{ openshift.common.config_base }}/node/ca.crt + curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt {{ openshift_node_master_api_url }}/healthz/ready args: # Disables the following warning: diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml deleted file mode 100644 index b8cbe9a84..000000000 --- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -#### -# -# OSE 3.0.z did not have 'oadm policy add-scc-to-user'. 
-# -#### - -- name: tmp dir for openshift - file: - path: /tmp/openshift - state: directory - owner: root - mode: 0700 - -- name: Create service account configs - template: - src: serviceaccount.j2 - dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml" - with_items: '{{ openshift_serviceaccounts_names }}' - -- name: Get current security context constraints - shell: > - {{ openshift.common.client_binary }} get scc privileged -o yaml - --output-version=v1 > /tmp/openshift/scc.yaml - changed_when: false - -- name: Add security context constraint for {{ item }} - lineinfile: - dest: /tmp/openshift/scc.yaml - line: "- system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}" - insertafter: "^users:$" - when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}" - with_nested: - - '{{ openshift_serviceaccounts_names }}' - - '{{ scc_test.results }}' - -- name: Apply new scc rules for service accounts - command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1" diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml deleted file mode 100644 index 1d570fa5b..000000000 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: create the service account - oc_serviceaccount: - name: "{{ item }}" - namespace: "{{ openshift_serviceaccounts_namespace }}" - state: present - with_items: - - "{{ openshift_serviceaccounts_names }}" - -- name: test if scc needs to be updated - command: > - {{ openshift.common.client_binary }} get scc {{ item }} -o yaml - changed_when: false - failed_when: false - register: scc_test - with_items: "{{ openshift_serviceaccounts_sccs }}" - -- name: Grant the user access to the appropriate scc - command: > - {{ openshift.common.client_binary }} adm policy add-scc-to-user - {{ item.1.item }} system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }} - when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users | default([]) }}" - with_nested: - - "{{ openshift_serviceaccounts_names }}" - - "{{ scc_test.results }}" - -- include: legacy_add_scc_to_user.yml - when: not openshift.common.version_gte_3_1_or_1_1 diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 deleted file mode 100644 index c5f12421f..000000000 --- a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ item.0 }} |
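
The OpenShiftCLIConfig.stringify() change in roles/lib_openshift/src/lib/base.py iterates the option hash in sorted key order, so the generated --key=value flags (and therefore the dry-run commands asserted by the new registry and router tests) come out in a stable order. A minimal standalone sketch of that behaviour, using a hypothetical options hash rather than the module's real OpenShiftCLIConfig object:

    def stringify(config_options):
        ''' Render an options hash as a sorted list of --key=value CLI params. '''
        rval = []
        for key in sorted(config_options.keys()):
            data = config_options[key]
            # Only emit flags that are switched on and carry a value;
            # integer values (including 0) still count as a value.
            if data['include'] and (data['value'] or isinstance(data['value'], int)):
                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
        return rval

    # Hypothetical example loosely mirroring the registry test parameters:
    options = {
        'service_account': {'include': True, 'value': 'registry'},
        'replicas': {'include': True, 'value': 1},
        'mount_host': {'include': True, 'value': None},  # skipped: no value
    }
    print(stringify(options))
    # ['--replicas=1', '--service-account=registry']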
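
The new es_storage filter in roles/openshift_logging/filter_plugins/openshift_logging.py resolves, per Elasticsearch deploymentconfig, which storage stanza the es.j2 template should include: an existing hostPath volume wins, otherwise a non-empty PVC claim name, otherwise emptyDir. A small usage sketch; the shape of the facts dict below is an assumption based only on how the filter indexes it:

    import sys
    # Path assumption: run from the repository root so the filter plugin imports.
    sys.path.insert(0, 'roles/openshift_logging/filter_plugins')
    from openshift_logging import es_storage

    # Hypothetical facts snippet shaped the way the filter indexes it.
    logging_facts = {
        'elasticsearch': {
            'deploymentconfigs': {
                'logging-es-1': {
                    'volumes': {
                        'elasticsearch-storage': {'hostPath': {'path': '/var/lib/es'}}
                    }
                }
            }
        }
    }

    print(es_storage(logging_facts, 'logging-es-1', ''))
    # {'kind': 'hostpath', 'path': '/var/lib/es'}
    print(es_storage(logging_facts, 'logging-es-2', 'logging-es-2-pvc'))
    # {'kind': 'pvc', 'pvc_claim': 'logging-es-2-pvc'}
    print(es_storage(logging_facts, 'logging-es-2', ''))
    # {'kind': 'emptydir'}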
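
The test_binary_lookup_* cases in the new test_oc_adm_registry.py and test_oc_adm_router.py files exercise a locate_oc_binary() helper whose source is part of the generated library rather than this diff. The sketch below is only an approximation of the behaviour those mocks encode (fallback to the bare name 'oc', extra probing of /usr/local/bin and ~/bin, shutil.which on Python 3 versus os.path.exists on Python 2); the real module's wiring may differ:

    import os
    import shutil

    # Candidate locations checked in addition to $PATH, per the test fixtures.
    ADDITIONAL_PATHS = ['/usr/local/bin', os.path.expanduser('~/bin')]

    def locate_oc_binary():
        ''' Return the full path to the oc client, or the bare name as a fallback. '''
        paths = os.environ.get('PATH', '').split(os.pathsep) + ADDITIONAL_PATHS
        oc_binary = 'oc'

        which = getattr(shutil, 'which', None)
        if which is not None:
            # Python 3: shutil.which() performs the search across the path list.
            path = which(oc_binary, path=os.pathsep.join(paths))
            if path is not None:
                oc_binary = path
        else:
            # Python 2 fallback: probe each candidate directory directly.
            for candidate in paths:
                candidate_path = os.path.join(candidate, oc_binary)
                if os.path.exists(candidate_path):
                    oc_binary = candidate_path
                    break

        return oc_binary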