-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  .tito/releasers.conf | 6
-rw-r--r--  README.md | 3
-rw-r--r--  README_libvirt.md | 8
-rw-r--r--  ansible.cfg | 10
-rw-r--r--  filter_plugins/oo_filters.py | 8
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.external.example | 10
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.mixed.example | 10
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.native.example | 10
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.registry-only.example | 10
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.storage-and-registry.example | 16
-rw-r--r--  inventory/byo/hosts.origin.example | 9
-rw-r--r--  inventory/byo/hosts.ose.example | 9
-rw-r--r--  openshift-ansible.spec | 105
-rw-r--r--  playbooks/byo/openshift-cluster/cluster_hosts.yml | 1
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 7
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 40
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 1
-rw-r--r--  playbooks/common/openshift-master/config.yml | 58
-rw-r--r--  playbooks/common/openshift-node/config.yml | 32
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 10
-rw-r--r--  roles/contiv/defaults/main.yml | 2
-rw-r--r--  roles/contiv/meta/main.yml | 2
-rw-r--r--  roles/contiv/tasks/netmaster.yml | 12
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 2
-rw-r--r--  roles/contiv/tasks/netplugin_iptables.yml | 2
-rw-r--r--  roles/contiv/templates/contiv.cfg.j2 | 3
-rw-r--r--  roles/contiv/templates/contiv.cfg.master.j2 | 7
-rw-r--r--  roles/contiv/templates/netmaster.service | 2
-rw-r--r--  roles/contiv_auth_proxy/defaults/main.yml | 5
-rw-r--r--  roles/contiv_auth_proxy/files/cert.pem | 33
-rw-r--r--  roles/contiv_auth_proxy/files/key.pem | 51
-rw-r--r--  roles/contiv_auth_proxy/templates/auth_proxy.j2 | 2
-rw-r--r--  roles/contiv_facts/tasks/main.yml | 6
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 4
-rw-r--r--  roles/etcd/tasks/main.yml | 3
-rw-r--r--  roles/etcd_migrate/tasks/check.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 41
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 47
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 74
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 9
-rw-r--r--  roles/lib_openshift/src/ansible/oc_pvc.py | 6
-rw-r--r--  roles/lib_openshift/src/ansible/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_policy_group.py | 41
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_policy_user.py | 47
-rw-r--r--  roles/lib_openshift/src/class/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_pvc.py | 2
-rw-r--r--  roles/lib_openshift/src/doc/pvc | 12
-rw-r--r--  roles/lib_openshift/src/lib/pvc.py | 54
-rw-r--r--  roles/lib_openshift/src/lib/storageclass.py | 7
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_pvc.yml | 28
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_pvc.py | 11
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 4
-rw-r--r--  roles/openshift_excluder/tasks/unexclude.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 13
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 30
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 38
-rw-r--r-- [-rwxr-xr-x]  roles/openshift_health_checker/library/aos_version.py | 6
-rw-r--r-- [-rwxr-xr-x]  roles/openshift_health_checker/library/check_yum_update.py | 0
-rw-r--r--  roles/openshift_health_checker/library/docker_info.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 105
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 19
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 72
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 36
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py | 29
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_traffic.py | 19
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_volume.py | 23
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/curator.py | 20
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 63
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/fluentd.py | 51
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py | 138
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 41
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 40
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py | 28
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 21
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 25
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 50
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 21
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 39
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 17
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 11
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 37
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 36
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 37
-rw-r--r--  roles/openshift_health_checker/test/etcd_imagedata_size_test.py | 20
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 16
-rw-r--r--  roles/openshift_health_checker/test/etcd_volume_test.py | 9
-rw-r--r--  roles/openshift_health_checker/test/fluentd_config_test.py | 357
-rw-r--r--  roles/openshift_health_checker/test/fluentd_test.py | 14
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 29
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 14
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py | 32
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/mixins_test.py | 4
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py | 43
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 17
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 7
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py | 5
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 26
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 6
-rw-r--r--  roles/openshift_loadbalancer/README.md | 2
-rw-r--r--  roles/openshift_logging/README.md | 31
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 16
-rw-r--r--  roles/openshift_logging/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 4
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 3
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 17
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 1
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 2
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 18
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 14
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 18
-rw-r--r--  roles/openshift_master/tasks/main.yml | 30
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 5
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 8
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 5
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 7
-rw-r--r--  roles/openshift_master/vars/main.yml | 1
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 1
-rw-r--r--  roles/openshift_metrics/tasks/generate_rolebindings.yaml | 33
-rw-r--r--  roles/openshift_metrics/tasks/generate_serviceaccounts.yaml | 12
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 3
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_role.j2 | 15
-rw-r--r--  roles/openshift_metrics/templates/route.j2 | 2
-rw-r--r--  roles/openshift_metrics/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 16
-rw-r--r--  roles/openshift_node/tasks/config/configure-proxy-settings.yml | 17
-rw-r--r--  roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/install-ovs-service-env-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml | 13
-rw-r--r--  roles/openshift_node/tasks/main.yml | 38
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 85
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 2
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 29
-rw-r--r--  roles/openshift_node_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 34
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 10
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml | 16
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml | 17
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 81
-rw-r--r--  roles/openshift_node_upgrade/templates/node.service.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 2
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 2
169 files changed, 2233 insertions(+), 1115 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 64a35a246..a667c3f2d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.153-1 ./
+3.7.1-1 ./
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index b52e4fd87..17baaa1bd 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -37,6 +37,12 @@ releaser = tito.release.DistGitReleaser
branches = rhaos-3.6-rhel-7
srpm_disttag = .el7aos
+[aos-3.7]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.7-rhel-7
+srpm_disttag = .el7aos
+
+
[copr-openshift-ansible]
releaser = tito.release.CoprReleaser
project_name = @OpenShiftOnlineOps/openshift-ansible
diff --git a/README.md b/README.md
index 1cf8d1156..71912fb98 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,8 @@ Follow this release pattern and you can't go wrong:
| ------------- | ----------------- |
| 1.3 | 3.3 |
| 1.4 | 3.4 |
-| 1.*X* | 3.*X* |
+| 1.5 | 3.5 |
+| 3.*X* | 3.*X* |
If you're running from the openshift-ansible **master branch** we can
only guarantee compatibility with the newest origin releases **in
diff --git a/README_libvirt.md b/README_libvirt.md
index c523d83fb..1661681a0 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -15,7 +15,7 @@ Install dependencies
3. Install [ebtables](http://ebtables.netfilter.org/)
4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
5. Install [libvirt-python and libvirt](http://libvirt.org/)
-6. Install [genisoimage](http://cdrkit.org/)
+6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
7. Enable and start the libvirt daemon, e.g:
- `systemctl enable libvirtd`
- `systemctl start libvirtd`
@@ -23,6 +23,7 @@ Install dependencies
9. Check that your `$HOME` is accessible to the qemu user²
10. Configure dns resolution on the host³
11. Install libselinux-python
+12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴
#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
@@ -103,6 +104,11 @@ sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
server=/example.com/192.168.55.1
```
+#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
+
+This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.
+
+
Test The Setup
--------------
diff --git a/ansible.cfg b/ansible.cfg
index 14b77ba0f..589a58e9d 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -16,6 +16,13 @@ host_key_checking = False
retry_files_enabled = False
retry_files_save_path = ~/ansible-installer-retries
nocows = True
+remote_user = root
+roles_path = roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
# Uncomment to use the provided BYO inventory
#hostfile = inventory/byo/hosts
@@ -29,7 +36,8 @@ nocows = True
# Additional ssh options for OpenShift Ansible
[ssh_connection]
pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
# shorten the ControlPath which is often too long; when it is,
# ssh connection reuse silently fails, making everything slower.
control_path = %(directory)s/%%h-%%r
-
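The new `fact_caching = jsonfile` settings above write gathered facts to one JSON file per host under `$HOME/ansible/facts` and reuse them for up to 600 seconds, so repeated runs skip fact gathering. A minimal sketch of inspecting that cache, assuming the connection path configured above and a hypothetical host file name:

```python
# Hedged sketch: peek at Ansible's jsonfile fact cache for one host.
# The directory matches fact_caching_connection above; the host file
# name ("master.example.com") is hypothetical.
import json
import os

cache_file = os.path.expanduser("~/ansible/facts/master.example.com")
with open(cache_file) as f:
    facts = json.load(f)

# Cached facts are ordinary gathered facts, e.g. the distribution name.
print(facts.get("ansible_distribution"))
```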
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index c6d0e69eb..36a90a870 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -194,10 +194,10 @@ def oo_select_keys_from_list(data, keys):
"""
if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
+ raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects to filter on a list")
if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [oo_select_keys(item, keys) for item in data]
@@ -213,10 +213,10 @@ def oo_select_keys(data, keys):
"""
if not isinstance(data, Mapping):
- raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
+ raise errors.AnsibleFilterError("|oo_select_keys failed expects to filter on a dict or object")
if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ raise errors.AnsibleFilterError("|oo_select_keys failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [data[key] for key in keys if key in data]
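The two filter changes above only rename the error prefixes (from the generic `|failed` to the filter's own name), so a playbook failure is traceable to the right filter. A standalone sketch of `oo_select_keys`'s behavior in plain Python, with `TypeError` standing in for `AnsibleFilterError`:

```python
# Minimal sketch of oo_select_keys outside Ansible; the real filter
# raises errors.AnsibleFilterError rather than TypeError.
from collections.abc import Mapping


def oo_select_keys(data, keys):
    if not isinstance(data, Mapping):
        raise TypeError("|oo_select_keys failed expects to filter on a dict or object")
    if not isinstance(keys, list):
        raise TypeError("|oo_select_keys failed expects first param is a list")
    # Gather up the values for the list of keys passed in
    return [data[key] for key in keys if key in data]


print(oo_select_keys({"a": 1, "b": 2, "c": 3}, ["a", "c"]))  # -> [1, 3]
```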
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
index 628d3a3f7..5a284ce97 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -31,13 +31,13 @@ openshift_storage_glusterfs_is_native=False
openshift_storage_glusterfs_heketi_url=172.0.0.1
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
# Specify the glusterfs group, which contains the nodes of the external
# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
index fd47cb9d5..d16df6470 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -34,13 +34,13 @@ openshift_storage_glusterfs_heketi_is_native=True
openshift_storage_glusterfs_heketi_executor=ssh
openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
# Specify the glusterfs group, which contains the nodes of the external
# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
index a3e2570c9..c1a1f6f84 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -24,15 +24,15 @@ ansible_ssh_user=root
openshift_deployment_type=origin
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
# Specify the glusterfs group, which contains the nodes that will host
# GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
index 999518abe..31a85ee42 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -30,15 +30,15 @@ openshift_deployment_type=origin
openshift_hosted_registry_storage_kind=glusterfs
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
-node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
# Specify the glusterfs group, which contains the nodes that will host
# GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
index 1df79301a..54bd89ddc 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -31,20 +31,20 @@ openshift_deployment_type=origin
openshift_hosted_registry_storage_kind=glusterfs
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
# It is recommended to not use a single cluster for both general and registry
# storage, so two three-node clusters will be required.
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
-node3 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
# Specify the glusterfs group, which contains the nodes that will host
# GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index de7493f71..f09c3d255 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -84,6 +84,13 @@ openshift_release=v3.6
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
@@ -563,7 +570,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# StorageClass
# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': false}
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
#
# Logging deployment
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 62a364e0d..c4b5da5b8 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -84,6 +84,13 @@ openshift_release=v3.6
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
@@ -563,7 +570,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# StorageClass
# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': false}
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
#
# Logging deployment
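The `encrypted` quoting fix in both example inventories matters because a Kubernetes StorageClass's `parameters` field is a map of string keys to string values: a bare boolean is rejected by the API, while the string `'false'` is accepted and interpreted by the provisioner. A quick illustration of the constraint:

```python
# StorageClass.parameters must be map[string]string, so every value,
# including booleans, has to be passed as a string.
params_ok = {"type": "gp2", "encrypted": "false"}   # valid parameters
params_bad = {"type": "gp2", "encrypted": False}    # rejected: not a string

for name, params in (("ok", params_ok), ("bad", params_bad)):
    valid = all(isinstance(v, str) for v in params.values())
    print(name, "valid" if valid else "invalid")
```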
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 3a59ab25b..9cadf5947 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.153
+Version: 3.7.1
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,109 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jul 27 2017 Scott Dodson <sdodson@redhat.com> 3.7.1-1
+- Fix incorrect delegate_to in control plane upgrade (sdodson@redhat.com)
+- Follow the new naming conventions. (zhang.wanmin@zte.com.cn)
+- Simplify generation of /etc/origin/node/resolv.conf (sdodson@redhat.com)
+- Add glusterfs hosts to oo_all_hosts so that hosts set initial facts.
+ (abutcher@redhat.com)
+- Sync all openshift.common.use_openshift_sdn uses in yaml files
+ (jchaloup@redhat.com)
+- Fixing podpresets perms for service-catalog-controller (ewolinet@redhat.com)
+- Fixing route spec caCertificate to be correctly capitalized
+ (ewolinet@redhat.com)
+- Set TimeoutStartSec=300 (sdodson@redhat.com)
+- Revert "set KillMode to process in node service file" (sdodson@redhat.com)
+- openshift_checks: refactor to internalize task_vars (lmeyer@redhat.com)
+- openshift_checks: get rid of deprecated module_executor (lmeyer@redhat.com)
+- openshift_checks: improve comments/names (lmeyer@redhat.com)
+- add default value for router path in the cert (efreiber@redhat.com)
+- Router wildcard certificate created by default (efreiber@redhat.com)
+- Remove unsupported parameters from example inventory files.
+ (jarrpa@redhat.com)
+- Fix lint errors (sdodson@redhat.com)
+- Metrics: grant hawkular namespace listener role (mwringe@redhat.com)
+- Removing nolog from htpasswd invocation so not to supress errors
+ (ewolinet@redhat.com)
+- Removed kubernetes.io string from default. (kwoodson@redhat.com)
+- Allow storage migrations to be optional and/or non fatal (sdodson@redhat.com)
+- libvirt: fall back to mkisofs if genisoimage isn't available
+ (dcbw@redhat.com)
+- libvirt: add documentation about SSH keypair requirements (dcbw@redhat.com)
+- Updating how storage type is determined, adding bool filter in
+ openshift_logging_elasticsearch (ewolinet@redhat.com)
+- Pass the provisioner to the module. (kwoodson@redhat.com)
+- Use absolute path when unexcluding (Sergi Jimenez)
+- Fixes https://bugzilla.redhat.com/show_bug.cgi?id=1474246 (Sergi Jimenez)
+- Support enabling the centos-openshift-origin-testing repository
+ (dms@redhat.com)
+- 1472467- add ose- prefix to ansible service broker name (fabian@fabianism.us)
+- Updating openshift_logging_kibana default for kibana hostname
+ (ewolinet@redhat.com)
+- GlusterFS: Create registry storage svc and ep in registry namespace
+ (jarrpa@redhat.com)
+- Default an empty list for etcd_to_config if not there (tbielawa@redhat.com)
+- If proxy in effect, add etcd host IP addresses to NO_PROXY list on masters
+ (tbielawa@redhat.com)
+- GlusterFS: Pass all booleans through bool filter. (jarrpa@redhat.com)
+- GlusterFS: Fix bug in detecting whether to open firewall ports.
+ (jarrpa@redhat.com)
+- Pass first master's openshift_image_tag to openshift_loadbalancer for
+ containerized haproxy installation. (abutcher@redhat.com)
+- verify sane log times in logging stack (jvallejo@redhat.com)
+- Fix log dumping on service failure (sdodson@redhat.com)
+- Updating verbs for serviceclasses objects (ewolinet@redhat.com)
+- Fix broken link to Docker image instructions (rhcarvalho@gmail.com)
+- Added parameters inside of gce defaults. Pass all params to the module.
+ (kwoodson@redhat.com)
+- add etcd increased-traffic check (jvallejo@redhat.com)
+- Add etcd exports to openshift_storage_nfs (abutcher@redhat.com)
+- Hopefully finally fix the no_proxy settings (tbielawa@redhat.com)
+- openshift_checks/docker_storage: overlay/2 support (lmeyer@redhat.com)
+- Removing parameter kind and allowing default to be passed.
+ (kwoodson@redhat.com)
+- Remove openshift_use_dnsmasq from aws and libvirt playbooks
+ (sdodson@redhat.com)
+- 1471973- default to bootstrapping the broker on startup (fabian@fabianism.us)
+- image builds: remove dependency on playbook2image (jvallejo@redhat.com)
+- Setting node selector to be empty string (ewolinet@redhat.com)
+- Add drain retries after 60 second delay (sdodson@redhat.com)
+- Dump some logs (sdodson@redhat.com)
+- daemon_reload on node and ovs start (sdodson@redhat.com)
+- Ensure proper fact evaluation (sdodson@redhat.com)
+- Wrap additional service changes in retries (sdodson@redhat.com)
+- Wrap docker stop in retries (sdodson@redhat.com)
+- Add retries to node restart handlers (sdodson@redhat.com)
+- Test docker restart with retries 3 delay 30 (smilner@redhat.com)
+- Adding podpreset config into master-config (ewolinet@redhat.com)
+- Update image-gc-high-threshold value (decarr@redhat.com)
+- Adding a check for variable definition. (kwoodson@redhat.com)
+- docker: fix docker_selinux_enabled (lmeyer@redhat.com)
+- Changing cluster role to admin (rhallise@redhat.com)
+- drain still pending in below files without fix : (jkaur@redhat.com)
+- Fixed spacing and lint errors. (kwoodson@redhat.com)
+- Switch CI to ansible-2.3.1.0 (sdodson@redhat.com)
+- Allow OVS 2.7 in latest OpenShift releases (rhcarvalho@gmail.com)
+- Make aos_version module handle multiple versions (rhcarvalho@gmail.com)
+- Split positive and negative unit tests (rhcarvalho@gmail.com)
+- GlusterFS: Create in custom namespace by default (jarrpa@redhat.com)
+- hosted registry: Use proper node name in GlusterFS storage setup
+ (jarrpa@redhat.com)
+- GlusterFS: Make heketi-cli command configurable (jarrpa@redhat.com)
+- GlusterFS: Reintroduce heketi-cli check for non-native heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Bug fixes for external GlusterFS nodes (jarrpa@redhat.com)
+- GlusterFS: Improve and extend example inventory files (jarrpa@redhat.com)
+- Fixed tests and added sleep for update. (kwoodson@redhat.com)
+- Fixing needs_update comparison. Added a small pause for race conditions.
+ Fixed doc. Fix kind to storageclass (kwoodson@redhat.com)
+- Adding storageclass support to lib_openshift. (kwoodson@redhat.com)
+- Add an SA policy to the ansible-service-broker (rhallise@redhat.com)
+- Import templates will fail if user is not system:admin (jkaur@redhat.com)
+- Additional optimization parameters for ansible.cfg (sejug@redhat.com)
+- Fix etcd conditional check failure (admin@webresource.nl)
+- Remove invalid when: from vars: (rteague@redhat.com)
+
* Tue Jul 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.153-1
- Updating to compare sets instead of sorted lists (ewolinet@redhat.com)
- Adding ability to create podpreset for service-catalog-controller for
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index 9d086b7b6..0db7ccf89 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -20,4 +20,5 @@ g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
| union(g_lb_hosts) | union(g_nfs_hosts)
| union(g_new_node_hosts)| union(g_new_master_hosts)
+ | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts)
| default([]) }}"
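Per the changelog, the GlusterFS groups are added to `g_all_hosts` so those hosts get initial facts set. Jinja's `union` filter deduplicates while combining, so hosts that are already masters or nodes are not listed twice. A small Python equivalent of the combined expression, with hypothetical group contents:

```python
# Python equivalent of the g_all_hosts union chain; group contents
# are hypothetical. union() keeps order and drops duplicates.
groups = {
    "masters": ["master"],
    "nodes": ["master", "node0", "node1"],
    "glusterfs": ["node0", "node1", "node2"],
    "glusterfs_registry": [],
}

g_all_hosts = []
for group in ("masters", "nodes", "glusterfs", "glusterfs_registry"):
    for host in groups.get(group, []):
        if host not in g_all_hosts:
            g_all_hosts.append(host)

print(g_all_hosts)  # ['master', 'node0', 'node1', 'node2']
```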
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index c7766ff04..7e83b4aa6 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,16 +1,13 @@
---
-# openshift_health_checker depends on openshift_version which now requires group eval.
- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
- name: Run OpenShift health checks
hosts: OSEv3
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "health"
+ - r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ - action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 7ca9f7e8b..afd4f95e0 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,16 +1,13 @@
---
-# openshift_health_checker depends on openshift_version which now requires group eval.
- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
- hosts: OSEv3
name: run OpenShift pre-install checks
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "pre-install"
+ - r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ - action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 867e6b3d6..7136f1c1f 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -10,7 +10,7 @@
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "install"
+ - r_openshift_health_checker_playbook_context: install
post_tasks:
- action: openshift_health_check
args:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 695dc3140..6a0471948 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -157,7 +157,7 @@
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
run_once: true
- delegate_to: oo_first_master
+ delegate_to: "{{ groups.oo_first_master.0 }}"
- set_fact:
master_update_complete: True
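The `delegate_to` fix above is needed because `delegate_to` expects a single host name, and `oo_first_master` is an inventory group; the corrected expression `{{ groups.oo_first_master.0 }}` resolves to that group's first member. A sketch of the lookup, with a hypothetical inventory:

```python
# delegate_to needs a host, not a group: resolve the group's first
# member, mirroring the Jinja expression groups.oo_first_master.0.
groups = {"oo_first_master": ["master1.example.com"]}  # hypothetical inventory

delegate_host = groups["oo_first_master"][0]
print(delegate_host)  # master1.example.com
```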
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index c655449fa..3e7a48669 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -17,6 +17,26 @@
tags:
- always
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master' }}"
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ when:
+ - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
- name: Backup v2 data
hosts: oo_etcd_to_migrate
gather_facts: no
@@ -47,26 +67,6 @@
when:
- etcd_backup_failed | length > 0
-- name: Prepare masters for etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master' }}"
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
- when:
- - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
- - debug:
- msg: "master service name: {{ master_services }}"
- - name: Stop masters
- service:
- name: "{{ item }}"
- state: stopped
- with_items: "{{ master_services }}"
-
- name: Migrate etcd data from v2 to v3
hosts: oo_etcd_to_migrate
gather_facts: no
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index c414913bf..2dacc1218 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -12,5 +12,6 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7d3a371e3..b30450def 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -5,6 +5,19 @@
t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
pre_tasks:
+ # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
+ #
+ # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
+ # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
+ # masters, or absent if such is the case.
+ - name: Detect if this host is a new master in a scale up
+ set_fact:
+ g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
+
+ - name: Scaleup Detection
+ debug:
+ var: g_openshift_master_is_scaleup
+
- name: Check for RPM generated config marker file .config_managed
stat:
path: /etc/origin/.config_managed
@@ -69,7 +82,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Inspect state of first master session secrets and config
+- name: Inspect state of first master config settings
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -98,6 +111,42 @@
set_fact:
l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
+ - name: Check if atomic-openshift-master sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master
+ register: l_aom_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
+ register: l_default_registry_defined
+ when: l_aom_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-api sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-api
+ register: l_aom_api_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
+ register: l_default_registry_defined_api
+ when: l_aom_api_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-controllers sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_aom_controllers_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_default_registry_defined_controllers
+ when: l_aom_controllers_exists.stat.exists | bool
+
+ - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
+ set_fact:
+ l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
+ l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
+ l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
+
- name: Generate master session secrets
hosts: oo_first_master
vars:
@@ -127,6 +176,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
roles:
- role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -142,6 +194,10 @@
etcd_cert_prefix: "master.etcd-"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
+ openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
+ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
+ openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
+ openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
- role: nuage_master
when: openshift.common.use_nuage | bool
- role: calico_master
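Each of the three `awk '/^OPENSHIFT_DEFAULT_REGISTRY/'` tasks above simply captures the matching line from the corresponding sysconfig file (empty output when the variable is absent), so scale-up masters can reproduce the existing registry setting. A Python sketch of what one of those tasks captures:

```python
# Sketch of the awk step: return the OPENSHIFT_DEFAULT_REGISTRY line from
# a sysconfig file, or "" when the file or the variable is missing.
def default_registry_line(path="/etc/sysconfig/atomic-openshift-master"):
    try:
        with open(path) as sysconfig:
            for line in sysconfig:
                if line.startswith("OPENSHIFT_DEFAULT_REGISTRY"):
                    return line.rstrip("\n")
    except FileNotFoundError:
        return ""
    return ""


print(repr(default_registry_line()))
```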
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index acebabc91..ef7d54f9f 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,25 +1,4 @@
---
-- name: Gather and set facts for node hosts
- hosts: oo_nodes_to_config
- vars:
- t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
- pre_tasks:
- - set_fact:
- openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
- when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
- roles:
- - openshift_facts
- tasks:
- # Since the master is generating the node certificates before they are
- # configured, we need to make sure to set the node properties beforehand if
- # we do not want the defaults
- - openshift_facts:
- role: node
- local_facts:
- labels: "{{ openshift_node_labels | default(None) }}"
- annotations: "{{ openshift_node_annotations | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
-
- name: Evaluate node groups
hosts: localhost
become: no
@@ -32,7 +11,11 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when:
+ - hostvars[item].openshift is defined
+ - hostvars[item].openshift.common is defined
+ - hostvars[item].openshift.common.is_containerized | bool
+ - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
- name: Configure containerized nodes
@@ -47,8 +30,7 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
roles:
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -64,8 +46,6 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index ccd29be29..4df86effa 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -49,11 +49,15 @@
- '{{ instances }}'
- [ user-data, meta-data ]
+- name: Check for genisoimage
+ command: which genisoimage
+ register: which_genisoimage
+
- name: Create the cloud-init config drive
- command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
args:
- chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
+ creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
with_items: '{{ instances }}'
- name: Refresh the libvirt storage pool for openshift
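This implements the changelog's "fall back to mkisofs if genisoimage isn't available": the result of `which genisoimage` is registered, and its return code selects the ISO tool in the command that follows. The same selection logic in plain Python (output path and input files are illustrative):

```python
# Same fallback logic in plain Python: prefer genisoimage, use mkisofs
# when it is not on PATH. Output path and input files are illustrative.
import shutil
import subprocess

tool = "genisoimage" if shutil.which("genisoimage") else "mkisofs"
subprocess.check_call([
    tool,
    "-output", "cloud-init.iso",
    "-volid", "cidata",
    "-joliet", "-rock",
    "user-data", "meta-data",
])
```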
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index 8c4d19537..b5d2f7c6e 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,6 +1,6 @@
---
# The version of Contiv binaries to use
-contiv_version: 1.0.1
+contiv_version: 1.1.1
# The version of cni binaries
cni_version: v0.4.0
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index da6409f1e..a2c2f98a7 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -27,4 +27,4 @@ dependencies:
etcd_peer_url_scheme: http
when: contiv_role == "netmaster"
- role: contiv_auth_proxy
- when: (contiv_role == "netmaster") and (contiv_enable_auth_proxy == true)
+ when: contiv_role == "netmaster"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index acaf7386e..cc52d3a43 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -41,6 +41,18 @@
mode: 0644
notify: restart netmaster
+- name: Netmaster | Ensure contiv_config_dir exists
+ file:
+ path: "{{ contiv_config_dir }}"
+ recurse: yes
+ state: directory
+
+- name: Netmaster | Setup contiv.json config for the cni plugin
+ template:
+ src: contiv.cfg.master.j2
+ dest: "{{ contiv_config_dir }}/contiv.json"
+ notify: restart netmaster
+
- name: Netmaster | Copy systemd units for netmaster
template:
src: netmaster.service
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index 2d0fb95ae..07bb16ea7 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -2,7 +2,7 @@
- name: Netmaster IPtables | Get iptables rules
command: iptables -L --wait
register: iptablesrules
- always_run: yes
+ check_mode: no
- name: Netmaster IPtables | Enable iptables at boot
service:
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 184c595c5..3ea34645d 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -2,7 +2,7 @@
- name: Netplugin IPtables | Get iptables rules
command: iptables -L --wait
register: iptablesrules
- always_run: yes
+ check_mode: no
- name: Netplugin IPtables | Enable iptables at boot
service:
diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2
index 2c9a666a9..f0e99c556 100644
--- a/roles/contiv/templates/contiv.cfg.j2
+++ b/roles/contiv/templates/contiv.cfg.j2
@@ -2,5 +2,6 @@
"K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
"K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt",
"K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key",
- "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt"
+ "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt",
+ "SVC_SUBNET": "172.30.0.0/16"
}
diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2
new file mode 100644
index 000000000..fac8e3c4c
--- /dev/null
+++ b/roles/contiv/templates/contiv.cfg.master.j2
@@ -0,0 +1,7 @@
+{
+ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
+ "K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt",
+ "K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key",
+ "K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt",
+ "SVC_SUBNET": "172.30.0.0/16"
+}
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
index 21c0380be..a602c955e 100644
--- a/roles/contiv/templates/netmaster.service
+++ b/roles/contiv/templates/netmaster.service
@@ -6,3 +6,5 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
EnvironmentFile=/etc/default/netmaster
ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
KillMode=control-group
+Restart=on-failure
+RestartSec=10
diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml
index 4e637a947..e1d904c6a 100644
--- a/roles/contiv_auth_proxy/defaults/main.yml
+++ b/roles/contiv_auth_proxy/defaults/main.yml
@@ -1,11 +1,12 @@
---
-auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
+auth_proxy_image: "contiv/auth_proxy:1.1.1"
auth_proxy_port: 10000
contiv_certs: "/var/contiv/certs"
-cluster_store: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
+cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
auth_proxy_datastore: "{{ cluster_store }}"
auth_proxy_binaries: "/var/contiv_cache"
auth_proxy_local_install: False
auth_proxy_rule_comment: "Contiv auth proxy service"
+service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}"
diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem
new file mode 100644
index 000000000..63df4603f
--- /dev/null
+++ b/roles/contiv_auth_proxy/files/cert.pem
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM
+BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j
+YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL
+MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG
+A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0
+aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl
+MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p
+7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06
+grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl
+yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L
+DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje
+XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4
+dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1
+hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N
+wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq
+FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV
+HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot//
+iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN
+BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2
+nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R
+/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW
+SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB
+PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It
+X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ
+yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf
+0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv
+DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM
+XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX
+jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA=
+-----END CERTIFICATE-----
diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem
new file mode 100644
index 000000000..7224e569c
--- /dev/null
+++ b/roles/contiv_auth_proxy/files/key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d
+5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj
+v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8
+Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3
+3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn
+2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md
+qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL
++J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI
+M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74
+Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh
+41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA
+AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP
+2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk
+PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5
+9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB
+UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O
+m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj
+RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H
+0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk
+evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk
+MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l
+kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8
+nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68
+4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29
+5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh
+YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf
+M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR
+wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh
+rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn
+yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo
+20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc
+RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1
+IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3
+7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3
+FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou
+qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb
+58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN
+JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI
+ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg
+0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g
+UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm
+zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA
+RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg
+hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h
++YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd
+1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm
+qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS
+QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt
+d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft
+-----END RSA PRIVATE KEY-----
diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2
index e82e5b4ab..0ab8c831b 100644
--- a/roles/contiv_auth_proxy/templates/auth_proxy.j2
+++ b/roles/contiv_auth_proxy/templates/auth_proxy.j2
@@ -14,7 +14,7 @@ start)
-p 10000:{{ auth_proxy_port }} \
--net=host --name=auth-proxy \
-e NO_NETMASTER_STARTUP_CHECK=1 \
- -v /var/contiv:/var/contiv \
+ -v /var/contiv:/var/contiv:z \
{{ auth_proxy_image }} \
--tls-key-file={{ auth_proxy_key }} \
--tls-certificate={{ auth_proxy_cert }} \
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 926e0e0be..7a4972fca 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -3,7 +3,7 @@
stat: path=/run/ostree-booted
register: s
changed_when: false
- always_run: yes
+ check_mode: no
- name: Init the is_atomic fact
set_fact:
@@ -17,7 +17,7 @@
- name: Determine if CoreOS
raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
register: distro
- always_run: yes
+ check_mode: no
- name: Init the is_coreos fact
set_fact:
@@ -61,7 +61,7 @@
stat: path=/usr/bin/rpm
register: s
changed_when: false
- always_run: yes
+ check_mode: no
- name: Init the has_rpm fact
set_fact:
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index d2f66dac5..07401a6dd 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -4,7 +4,7 @@
register: s
changed_when: false
failed_when: false
- always_run: yes
+ check_mode: no
- name: Set the has_firewalld fact
set_fact:
@@ -16,7 +16,7 @@
register: s
changed_when: false
failed_when: false
- always_run: yes
+ check_mode: no
- name: Set the has_iptables fact
set_fact:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index f0661209f..8c2f392ee 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -14,7 +14,8 @@
name: etcd_common
vars:
r_etcd_common_action: drop_etcdctl
- when: openshift_etcd_etcdctl_profile | default(true) | bool
+ when:
+ - openshift_etcd_etcdctl_profile | default(true) | bool
- block:
- name: Pull etcd container
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
index 800073873..b66696b55 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -1,4 +1,8 @@
---
+- fail:
+ msg: "Currently etcd v3 migration is unsupported while we test it more thoroughly"
+ when: not openshift_enable_unsupported_configurations | default(false) | bool
+
# Check the cluster is healthy
- include: check_cluster_health.yml
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 221ef5094..7154fd839 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1959,28 +1959,28 @@ class PolicyGroup(OpenShiftCLI):
self.verbose = verbose
self._rolebinding = None
self._scc = None
- self._cluster_policy_bindings = None
- self._policy_bindings = None
+ self._cluster_role_bindings = None
+ self._role_bindings = None
@property
- def policybindings(self):
- if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ def rolebindings(self):
+ if self._role_bindings is None:
+ results = self._get('rolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve policybindings')
- self._policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve rolebindings')
+ self._role_bindings = results['results'][0]['items']
- return self._policy_bindings
+ return self._role_bindings
@property
- def clusterpolicybindings(self):
- if self._cluster_policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ def clusterrolebindings(self):
+ if self._cluster_role_bindings is None:
+ results = self._get('clusterrolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
- self._cluster_policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve clusterrolebindings')
+ self._cluster_role_bindings = results['results'][0]['items']
- return self._cluster_policy_bindings
+ return self._cluster_role_bindings
@property
def role_binding(self):
@@ -2023,18 +2023,17 @@ class PolicyGroup(OpenShiftCLI):
''' return whether role_binding exists '''
bindings = None
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
- bindings = self.clusterpolicybindings
+ bindings = self.clusterrolebindings
else:
- bindings = self.policybindings
+ bindings = self.rolebindings
if bindings is None:
return False
- for binding in bindings['roleBindings']:
- _rb = binding['roleBinding']
- if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
- _rb['groupNames'] is not None and \
- self.config.config_options['group']['value'] in _rb['groupNames']:
+ for binding in bindings:
+ if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ binding['groupNames'] is not None and \
+ self.config.config_options['group']['value'] in binding['groupNames']:
self.role_binding = binding
return True
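
The hunk above replaces the deprecated (cluster)policybindings lookup, which nested each binding under a 'roleBinding' key, with the flat item list returned for (cluster)rolebindings. A minimal sketch of the new membership test, using hypothetical binding data shaped like the new flat list (names are illustrative, not from a real cluster):

    # Hypothetical data shaped like the new flat rolebindings list.
    bindings = [
        {'roleRef': {'name': 'edit'}, 'groupNames': ['developers']},
        {'roleRef': {'name': 'admin'}, 'groupNames': None},
    ]

    def group_has_role(bindings, role, group):
        # Mirrors the loop above: match on roleRef.name and require the
        # group to appear in groupNames (which may be None).
        for binding in bindings:
            if binding['roleRef']['name'] == role and \
               binding['groupNames'] is not None and \
               group in binding['groupNames']:
                return True
        return False

    assert group_has_role(bindings, 'edit', 'developers')
    assert not group_has_role(bindings, 'admin', 'developers')
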
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 071562875..3fcf49799 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1950,36 +1950,36 @@ class PolicyUser(OpenShiftCLI):
''' Class to handle attaching policies to users '''
def __init__(self,
- policy_config,
+ config,
verbose=False):
''' Constructor for PolicyUser '''
- super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose)
- self.config = policy_config
+ super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose)
+ self.config = config
self.verbose = verbose
self._rolebinding = None
self._scc = None
- self._cluster_policy_bindings = None
- self._policy_bindings = None
+ self._cluster_role_bindings = None
+ self._role_bindings = None
@property
- def policybindings(self):
- if self._policy_bindings is None:
- results = self._get('policybindings', None)
+ def rolebindings(self):
+ if self._role_bindings is None:
+ results = self._get('rolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve policybindings')
- self._policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve rolebindings')
+ self._role_bindings = results['results'][0]['items']
- return self._policy_bindings
+ return self._role_bindings
@property
- def clusterpolicybindings(self):
- if self._cluster_policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ def clusterrolebindings(self):
+ if self._cluster_role_bindings is None:
+ results = self._get('clusterrolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
- self._cluster_policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve clusterrolebindings')
+ self._cluster_role_bindings = results['results'][0]['items']
- return self._cluster_policy_bindings
+ return self._cluster_role_bindings
@property
def role_binding(self):
@@ -2017,18 +2017,17 @@ class PolicyUser(OpenShiftCLI):
''' return whether role_binding exists '''
bindings = None
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
- bindings = self.clusterpolicybindings
+ bindings = self.clusterrolebindings
else:
- bindings = self.policybindings
+ bindings = self.rolebindings
if bindings is None:
return False
- for binding in bindings['roleBindings']:
- _rb = binding['roleBinding']
- if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
- _rb['userNames'] is not None and \
- self.config.config_options['user']['value'] in _rb['userNames']:
+ for binding in bindings:
+ if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ binding['userNames'] is not None and \
+ self.config.config_options['user']['value'] in binding['userNames']:
self.role_binding = binding
return True
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 289f08b83..d101eac1c 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1671,7 +1671,7 @@ class OCClusterRole(OpenShiftCLI):
self.clusterrole = ClusterRole(content=result['results'][0])
result['results'] = self.clusterrole.yaml_dict
- elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
+ elif '"{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
self.clusterrole = None
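
Dropping the literal 'clusterrole ' prefix makes the not-found check tolerant of servers that name the full resource in the error. A sketch with illustrative stderr strings (exact server wording varies by version):

    old_msg = 'Error from server: clusterrole "myrole" not found'
    new_msg = ('Error from server (NotFound): '
               'clusterroles.rbac.authorization.k8s.io "myrole" not found')

    needle = '"{}" not found'.format('myrole')
    assert needle in old_msg and needle in new_msg  # new check matches both forms
    # the old, stricter check would miss the newer message:
    assert 'clusterrole "{}" not found'.format('myrole') not in new_msg
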
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index a88639bfc..a21540962 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -110,6 +110,18 @@ options:
- ReadOnlyMany
- ReadWriteMany
aliases: []
+ storage_class_name:
+ description:
+ - The storage class name for the PVC
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - A hash of key/values for the matchLabels
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -1420,7 +1432,9 @@ class PersistentVolumeClaimConfig(object):
namespace,
kubeconfig,
access_modes=None,
- vol_capacity='1G'):
+ vol_capacity='1G',
+ selector=None,
+ storage_class_name=None):
''' constructor for handling pvc options '''
self.kubeconfig = kubeconfig
self.name = sname
@@ -1428,6 +1442,8 @@ class PersistentVolumeClaimConfig(object):
self.access_modes = access_modes
self.vol_capacity = vol_capacity
self.data = {}
+ self.selector = selector
+ self.storage_class_name = storage_class_name
self.create_dict()
@@ -1445,12 +1461,16 @@ class PersistentVolumeClaimConfig(object):
self.data['spec']['accessModes'] = ['ReadWriteOnce']
if self.access_modes:
self.data['spec']['accessModes'] = self.access_modes
+ if self.selector:
+ self.data['spec']['selector'] = {'matchLabels': self.selector}
# storage capacity
self.data['spec']['resources'] = {}
self.data['spec']['resources']['requests'] = {}
self.data['spec']['resources']['requests']['storage'] = self.vol_capacity
+ if self.storage_class_name:
+ self.data['spec']['storageClassName'] = self.storage_class_name
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class PersistentVolumeClaim(Yedit):
@@ -1460,13 +1480,29 @@ class PersistentVolumeClaim(Yedit):
volume_name_path = "spec.volumeName"
bound_path = "status.phase"
kind = 'PersistentVolumeClaim'
+ selector_path = "spec.selector.matchLabels"
+ storage_class_name_path = "spec.storageClassName"
def __init__(self, content):
- '''RoleBinding constructor'''
+ '''PersistentVolumeClaim constructor'''
super(PersistentVolumeClaim, self).__init__(content=content)
self._access_modes = None
self._volume_capacity = None
self._volume_name = None
+ self._selector = None
+ self._storage_class_name = None
+
+ @property
+ def storage_class_name(self):
+ ''' storage_class_name property '''
+ if self._storage_class_name is None:
+ self._storage_class_name = self.get_storage_class_name()
+ return self._storage_class_name
+
+ @storage_class_name.setter
+ def storage_class_name(self, data):
+ ''' storage_class_name property setter'''
+ self._storage_class_name = data
@property
def volume_name(self):
@@ -1481,6 +1517,24 @@ class PersistentVolumeClaim(Yedit):
self._volume_name = data
@property
+ def selector(self):
+ ''' selector property '''
+ if self._selector is None:
+ self._selector = self.get_selector()
+ if not isinstance(self._selector, dict):
+ self._selector = dict(self._selector)
+
+ return self._selector
+
+ @selector.setter
+ def selector(self, data):
+ ''' selector property setter'''
+ if not isinstance(data, dict):
+ data = dict(data)
+
+ self._selector = data
+
+ @property
def access_modes(self):
''' access_modes property '''
if self._access_modes is None:
@@ -1510,6 +1564,14 @@ class PersistentVolumeClaim(Yedit):
''' volume_capacity property setter'''
self._volume_capacity = data
+ def get_storage_class_name(self):
+ '''get storage_class_name'''
+ return self.get(PersistentVolumeClaim.storage_class_name_path) or []
+
+ def get_selector(self):
+ '''get selector'''
+ return self.get(PersistentVolumeClaim.selector_path) or []
+
def get_access_modes(self):
'''get access_modes'''
return self.get(PersistentVolumeClaim.access_modes_path) or []
@@ -1663,6 +1725,8 @@ class OCPVC(OpenShiftCLI):
params['kubeconfig'],
params['access_modes'],
params['volume_capacity'],
+ params['selector'],
+ params['storage_class_name'],
)
oc_pvc = OCPVC(pconfig, verbose=params['debug'])
@@ -1763,9 +1827,9 @@ def main():
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
volume_capacity=dict(default='1G', type='str'),
- access_modes=dict(default='ReadWriteOnce',
- choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
- type='str'),
+ storage_class_name=dict(default=None, required=False, type='str'),
+ selector=dict(default=None, required=False, type='dict'),
+ access_modes=dict(default=['ReadWriteOnce'], type='list'),
),
supports_check_mode=True,
)
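
With the two new options wired through, create_dict() emits a PVC spec like the following. A minimal sketch of that assembly, with hypothetical label and class values:

    selector = {'foo': 'bar'}
    storage_class_name = 'gp2'

    spec = {'accessModes': ['ReadWriteOnce']}
    if selector:
        spec['selector'] = {'matchLabels': selector}
    spec['resources'] = {'requests': {'storage': '1G'}}
    if storage_class_name:
        spec['storageClassName'] = storage_class_name

    assert spec['selector']['matchLabels']['foo'] == 'bar'
    assert spec['storageClassName'] == 'gp2'
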
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index d5375e27a..686119c65 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1427,7 +1427,7 @@ class StorageClassConfig(object):
# pylint: disable=too-many-arguments
def __init__(self,
name,
- provisioner=None,
+ provisioner,
parameters=None,
annotations=None,
default_storage_class="false",
@@ -1459,10 +1459,7 @@ class StorageClassConfig(object):
self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
self.default_storage_class
- if self.provisioner is None:
- self.data['provisioner'] = 'kubernetes.io/aws-ebs'
- else:
- self.data['provisioner'] = self.provisioner
+ self.data['provisioner'] = self.provisioner
self.data['parameters'] = {}
if self.parameters is not None:
@@ -1668,7 +1665,7 @@ def main():
name=dict(default=None, type='str'),
annotations=dict(default=None, type='dict'),
parameters=dict(default=None, type='dict'),
- provisioner=dict(default='aws-ebs', type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
api_version=dict(default='v1', type='str'),
default_storage_class=dict(default="false", type='str'),
),
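
Making provisioner required (with no default) means omitting it is now an argument error rather than a silent fall-back to 'kubernetes.io/aws-ebs'. A toy re-implementation of the relevant AnsibleModule validation, purely to illustrate the fail-fast behavior:

    def validate(params, argument_spec):
        # Simplified stand-in for AnsibleModule's required/choices checks.
        for name, opts in argument_spec.items():
            if opts.get('required') and params.get(name) is None:
                raise ValueError('missing required arguments: ' + name)
            choices = opts.get('choices')
            if choices and params.get(name) is not None and params[name] not in choices:
                raise ValueError('value of {} must be one of: {}'.format(name, choices))

    argument_spec = {'provisioner': {'required': True,
                                     'choices': ['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']}}
    validate({'provisioner': 'gce-pd'}, argument_spec)  # passes
    try:
        validate({}, argument_spec)
    except ValueError as err:
        print(err)  # missing required arguments: provisioner
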
diff --git a/roles/lib_openshift/src/ansible/oc_pvc.py b/roles/lib_openshift/src/ansible/oc_pvc.py
index a5181e281..c98d811d6 100644
--- a/roles/lib_openshift/src/ansible/oc_pvc.py
+++ b/roles/lib_openshift/src/ansible/oc_pvc.py
@@ -16,9 +16,9 @@ def main():
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
volume_capacity=dict(default='1G', type='str'),
- access_modes=dict(default='ReadWriteOnce',
- choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
- type='str'),
+ storage_class_name=dict(default=None, required=False, type='str'),
+ selector=dict(default=None, required=False, type='dict'),
+ access_modes=dict(default=['ReadWriteOnce'], type='list'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py
index 2bd8f18d5..e9f3ebbd3 100644
--- a/roles/lib_openshift/src/ansible/oc_storageclass.py
+++ b/roles/lib_openshift/src/ansible/oc_storageclass.py
@@ -14,7 +14,7 @@ def main():
name=dict(default=None, type='str'),
annotations=dict(default=None, type='dict'),
parameters=dict(default=None, type='dict'),
- provisioner=dict(default='aws-ebs', type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
api_version=dict(default='v1', type='str'),
default_storage_class=dict(default="false", type='str'),
),
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py
index 1e51913e0..6ad57bdce 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_group.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py
@@ -41,28 +41,28 @@ class PolicyGroup(OpenShiftCLI):
self.verbose = verbose
self._rolebinding = None
self._scc = None
- self._cluster_policy_bindings = None
- self._policy_bindings = None
+ self._cluster_role_bindings = None
+ self._role_bindings = None
@property
- def policybindings(self):
- if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ def rolebindings(self):
+ if self._role_bindings is None:
+ results = self._get('rolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve policybindings')
- self._policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve rolebindings')
+ self._role_bindings = results['results'][0]['items']
- return self._policy_bindings
+ return self._role_bindings
@property
- def clusterpolicybindings(self):
- if self._cluster_policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ def clusterrolebindings(self):
+ if self._cluster_role_bindings is None:
+ results = self._get('clusterrolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
- self._cluster_policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve clusterrolebindings')
+ self._cluster_role_bindings = results['results'][0]['items']
- return self._cluster_policy_bindings
+ return self._cluster_role_bindings
@property
def role_binding(self):
@@ -105,18 +105,17 @@ class PolicyGroup(OpenShiftCLI):
''' return whether role_binding exists '''
bindings = None
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
- bindings = self.clusterpolicybindings
+ bindings = self.clusterrolebindings
else:
- bindings = self.policybindings
+ bindings = self.rolebindings
if bindings is None:
return False
- for binding in bindings['roleBindings']:
- _rb = binding['roleBinding']
- if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
- _rb['groupNames'] is not None and \
- self.config.config_options['group']['value'] in _rb['groupNames']:
+ for binding in bindings:
+ if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ binding['groupNames'] is not None and \
+ self.config.config_options['group']['value'] in binding['groupNames']:
self.role_binding = binding
return True
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 37a685ebb..6fc8145c8 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -32,36 +32,36 @@ class PolicyUser(OpenShiftCLI):
''' Class to handle attaching policies to users '''
def __init__(self,
- policy_config,
+ config,
verbose=False):
''' Constructor for PolicyUser '''
- super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose)
- self.config = policy_config
+ super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose)
+ self.config = config
self.verbose = verbose
self._rolebinding = None
self._scc = None
- self._cluster_policy_bindings = None
- self._policy_bindings = None
+ self._cluster_role_bindings = None
+ self._role_bindings = None
@property
- def policybindings(self):
- if self._policy_bindings is None:
- results = self._get('policybindings', None)
+ def rolebindings(self):
+ if self._role_bindings is None:
+ results = self._get('rolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve policybindings')
- self._policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve rolebindings')
+ self._role_bindings = results['results'][0]['items']
- return self._policy_bindings
+ return self._role_bindings
@property
- def clusterpolicybindings(self):
- if self._cluster_policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ def clusterrolebindings(self):
+ if self._cluster_role_bindings is None:
+ results = self._get('clusterrolebindings', None)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
- self._cluster_policy_bindings = results['results'][0]['items'][0]
+ raise OpenShiftCLIError('Could not retrieve clusterrolebindings')
+ self._cluster_role_bindings = results['results'][0]['items']
- return self._cluster_policy_bindings
+ return self._cluster_role_bindings
@property
def role_binding(self):
@@ -99,18 +99,17 @@ class PolicyUser(OpenShiftCLI):
''' return whether role_binding exists '''
bindings = None
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
- bindings = self.clusterpolicybindings
+ bindings = self.clusterrolebindings
else:
- bindings = self.policybindings
+ bindings = self.rolebindings
if bindings is None:
return False
- for binding in bindings['roleBindings']:
- _rb = binding['roleBinding']
- if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
- _rb['userNames'] is not None and \
- self.config.config_options['user']['value'] in _rb['userNames']:
+ for binding in bindings:
+ if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ binding['userNames'] is not None and \
+ self.config.config_options['user']['value'] in binding['userNames']:
self.role_binding = binding
return True
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index ae6795446..328e5cb67 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -56,7 +56,7 @@ class OCClusterRole(OpenShiftCLI):
self.clusterrole = ClusterRole(content=result['results'][0])
result['results'] = self.clusterrole.yaml_dict
- elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
+ elif '"{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
self.clusterrole = None
diff --git a/roles/lib_openshift/src/class/oc_pvc.py b/roles/lib_openshift/src/class/oc_pvc.py
index c73abc47c..6b566c301 100644
--- a/roles/lib_openshift/src/class/oc_pvc.py
+++ b/roles/lib_openshift/src/class/oc_pvc.py
@@ -85,6 +85,8 @@ class OCPVC(OpenShiftCLI):
params['kubeconfig'],
params['access_modes'],
params['volume_capacity'],
+ params['selector'],
+ params['storage_class_name'],
)
oc_pvc = OCPVC(pconfig, verbose=params['debug'])
diff --git a/roles/lib_openshift/src/doc/pvc b/roles/lib_openshift/src/doc/pvc
index 9240f2a0f..268ad0b94 100644
--- a/roles/lib_openshift/src/doc/pvc
+++ b/roles/lib_openshift/src/doc/pvc
@@ -59,6 +59,18 @@ options:
- ReadOnlyMany
- ReadWriteMany
aliases: []
+ storage_class_name:
+ description:
+ - The storage class name for the PVC
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - A hash of key/values for the matchLabels
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_openshift/src/lib/pvc.py b/roles/lib_openshift/src/lib/pvc.py
index 929b50990..d1e935c32 100644
--- a/roles/lib_openshift/src/lib/pvc.py
+++ b/roles/lib_openshift/src/lib/pvc.py
@@ -11,7 +11,9 @@ class PersistentVolumeClaimConfig(object):
namespace,
kubeconfig,
access_modes=None,
- vol_capacity='1G'):
+ vol_capacity='1G',
+ selector=None,
+ storage_class_name=None):
''' constructor for handling pvc options '''
self.kubeconfig = kubeconfig
self.name = sname
@@ -19,6 +21,8 @@ class PersistentVolumeClaimConfig(object):
self.access_modes = access_modes
self.vol_capacity = vol_capacity
self.data = {}
+ self.selector = selector
+ self.storage_class_name = storage_class_name
self.create_dict()
@@ -36,12 +40,16 @@ class PersistentVolumeClaimConfig(object):
self.data['spec']['accessModes'] = ['ReadWriteOnce']
if self.access_modes:
self.data['spec']['accessModes'] = self.access_modes
+ if self.selector:
+ self.data['spec']['selector'] = {'matchLabels': self.selector}
# storage capacity
self.data['spec']['resources'] = {}
self.data['spec']['resources']['requests'] = {}
self.data['spec']['resources']['requests']['storage'] = self.vol_capacity
+ if self.storage_class_name:
+ self.data['spec']['storageClassName'] = self.storage_class_name
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class PersistentVolumeClaim(Yedit):
@@ -51,13 +59,29 @@ class PersistentVolumeClaim(Yedit):
volume_name_path = "spec.volumeName"
bound_path = "status.phase"
kind = 'PersistentVolumeClaim'
+ selector_path = "spec.selector.matchLabels"
+ storage_class_name_path = "spec.storageClassName"
def __init__(self, content):
- '''RoleBinding constructor'''
+ '''PersistentVolumeClaim constructor'''
super(PersistentVolumeClaim, self).__init__(content=content)
self._access_modes = None
self._volume_capacity = None
self._volume_name = None
+ self._selector = None
+ self._storage_class_name = None
+
+ @property
+ def storage_class_name(self):
+ ''' storage_class_name property '''
+ if self._storage_class_name is None:
+ self._storage_class_name = self.get_storage_class_name()
+ return self._storage_class_name
+
+ @storage_class_name.setter
+ def storage_class_name(self, data):
+ ''' storage_class_name property setter'''
+ self._storage_class_name = data
@property
def volume_name(self):
@@ -72,6 +96,24 @@ class PersistentVolumeClaim(Yedit):
self._volume_name = data
@property
+ def selector(self):
+ ''' selector property '''
+ if self._selector is None:
+ self._selector = self.get_selector()
+ if not isinstance(self._selector, dict):
+ self._selector = dict(self._selector)
+
+ return self._selector
+
+ @selector.setter
+ def selector(self, data):
+ ''' selector property setter'''
+ if not isinstance(data, dict):
+ data = dict(data)
+
+ self._selector = data
+
+ @property
def access_modes(self):
''' access_modes property '''
if self._access_modes is None:
@@ -101,6 +143,14 @@ class PersistentVolumeClaim(Yedit):
''' volume_capacity property setter'''
self._volume_capacity = data
+ def get_storage_class_name(self):
+ '''get storage_class_name'''
+ return self.get(PersistentVolumeClaim.storage_class_name_path) or []
+
+ def get_selector(self):
+ '''get selector'''
+ return self.get(PersistentVolumeClaim.selector_path) or []
+
def get_access_modes(self):
'''get access_modes'''
return self.get(PersistentVolumeClaim.access_modes_path) or []
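
The new selector and storage_class_name properties follow the file's existing Yedit pattern: a dotted-path class constant, a lazy getter that caches the lookup, and a plain setter. A simplified stand-in for the dotted-path lookup (Yedit itself does considerably more):

    def deep_get(content, dotted_path):
        # Walk a nested dict by a dotted path, returning None when absent.
        node = content
        for key in dotted_path.split('.'):
            if not isinstance(node, dict) or key not in node:
                return None
            node = node[key]
        return node

    content = {'spec': {'storageClassName': 'gp2',
                        'selector': {'matchLabels': {'foo': 'bar'}}}}
    assert deep_get(content, 'spec.storageClassName') == 'gp2'
    assert deep_get(content, 'spec.selector.matchLabels') == {'foo': 'bar'}
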
diff --git a/roles/lib_openshift/src/lib/storageclass.py b/roles/lib_openshift/src/lib/storageclass.py
index ef12a8d2d..c49a3066a 100644
--- a/roles/lib_openshift/src/lib/storageclass.py
+++ b/roles/lib_openshift/src/lib/storageclass.py
@@ -8,7 +8,7 @@ class StorageClassConfig(object):
# pylint: disable=too-many-arguments
def __init__(self,
name,
- provisioner=None,
+ provisioner,
parameters=None,
annotations=None,
default_storage_class="false",
@@ -40,10 +40,7 @@ class StorageClassConfig(object):
self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
self.default_storage_class
- if self.provisioner is None:
- self.data['provisioner'] = 'kubernetes.io/aws-ebs'
- else:
- self.data['provisioner'] = self.provisioner
+ self.data['provisioner'] = self.provisioner
self.data['parameters'] = {}
if self.parameters is not None:
diff --git a/roles/lib_openshift/src/test/integration/oc_pvc.yml b/roles/lib_openshift/src/test/integration/oc_pvc.yml
new file mode 100755
index 000000000..fb3a4781f
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_pvc.yml
@@ -0,0 +1,28 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_pvc.yml -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create pvc
+ oc_pvc:
+ state: present
+ name: oc-pvc-create-test
+ namespace: default
+ volume_capacity: 3G
+ access_modes:
+ - ReadWriteOnce
+ selector:
+ foo: bar
+ storage_class_name: my-storage-class-name
+ register: pvcout
+ - debug: var=pvcout
+
+ - assert:
+ that:
+ - pvcout.results.results[0]['metadata']['name'] == 'oc-pvc-create-test'
+ - pvcout.results.results[0]['spec']['storageClassName'] == 'my-storage-class-name'
+ - pvcout.results.results[0]['spec']['selector']['matchLabels']['foo'] == 'bar'
+ - pvcout.changed
+ msg: pvc create failed.
diff --git a/roles/lib_openshift/src/test/unit/test_oc_pvc.py b/roles/lib_openshift/src/test/unit/test_oc_pvc.py
index 82187917d..a96f2e4a7 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_pvc.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_pvc.py
@@ -30,6 +30,8 @@ class OCPVCTest(unittest.TestCase):
'name': 'mypvc',
'namespace': 'test',
'volume_capacity': '1G',
+ 'selector': {'foo': 'bar', 'abc': 'a123'},
+ 'storage_class_name': 'mystorage',
'access_modes': 'ReadWriteMany'}
@mock.patch('oc_pvc.Utils.create_tmpfile_copy')
@@ -65,6 +67,13 @@ class OCPVCTest(unittest.TestCase):
"storage": "1Gi"
}
},
+ "selector": {
+ "matchLabels": {
+ "foo": "bar",
+ "abc": "a123"
+ }
+ },
+ "storageClassName": "myStorage",
"volumeName": "pv-aws-ow5vl"
},
"status": {
@@ -93,6 +102,8 @@ class OCPVCTest(unittest.TestCase):
self.assertTrue(results['changed'])
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc')
+ self.assertEqual(results['results']['results'][0]['spec']['storageClassName'], 'myStorage')
+ self.assertEqual(results['results']['results'][0]['spec']['selector']['matchLabels']['foo'], 'bar')
@mock.patch('oc_pvc.Utils.create_tmpfile_copy')
@mock.patch('oc_pvc.OCPVC._run')
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 8d07dbecc..bdece7640 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -1,7 +1,7 @@
---
openshift_storageclass_defaults:
aws:
- provisioner: kubernetes.io/aws-ebs
+ provisioner: aws-ebs
name: gp2
parameters:
type: gp2
@@ -9,7 +9,7 @@ openshift_storageclass_defaults:
encrypted: 'false'
gce:
name: standard
- provisioner: kubernetes.io/gce-pd
+ provisioner: gce-pd
parameters:
type: pd-standard
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index a5ce8d5c7..a68165bde 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -9,7 +9,7 @@
register: docker_excluder_stat
- name: disable docker excluder
- command: "{{ r_openshift_excluder_service_type }}-docker-excluder unexclude"
+ command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder unexclude"
when:
- unexclude_docker_excluder | default(false) | bool
- docker_excluder_stat.stat.exists
@@ -20,7 +20,7 @@
register: openshift_excluder_stat
- name: disable openshift excluder
- command: "{{ r_openshift_excluder_service_type }}-excluder unexclude"
+ command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder unexclude"
when:
- unexclude_openshift_excluder | default(false) | bool
- openshift_excluder_stat.stat.exists
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 4712ca3a8..42c4945b4 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1647,6 +1647,13 @@ def set_proxy_facts(facts):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
+
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
+ # masters behind a proxy need to connect to etcd via IP
+ if 'no_proxy_etcd_host_ips' in common:
+ if isinstance(common['no_proxy_etcd_host_ips'], string_types):
+ common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))
+
if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
@@ -2215,14 +2222,10 @@ class OpenShiftFacts(object):
product_version = self.system_facts['ansible_product_version']
virt_type = self.system_facts['ansible_virtualization_type']
virt_role = self.system_facts['ansible_virtualization_role']
+ bios_vendor = self.system_facts['ansible_system_vendor']
provider = None
metadata = None
- # TODO: this is not exposed through module_utils/facts.py in ansible,
- # need to create PR for ansible to expose it
- bios_vendor = get_file_content( # noqa: F405
- '/sys/devices/virtual/dmi/id/bios_vendor'
- )
if bios_vendor == 'Google':
provider = 'gce'
metadata_url = ('http://metadata.google.internal/'
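
The no_proxy fix above splits a comma-separated fact and extends the list, so masters behind a proxy reach etcd members by IP. A sketch with an illustrative facts dict (the module code checks string_types; plain str is used here for brevity):

    common = {'no_proxy': ['.example.com'],
              'no_proxy_etcd_host_ips': '10.0.0.11,10.0.0.12'}

    if isinstance(common['no_proxy_etcd_host_ips'], str):
        common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

    assert common['no_proxy'] == ['.example.com', '10.0.0.11', '10.0.0.12']
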
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 0390dc82e..23da53940 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -13,6 +13,7 @@ except ImportError:
display = Display()
from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
# Augment sys.path so that we can import checks from a directory relative to
# this callback plugin.
@@ -37,9 +38,10 @@ class ActionModule(ActionBase):
return result
try:
- known_checks = self.load_known_checks()
+ known_checks = self.load_known_checks(tmp, task_vars)
args = self._task.args
- resolved_checks = resolve_checks(args.get("checks", []), known_checks.values())
+ requested_checks = normalize(args.get('checks', []))
+ resolved_checks = resolve_checks(requested_checks, known_checks.values())
except OpenShiftCheckException as e:
result["failed"] = True
result["msg"] = str(e)
@@ -47,22 +49,19 @@ class ActionModule(ActionBase):
result["checks"] = check_results = {}
- user_disabled_checks = [
- check.strip()
- for check in task_vars.get("openshift_disable_check", "").split(",")
- ]
+ user_disabled_checks = normalize(task_vars.get('openshift_disable_check', []))
for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
- if not check.is_active(task_vars):
+ if not check.is_active():
r = dict(skipped=True, skipped_reason="Not active for this host")
elif check_name in user_disabled_checks:
r = dict(skipped=True, skipped_reason="Disabled by user request")
else:
try:
- r = check.run(tmp, task_vars)
+ r = check.run()
except OpenShiftCheckException as e:
r = dict(
failed=True,
@@ -78,7 +77,7 @@ class ActionModule(ActionBase):
result["changed"] = any(r.get("changed", False) for r in check_results.values())
return result
- def load_known_checks(self):
+ def load_known_checks(self, tmp, task_vars):
load_checks()
known_checks = {}
@@ -91,7 +90,7 @@ class ActionModule(ActionBase):
check_name,
cls.__module__, cls.__name__,
other_cls.__module__, other_cls.__name__))
- known_checks[check_name] = cls(execute_module=self._execute_module)
+ known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
return known_checks
@@ -134,3 +133,14 @@ def resolve_checks(names, all_checks):
resolved.update(tag_to_checks[tag])
return resolved
+
+
+def normalize(checks):
+ """Return a clean list of check names.
+
+ The input may be a comma-separated string or a sequence. Leading and
+ trailing whitespace characters are removed. Empty items are discarded.
+ """
+ if isinstance(checks, string_types):
+ checks = checks.split(',')
+ return [name.strip() for name in checks if name.strip()]
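
Both the 'checks' task argument and openshift_disable_check now pass through normalize(), so users may supply either a comma-separated string or a YAML list. Usage of the function defined above:

    assert normalize('docker_storage, ,memory_availability') == \
        ['docker_storage', 'memory_availability']
    assert normalize(['disk_availability', ' package_version ']) == \
        ['disk_availability', 'package_version']
    assert normalize('') == []
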
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 443b76ea1..d10200719 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -1,6 +1,6 @@
-'''
-Ansible callback plugin.
-'''
+"""
+Ansible callback plugin to give a nicely formatted summary of failures.
+"""
# Reason: In several locations below we disable pylint protected-access
# for Ansible objects that do not give us any public way
@@ -16,11 +16,11 @@ from ansible.utils.color import stringc
class CallbackModule(CallbackBase):
- '''
+ """
This callback plugin stores task results and summarizes failures.
The file name is prefixed with `zz_` to make this plugin be loaded last by
Ansible, thus making its output the last thing that users see.
- '''
+ """
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
@@ -48,7 +48,7 @@ class CallbackModule(CallbackBase):
self._print_failure_details(self.__failures)
def _print_failure_details(self, failures):
- '''Print a summary of failed tasks or checks.'''
+ """Print a summary of failed tasks or checks."""
self._display.display(u'\nFailure summary:\n')
width = len(str(len(failures)))
@@ -69,7 +69,9 @@ class CallbackModule(CallbackBase):
playbook_context = None
# re: result attrs see top comment # pylint: disable=protected-access
for failure in failures:
- # get context from check task result since callback plugins cannot access task vars
+ # Get context from check task result since callback plugins cannot access task vars.
+ # NOTE: thus the context is not known unless checks run. Failures prior to checks running
+ # don't have playbook_context in the results. But we only use it now when checks fail.
playbook_context = playbook_context or failure['result']._result.get('playbook_context')
failed_checks.update(
name
@@ -81,8 +83,11 @@ class CallbackModule(CallbackBase):
def _print_check_failure_summary(self, failed_checks, context):
checks = ','.join(sorted(failed_checks))
- # NOTE: context is not set if all failures occurred prior to checks task
- summary = (
+ # The purpose of specifying context is to vary the output depending on what the user was
+ # expecting to happen (based on which playbook they ran). The only use currently is to
+ # vary the message depending on whether the user was deliberately running checks or was
+ # trying to install/upgrade and checks are just included. Other use cases may arise.
+ summary = ( # default to explaining what checks are in the first place
'\n'
'The execution of "{playbook}"\n'
'includes checks designed to fail early if the requirements\n'
@@ -94,27 +99,26 @@ class CallbackModule(CallbackBase):
'Some checks may be configurable by variables if your requirements\n'
'are different from the defaults; consult check documentation.\n'
'Variables can be set in the inventory or passed on the\n'
- 'command line using the -e flag to ansible-playbook.\n'
+ 'command line using the -e flag to ansible-playbook.\n\n'
).format(playbook=self._playbook_file, checks=checks)
if context in ['pre-install', 'health']:
- summary = (
+ summary = ( # user was expecting to run checks, less explanation needed
'\n'
'You may choose to configure or disable failing checks by\n'
'setting Ansible variables. To disable those above:\n\n'
' openshift_disable_check={checks}\n\n'
'Consult check documentation for configurable variables.\n'
'Variables can be set in the inventory or passed on the\n'
- 'command line using the -e flag to ansible-playbook.\n'
+ 'command line using the -e flag to ansible-playbook.\n\n'
).format(checks=checks)
- # other expected contexts: install, upgrade
self._display.display(summary)
# re: result attrs see top comment # pylint: disable=protected-access
def _format_failure(failure):
- '''Return a list of pretty-formatted text entries describing a failure, including
+ """Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
- by a newline separator when output to the user.'''
+ by a newline separator when output to the user."""
result = failure['result']
host = result._host.get_name()
play = _get_play(result._task)
@@ -135,7 +139,7 @@ def _format_failure(failure):
def _format_failed_checks(checks):
- '''Return pretty-formatted text describing checks that failed.'''
+ """Return pretty-formatted text describing checks that failed."""
failed_check_msgs = []
for check, body in checks.items():
if body.get('failed', False): # only show the failed checks
@@ -150,7 +154,7 @@ def _format_failed_checks(checks):
# This is inspired by ansible.playbook.base.Base.dump_me.
# re: play/task/block attrs see top comment # pylint: disable=protected-access
def _get_play(obj):
- '''Given a task or block, recursively tries to find its parent play.'''
+ """Given a task or block, recursively try to find its parent play."""
if hasattr(obj, '_play'):
return obj._play
if getattr(obj, '_parent'):
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 4f43ee751..f9babebb9 100755..100644
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-'''
+"""
Ansible module for yum-based systems determining if multiple releases
of an OpenShift package are available, and if the release requested
(if any) is available down to the given precision.
@@ -16,7 +16,7 @@ of release availability already. Without duplicating all that, we would
like the user to have a helpful error message if we detect things will
not work out right. Note that if openshift_release is not specified in
the inventory, the version comparison checks just pass.
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
# NOTE: because of the dependency on yum (Python 2-only), this module does not
@@ -32,7 +32,7 @@ except ImportError as err:
class AosVersionException(Exception):
- '''Base exception class for package version problems'''
+ """Base exception class for package version problems"""
def __init__(self, message, problem_pkgs=None):
Exception.__init__(self, message)
self.problem_pkgs = problem_pkgs
diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py
index 433795b67..433795b67 100755..100644
--- a/roles/openshift_health_checker/library/check_yum_update.py
+++ b/roles/openshift_health_checker/library/check_yum_update.py
diff --git a/roles/openshift_health_checker/library/docker_info.py b/roles/openshift_health_checker/library/docker_info.py
index 7f712bcff..0d0ddae8b 100644
--- a/roles/openshift_health_checker/library/docker_info.py
+++ b/roles/openshift_health_checker/library/docker_info.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring
"""
Ansible module for determining information about the docker host.
@@ -13,6 +12,7 @@ from ansible.module_utils.docker_common import AnsibleDockerClient
def main():
+ """Entrypoint for running an Ansible module."""
client = AnsibleDockerClient()
client.module.exit_json(
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 5c9949ced..85cbc6224 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -19,15 +19,21 @@ class OpenShiftCheckException(Exception):
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """A base class for defining checks for an OpenShift cluster environment."""
+ """
+ A base class for defining checks for an OpenShift cluster environment.
+
+ Expect optional params: method execute_module, dict task_vars, and string tmp.
+ execute_module is expected to have a signature compatible with _execute_module
+ from ansible plugins/action/__init__.py, e.g.:
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
+ This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
+ which provides the check's stored task_vars and tmp.
+ """
- def __init__(self, execute_module=None, module_executor=None):
- if execute_module is module_executor is None:
- raise TypeError(
- "__init__() takes either execute_module (recommended) "
- "or module_executor (deprecated), none given")
- self.execute_module = execute_module or module_executor
- self.module_executor = self.execute_module
+ def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ self._execute_module = execute_module
+ self.task_vars = task_vars or {}
+ self.tmp = tmp
@abstractproperty
def name(self):
@@ -43,13 +49,13 @@ class OpenShiftCheck(object):
"""
return []
- @classmethod
- def is_active(cls, task_vars): # pylint: disable=unused-argument
+ @staticmethod
+ def is_active():
"""Returns true if this check applies to the ansible-playbook run."""
return True
@abstractmethod
- def run(self, tmp, task_vars):
+ def run(self):
"""Executes a check, normally implemented as a module."""
return {}
@@ -62,6 +68,66 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
+ def execute_module(self, module_name=None, module_args=None):
+ """Invoke an Ansible module from a check.
+
+ Invoke stored _execute_module, normally copied from the action
+ plugin, with its params and the task_vars and tmp given at
+ check initialization. No positional parameters beyond these
+ are specified. If it's necessary to specify any of the other
+ parameters to _execute_module then that should just be invoked
+ directly (with awareness of changes in method signature per
+ Ansible version).
+
+ So e.g. check.execute_module("foo", dict(arg1=...))
+ Return: result hash from module execution.
+ """
+ if self._execute_module is None:
+ raise NotImplementedError(
+ self.__class__.__name__ +
+ " invoked execute_module without providing the method at initialization."
+ )
+ return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+
+ def get_var(self, *keys, **kwargs):
+ """Get deeply nested values from task_vars.
+
+ Ansible task_vars structures are Python dicts, often mapping strings to
+ other dicts. This helper makes it easier to get a nested value, raising
+ OpenShiftCheckException when a key is not found or returning a default value
+ provided as a keyword argument.
+ """
+ try:
+ value = reduce(operator.getitem, keys, self.task_vars)
+ except (KeyError, TypeError):
+ if "default" in kwargs:
+ return kwargs["default"]
+ raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
+ return value
+
+ @staticmethod
+ def get_major_minor_version(openshift_image_tag):
+ """Parse and return the deployed version of OpenShift as a tuple."""
+ if openshift_image_tag and openshift_image_tag[0] == 'v':
+ openshift_image_tag = openshift_image_tag[1:]
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
+ components = openshift_image_tag.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(openshift_image_tag))
+
+ if components[0] in openshift_major_release_version:
+ components[0] = openshift_major_release_version[components[0]]
+
+ components = tuple(int(x) for x in components[:2])
+ return components
+
LOADER_EXCLUDES = (
"__init__.py",
@@ -86,20 +152,3 @@ def load_checks(path=None, subpkg=""):
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
-
-
-def get_var(task_vars, *keys, **kwargs):
- """Helper function to get deeply nested values from task_vars.
-
- Ansible task_vars structures are Python dicts, often mapping strings to
- other dicts. This helper makes it easier to get a nested value, raising
- OpenShiftCheckException when a key is not found or returning a default value
- provided as a keyword argument.
- """
- try:
- value = reduce(operator.getitem, keys, task_vars)
- except (KeyError, TypeError):
- if "default" in kwargs:
- return kwargs["default"]
- raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
- return value
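
After this refactor, a check receives execute_module, task_vars, and tmp at construction and no longer threads them through is_active()/run(); the module-level get_var helper becomes the get_var method. A minimal sketch of a check written against the new API (the check name and module call are hypothetical):

    from openshift_checks import OpenShiftCheck

    class ExampleCheck(OpenShiftCheck):
        name = "example_check"
        tags = ["preflight"]

        def is_active(self):
            # task_vars lives on self; no parameter threading needed.
            return super(ExampleCheck, self).is_active() and \
                'masters' in self.get_var("group_names", default=[])

        def run(self):
            # execute_module() supplies self.tmp and self.task_vars itself.
            result = self.execute_module("ping", {})
            return {"changed": False, "failed": result.get("failed", False)}
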
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index e93e81efa..283461294 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -3,7 +3,7 @@
import os.path
import tempfile
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class DiskAvailability(OpenShiftCheck):
@@ -35,22 +35,21 @@ class DiskAvailability(OpenShiftCheck):
},
}
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Skip hosts that do not have recommended disk space requirements."""
- group_names = get_var(task_vars, "group_names", default=[])
+ group_names = self.get_var("group_names", default=[])
active_groups = set()
- for recommendation in cls.recommended_disk_space_bytes.values():
+ for recommendation in self.recommended_disk_space_bytes.values():
active_groups.update(recommendation.keys())
has_disk_space_recommendation = bool(active_groups.intersection(group_names))
- return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
+ return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
- def run(self, tmp, task_vars):
- group_names = get_var(task_vars, "group_names")
- ansible_mounts = get_var(task_vars, "ansible_mounts")
+ def run(self):
+ group_names = self.get_var("group_names")
+ ansible_mounts = self.get_var("ansible_mounts")
ansible_mounts = {mount['mount']: mount for mount in ansible_mounts}
- user_config = get_var(task_vars, "openshift_check_min_host_disk_gb", default={})
+ user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
try:
# For backwards-compatibility, if openshift_check_min_host_disk_gb
# is a number, then it overrides the required config for '/var'.
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index bde81ad2c..77180223e 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,6 +1,6 @@
"""Check that required Docker images are available."""
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import DockerHostMixin
@@ -22,25 +22,26 @@ DEPLOYMENT_IMAGE_INFO = {
class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
- This check attempts to ensure that required docker images are
- either present locally, or able to be pulled down from available
- registries defined in a host machine.
+ Determine docker images that an install would require and check that they
+ are either present in the host's docker index, or available for the host to pull
+ with known registries as defined in our inventory file (or defaults).
"""
name = "docker_image_availability"
tags = ["preflight"]
- dependencies = ["skopeo", "python-docker-py"]
+ # we use python-docker-py to check local docker for images, and skopeo
+ # to look for images available remotely without waiting to pull them.
+ dependencies = ["python-docker-py", "skopeo"]
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Skip hosts with unsupported deployment types."""
- deployment_type = get_var(task_vars, "openshift_deployment_type")
+ deployment_type = self.get_var("openshift_deployment_type")
has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
- return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
+ return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type
- def run(self, tmp, task_vars):
- msg, failed, changed = self.ensure_dependencies(task_vars)
+ def run(self):
+ msg, failed, changed = self.ensure_dependencies()
if failed:
return {
"failed": True,
@@ -48,18 +49,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
- required_images = self.required_images(task_vars)
- missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+ required_images = self.required_images()
+ missing_images = set(required_images) - set(self.local_images(required_images))
# exit early if all images were found locally
if not missing_images:
return {"changed": changed}
- registries = self.known_docker_registries(task_vars)
+ registries = self.known_docker_registries()
if not registries:
return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
- available_images = self.available_images(missing_images, registries, task_vars)
+ available_images = self.available_images(missing_images, registries)
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
@@ -74,8 +75,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
return {"changed": changed}
- @staticmethod
- def required_images(task_vars):
+ def required_images(self):
"""
Determine which images we expect to need for this host.
Returns: a set of required images like 'openshift/origin:v3.6'
@@ -92,17 +92,17 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
Registry is not included in constructed images. It may be in oreg_url or etcd image.
"""
required = set()
- deployment_type = get_var(task_vars, "openshift_deployment_type")
- host_groups = get_var(task_vars, "group_names")
+ deployment_type = self.get_var("openshift_deployment_type")
+ host_groups = self.get_var("group_names")
# containerized etcd may not have openshift_image_tag, see bz 1466622
- image_tag = get_var(task_vars, "openshift_image_tag", default="latest")
+ image_tag = self.get_var("openshift_image_tag", default="latest")
image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
if not image_info:
return required
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
- image_url = get_var(task_vars, "oreg_url", default="") or image_url
+ image_url = self.get_var("oreg_url", default="") or image_url
if 'nodes' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
@@ -112,7 +112,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
required.add(image_info["registry_console_image"])
# images for containerized components
- if get_var(task_vars, "openshift", "common", "is_containerized"):
+ if self.get_var("openshift", "common", "is_containerized"):
components = set()
if 'nodes' in host_groups:
components.update(["node", "openvswitch"])
@@ -125,28 +125,27 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
return required
- def local_images(self, images, task_vars):
+ def local_images(self, images):
"""Filter a list of images and return those available locally."""
return [
image for image in images
- if self.is_image_local(image, task_vars)
+ if self.is_image_local(image)
]
- def is_image_local(self, image, task_vars):
+ def is_image_local(self, image):
"""Check if image is already in local docker index."""
- result = self.execute_module("docker_image_facts", {"name": image}, task_vars=task_vars)
+ result = self.execute_module("docker_image_facts", {"name": image})
if result.get("failed", False):
return False
return bool(result.get("images", []))
- @staticmethod
- def known_docker_registries(task_vars):
+ def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- docker_facts = get_var(task_vars, "openshift", "docker")
+ docker_facts = self.get_var("openshift", "docker")
regs = set(docker_facts["additional_registries"])
- deployment_type = get_var(task_vars, "openshift_deployment_type")
+ deployment_type = self.get_var("openshift_deployment_type")
if deployment_type == "origin":
regs.update(["docker.io"])
elif "enterprise" in deployment_type:
@@ -154,24 +153,25 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
return list(regs)
- def available_images(self, images, registries, task_vars):
- """Inspect existing images using Skopeo and return all images successfully inspected."""
+ def available_images(self, images, default_registries):
+ """Search remotely for images. Returns: list of images found."""
return [
image for image in images
- if self.is_available_skopeo_image(image, registries, task_vars)
+ if self.is_available_skopeo_image(image, default_registries)
]
- def is_available_skopeo_image(self, image, registries, task_vars):
+ def is_available_skopeo_image(self, image, default_registries):
"""Use Skopeo to determine if required image exists in known registry(s)."""
+ registries = default_registries
- # if image does already includes a registry, just use that
+ # if image already includes a registry, only use that
if image.count("/") > 1:
registry, image = image.split("/", 1)
registries = [registry]
for registry in registries:
args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
- result = self.execute_module("command", args, task_vars=task_vars)
+ result = self.execute_module("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
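
The registry-extraction rule above: an image reference with more than one '/' carries its own registry, which overrides the default list; otherwise every known registry is tried. A self-contained sketch of that logic with illustrative image names:

    def pick_registries(image, default_registries):
        # An image with >1 '/' embeds a registry; use only that one.
        registries = default_registries
        if image.count("/") > 1:
            registry, image = image.split("/", 1)
            registries = [registry]
        return registries, image

    assert pick_registries("openshift/origin-pod:v3.6", ["docker.io"]) == \
        (["docker.io"], "openshift/origin-pod:v3.6")
    assert pick_registries("registry.example.com/openshift3/ose:v3.6",
                           ["registry.access.redhat.com"]) == \
        (["registry.example.com"], "openshift3/ose:v3.6")
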
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index d2227d244..dea15a56e 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -2,7 +2,7 @@
import json
import os.path
import re
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks.mixins import DockerHostMixin
@@ -42,8 +42,8 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
),
]
- def run(self, tmp, task_vars):
- msg, failed, changed = self.ensure_dependencies(task_vars)
+ def run(self):
+ msg, failed, changed = self.ensure_dependencies()
if failed:
return {
"failed": True,
@@ -52,7 +52,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
}
# attempt to get the docker info hash from the API
- docker_info = self.execute_module("docker_info", {}, task_vars=task_vars)
+ docker_info = self.execute_module("docker_info", {})
if docker_info.get("failed"):
return {"failed": True, "changed": changed,
"msg": "Failed to query Docker API. Is docker running on this host?"}
@@ -76,15 +76,15 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
result = {}
if driver == "devicemapper":
- result = self.check_devicemapper_support(driver_status, task_vars)
+ result = self.check_devicemapper_support(driver_status)
if driver in ['overlay', 'overlay2']:
- result = self.check_overlay_support(docker_info, driver_status, task_vars)
+ result = self.check_overlay_support(docker_info, driver_status)
result['changed'] = result.get('changed', False) or changed
return result
- def check_devicemapper_support(self, driver_status, task_vars):
+ def check_devicemapper_support(self, driver_status):
"""Check if dm storage driver is supported as configured. Return: result dict."""
if driver_status.get("Data loop file"):
msg = (
@@ -94,10 +94,10 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
"See http://red.ht/2rNperO for further information."
)
return {"failed": True, "msg": msg}
- result = self.check_dm_usage(driver_status, task_vars)
+ result = self.check_dm_usage(driver_status)
return result
- def check_dm_usage(self, driver_status, task_vars):
+ def check_dm_usage(self, driver_status):
"""Check usage thresholds for Docker dm storage driver. Return: result dict.
Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
@@ -109,7 +109,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
could run out of space first; so we check both.
"""
vals = dict(
- vg_free=self.get_vg_free(driver_status.get("Pool Name"), task_vars),
+ vg_free=self.get_vg_free(driver_status.get("Pool Name")),
data_used=driver_status.get("Data Space Used"),
data_total=driver_status.get("Data Space Total"),
metadata_used=driver_status.get("Metadata Space Used"),
@@ -130,7 +130,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
# determine the threshold percentages which usage should not exceed
for name, default in [("data", self.max_thinpool_data_usage_percent),
("metadata", self.max_thinpool_meta_usage_percent)]:
- percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+ percent = self.get_var("max_thinpool_" + name + "_usage_percent", default=default)
try:
vals[name + "_threshold"] = float(percent)
except ValueError:
@@ -157,7 +157,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
return vals
- def get_vg_free(self, pool, task_vars):
+ def get_vg_free(self, pool):
"""Determine which VG to examine according to the pool name. Return: size vgs reports.
Pool name is the only indicator currently available from the Docker API driver info.
We assume a name that looks like "vg--name-docker--pool";
@@ -174,7 +174,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
vgs_cmd = "/sbin/vgs --noheadings -o vg_free --units g --select vg_name=" + vg_name
# should return free space like " 12.00g" if the VG exists; empty if it does not
- ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars=task_vars)
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd})
if ret.get("failed") or ret.get("rc", 0) != 0:
raise OpenShiftCheckException(
"Is LVM installed? Failed to run /sbin/vgs "
@@ -213,7 +213,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
return float(number) * multiplier
- def check_overlay_support(self, docker_info, driver_status, task_vars):
+ def check_overlay_support(self, docker_info, driver_status):
"""Check if overlay storage driver is supported for this host. Return: result dict."""
# check for xfs as backing store
backing_fs = driver_status.get("Backing Filesystem", "[NONE]")
@@ -239,13 +239,13 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
# NOTE: we could check for --selinux-enabled here but docker won't even start with
# that option until it's supported in the kernel so we don't need to.
- return self.check_overlay_usage(docker_info, task_vars)
+ return self.check_overlay_usage(docker_info)
- def check_overlay_usage(self, docker_info, task_vars):
+ def check_overlay_usage(self, docker_info):
"""Check disk usage on OverlayFS backing store volume. Return: result dict."""
path = docker_info.get("DockerRootDir", "/var/lib/docker") + "/" + docker_info["Driver"]
- threshold = get_var(task_vars, "max_overlay_usage_percent", default=self.max_overlay_usage_percent)
+ threshold = self.get_var("max_overlay_usage_percent", default=self.max_overlay_usage_percent)
try:
threshold = float(threshold)
except ValueError:
@@ -254,7 +254,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
"msg": "Specified 'max_overlay_usage_percent' is not a percentage: {}".format(threshold),
}
- mount = self.find_ansible_mount(path, get_var(task_vars, "ansible_mounts"))
+ mount = self.find_ansible_mount(path, self.get_var("ansible_mounts"))
try:
free_bytes = mount['size_available']
total_bytes = mount['size_total']
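
The dm-usage paths above rely on a small size parser (note the `return float(number) * multiplier` in the hunk); an illustrative reconstruction, assuming the usual vgs/docker unit suffixes:

    import re

    def parse_size(size_str):
        """Parse sizes like ' 12.00g' or '500.0m' (as printed by vgs/docker) into bytes."""
        multipliers = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        match = re.match(r'\s*([\d.]+)\s*([bkmgt])', size_str.lower())
        if not match:
            raise ValueError("unrecognized size: {}".format(size_str))
        number, unit = match.groups()
        return float(number) * multipliers[unit]

    print(parse_size(" 12.00g"))  # 12884901888.0 bytes
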
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
index c04a69765..28c38504d 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
@@ -2,7 +2,7 @@
Ansible module for determining if the size of OpenShift image data exceeds a specified limit in an etcd cluster.
"""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class EtcdImageDataSize(OpenShiftCheck):
@@ -11,24 +11,25 @@ class EtcdImageDataSize(OpenShiftCheck):
name = "etcd_imagedata_size"
tags = ["etcd"]
- def run(self, tmp, task_vars):
- etcd_mountpath = self._get_etcd_mountpath(get_var(task_vars, "ansible_mounts"))
+ def run(self):
+ etcd_mountpath = self._get_etcd_mountpath(self.get_var("ansible_mounts"))
etcd_avail_diskspace = etcd_mountpath["size_available"]
etcd_total_diskspace = etcd_mountpath["size_total"]
- etcd_imagedata_size_limit = get_var(task_vars,
- "etcd_max_image_data_size_bytes",
- default=int(0.5 * float(etcd_total_diskspace - etcd_avail_diskspace)))
+ etcd_imagedata_size_limit = self.get_var(
+ "etcd_max_image_data_size_bytes",
+ default=int(0.5 * float(etcd_total_diskspace - etcd_avail_diskspace))
+ )
- etcd_is_ssl = get_var(task_vars, "openshift", "master", "etcd_use_ssl", default=False)
- etcd_port = get_var(task_vars, "openshift", "master", "etcd_port", default=2379)
- etcd_hosts = get_var(task_vars, "openshift", "master", "etcd_hosts")
+ etcd_is_ssl = self.get_var("openshift", "master", "etcd_use_ssl", default=False)
+ etcd_port = self.get_var("openshift", "master", "etcd_port", default=2379)
+ etcd_hosts = self.get_var("openshift", "master", "etcd_hosts")
- config_base = get_var(task_vars, "openshift", "common", "config_base")
+ config_base = self.get_var("openshift", "common", "config_base")
- cert = task_vars.get("etcd_client_cert", config_base + "/master/master.etcd-client.crt")
- key = task_vars.get("etcd_client_key", config_base + "/master/master.etcd-client.key")
- ca_cert = task_vars.get("etcd_client_ca_cert", config_base + "/master/master.etcd-ca.crt")
+ cert = self.get_var("etcd_client_cert", default=config_base + "/master/master.etcd-client.crt")
+ key = self.get_var("etcd_client_key", default=config_base + "/master/master.etcd-client.key")
+ ca_cert = self.get_var("etcd_client_ca_cert", default=config_base + "/master/master.etcd-ca.crt")
for etcd_host in list(etcd_hosts):
args = {
@@ -46,7 +47,7 @@ class EtcdImageDataSize(OpenShiftCheck):
},
}
- etcdkeysize = self.module_executor("etcdkeysize", args, task_vars)
+ etcdkeysize = self.execute_module("etcdkeysize", args)
if etcdkeysize.get("rc", 0) != 0 or etcdkeysize.get("failed"):
msg = 'Failed to retrieve stats for etcd host "{host}": {reason}'
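
A worked example of the default limit computed above, with made-up mount numbers: when etcd_max_image_data_size_bytes is unset, the limit defaults to half of the space already consumed on the etcd volume:

    size_total = 100 * 2**30      # 100 GiB etcd volume
    size_available = 60 * 2**30   # 60 GiB still free
    used = size_total - size_available
    default_limit = int(0.5 * float(used))
    print(default_limit == 20 * 2**30)  # True: the limit defaults to 20 GiB
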
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index 40c87873d..cc1b14d8a 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -1,6 +1,6 @@
"""Check that scans journalctl for messages caused as a symptom of increased etcd traffic."""
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck
class EtcdTraffic(OpenShiftCheck):
@@ -9,19 +9,18 @@ class EtcdTraffic(OpenShiftCheck):
name = "etcd_traffic"
tags = ["health", "etcd"]
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Skip hosts that do not have etcd in their group names."""
- group_names = get_var(task_vars, "group_names", default=[])
+ group_names = self.get_var("group_names", default=[])
valid_group_names = "etcd" in group_names
- version = get_var(task_vars, "openshift", "common", "short_version")
+ version = self.get_var("openshift", "common", "short_version")
valid_version = version in ("3.4", "3.5", "1.4", "1.5")
- return super(EtcdTraffic, cls).is_active(task_vars) and valid_group_names and valid_version
+ return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
- def run(self, tmp, task_vars):
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ def run(self):
+ is_containerized = self.get_var("openshift", "common", "is_containerized")
unit = "etcd_container" if is_containerized else "etcd"
log_matchers = [{
@@ -30,9 +29,7 @@ class EtcdTraffic(OpenShiftCheck):
"unit": unit
}]
- match = self.execute_module("search_journalctl", {
- "log_matchers": log_matchers,
- }, task_vars)
+ match = self.execute_module("search_journalctl", {"log_matchers": log_matchers})
if match.get("matched"):
msg = ("Higher than normal etcd traffic detected.\n"
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index 7452c9cc1..da7d0364a 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -1,6 +1,6 @@
"""A health check for OpenShift clusters."""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class EtcdVolume(OpenShiftCheck):
@@ -14,21 +14,18 @@ class EtcdVolume(OpenShiftCheck):
# Where to find etcd data, higher priority first.
supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
- @classmethod
- def is_active(cls, task_vars):
- etcd_hosts = get_var(task_vars, "groups", "etcd", default=[]) or get_var(task_vars, "groups", "masters",
- default=[]) or []
- is_etcd_host = get_var(task_vars, "ansible_ssh_host") in etcd_hosts
- return super(EtcdVolume, cls).is_active(task_vars) and is_etcd_host
+ def is_active(self):
+ etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
+ is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+ return super(EtcdVolume, self).is_active() and is_etcd_host
- def run(self, tmp, task_vars):
- mount_info = self._etcd_mount_info(task_vars)
+ def run(self):
+ mount_info = self._etcd_mount_info()
available = mount_info["size_available"]
total = mount_info["size_total"]
used = total - available
- threshold = get_var(
- task_vars,
+ threshold = self.get_var(
"etcd_device_usage_threshold_percent",
default=self.default_threshold_percent
)
@@ -45,8 +42,8 @@ class EtcdVolume(OpenShiftCheck):
return {"changed": False}
- def _etcd_mount_info(self, task_vars):
- ansible_mounts = get_var(task_vars, "ansible_mounts")
+ def _etcd_mount_info(self):
+ ansible_mounts = self.get_var("ansible_mounts")
mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
for path in self.supported_mount_paths:
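
The mount resolution above picks the most specific supported path actually mounted on the host; a minimal standalone equivalent:

    def etcd_mount_info(ansible_mounts,
                        supported_mount_paths=("/var/lib/etcd", "/var/lib", "/var", "/")):
        """Pick the highest-priority mount point present in the host facts."""
        mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
        for path in supported_mount_paths:
            if path in mounts:
                return mounts[path]
        raise RuntimeError("unable to determine etcd storage mount point")

    facts = [{"mount": "/", "size_available": 10 * 2**30, "size_total": 20 * 2**30},
             {"mount": "/var", "size_available": 30 * 2**30, "size_total": 40 * 2**30}]
    print(etcd_mount_info(facts)["mount"])  # "/var" wins over "/"
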
diff --git a/roles/openshift_health_checker/openshift_checks/logging/curator.py b/roles/openshift_health_checker/openshift_checks/logging/curator.py
index c9fc59896..32d853d57 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/curator.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/curator.py
@@ -1,28 +1,21 @@
-"""
-Module for performing checks on an Curator logging deployment
-"""
+"""Check for an aggregated logging Curator deployment"""
-from openshift_checks import get_var
from openshift_checks.logging.logging import LoggingCheck
class Curator(LoggingCheck):
- """Module that checks an integrated logging Curator deployment"""
+ """Check for an aggregated logging Curator deployment"""
name = "curator"
tags = ["health", "logging"]
- logging_namespace = None
-
- def run(self, tmp, task_vars):
+ def run(self):
"""Check various things and gather errors. Returns: result as hash"""
- self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
- curator_pods, error = super(Curator, self).get_pods_for_component(
- self.module_executor,
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ curator_pods, error = self.get_pods_for_component(
self.logging_namespace,
"curator",
- task_vars
)
if error:
return {"failed": True, "changed": False, "msg": error}
@@ -30,7 +23,6 @@ class Curator(LoggingCheck):
if check_error:
msg = ("The following Curator deployment issue was found:"
- "\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
@@ -46,7 +38,7 @@ class Curator(LoggingCheck):
"Is Curator correctly deployed?"
)
- not_running = super(Curator, self).not_running_pods(pods)
+ not_running = self.not_running_pods(pods)
if len(not_running) == len(pods):
return (
"The Curator pod is not currently in a running state,\n"
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 01cb35b81..8bdda1f32 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -1,39 +1,31 @@
-"""
-Module for performing checks on an Elasticsearch logging deployment
-"""
+"""Check for an aggregated logging Elasticsearch deployment"""
import json
import re
-from openshift_checks import get_var
from openshift_checks.logging.logging import LoggingCheck
class Elasticsearch(LoggingCheck):
- """Module that checks an integrated logging Elasticsearch deployment"""
+ """Check for an aggregated logging Elasticsearch deployment"""
name = "elasticsearch"
tags = ["health", "logging"]
- logging_namespace = None
-
- def run(self, tmp, task_vars):
+ def run(self):
"""Check various things and gather errors. Returns: result as hash"""
- self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
- es_pods, error = super(Elasticsearch, self).get_pods_for_component(
- self.execute_module,
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ es_pods, error = self.get_pods_for_component(
self.logging_namespace,
"es",
- task_vars,
)
if error:
return {"failed": True, "changed": False, "msg": error}
- check_error = self.check_elasticsearch(es_pods, task_vars)
+ check_error = self.check_elasticsearch(es_pods)
if check_error:
msg = ("The following Elasticsearch deployment issue was found:"
- "\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
@@ -41,8 +33,8 @@ class Elasticsearch(LoggingCheck):
return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
def _not_running_elasticsearch_pods(self, es_pods):
- """Returns: list of running pods, list of errors about non-running pods"""
- not_running = super(Elasticsearch, self).not_running_pods(es_pods)
+ """Returns: list of pods that are not running, list of errors about non-running pods"""
+ not_running = self.not_running_pods(es_pods)
if not_running:
return not_running, [(
'The following Elasticsearch pods are not running:\n'
@@ -54,7 +46,7 @@ class Elasticsearch(LoggingCheck):
))]
return not_running, []
- def check_elasticsearch(self, es_pods, task_vars):
+ def check_elasticsearch(self, es_pods):
"""Various checks for elasticsearch. Returns: error string"""
not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
running_pods = [pod for pod in es_pods if pod not in not_running_pods]
@@ -65,10 +57,10 @@ class Elasticsearch(LoggingCheck):
}
if not pods_by_name:
return 'No logging Elasticsearch pods were found. Is logging deployed?'
- error_msgs += self._check_elasticsearch_masters(pods_by_name, task_vars)
- error_msgs += self._check_elasticsearch_node_list(pods_by_name, task_vars)
- error_msgs += self._check_es_cluster_health(pods_by_name, task_vars)
- error_msgs += self._check_elasticsearch_diskspace(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_masters(pods_by_name)
+ error_msgs += self._check_elasticsearch_node_list(pods_by_name)
+ error_msgs += self._check_es_cluster_health(pods_by_name)
+ error_msgs += self._check_elasticsearch_diskspace(pods_by_name)
return '\n'.join(error_msgs)
@staticmethod
@@ -76,14 +68,14 @@ class Elasticsearch(LoggingCheck):
base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
- def _check_elasticsearch_masters(self, pods_by_name, task_vars):
+ def _check_elasticsearch_masters(self, pods_by_name):
"""Check that Elasticsearch masters are sane. Returns: list of error strings"""
es_master_names = set()
error_msgs = []
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self._exec_oc(get_master_cmd, [], task_vars)
+ master_name_str = self.exec_oc(self.logging_namespace, get_master_cmd, [])
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -108,7 +100,7 @@ class Elasticsearch(LoggingCheck):
return error_msgs
- def _check_elasticsearch_node_list(self, pods_by_name, task_vars):
+ def _check_elasticsearch_node_list(self, pods_by_name):
"""Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
if not pods_by_name:
@@ -116,7 +108,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self._exec_oc(node_cmd, [], task_vars)
+ cluster_node_data = self.exec_oc(self.logging_namespace, node_cmd, [])
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -138,12 +130,12 @@ class Elasticsearch(LoggingCheck):
return error_msgs
- def _check_es_cluster_health(self, pods_by_name, task_vars):
+ def _check_es_cluster_health(self, pods_by_name):
"""Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
error_msgs = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self._exec_oc(cluster_health_cmd, [], task_vars)
+ cluster_health_data = self.exec_oc(self.logging_namespace, cluster_health_cmd, [])
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -162,7 +154,7 @@ class Elasticsearch(LoggingCheck):
return error_msgs
- def _check_elasticsearch_diskspace(self, pods_by_name, task_vars):
+ def _check_elasticsearch_diskspace(self, pods_by_name):
"""
Exec into an ES pod and query the diskspace on the persistent volume.
Returns: list of errors
@@ -170,7 +162,7 @@ class Elasticsearch(LoggingCheck):
error_msgs = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self._exec_oc(df_cmd, [], task_vars)
+ disk_output = self.exec_oc(self.logging_namespace, df_cmd, [])
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
@@ -182,7 +174,7 @@ class Elasticsearch(LoggingCheck):
continue
inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
- inode_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_inode_pct', default='90')
+ inode_pct_thresh = self.get_var('openshift_check_efk_es_inode_pct', default='90')
if int(inode_pct) >= int(inode_pct_thresh):
error_msgs.append(
'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
@@ -193,7 +185,7 @@ class Elasticsearch(LoggingCheck):
limit=str(inode_pct_thresh),
param='openshift_check_efk_es_inode_pct',
))
- disk_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_storage_pct', default='80')
+ disk_pct_thresh = self.get_var('openshift_check_efk_es_storage_pct', default='80')
if int(disk_pct) >= int(disk_pct_thresh):
error_msgs.append(
'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
@@ -206,12 +198,3 @@ class Elasticsearch(LoggingCheck):
))
return error_msgs
-
- def _exec_oc(self, cmd_str, extra_args, task_vars):
- return super(Elasticsearch, self).exec_oc(
- self.execute_module,
- self.logging_namespace,
- cmd_str,
- extra_args,
- task_vars,
- )
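
The split-brain detection in _check_elasticsearch_masters reduces to collecting the master each ES pod reports and flagging disagreement; a sketch with fabricated `_cat/master` output:

    def reported_masters(cat_master_output_by_pod):
        """Collect the master each ES pod reports; two or more names suggests split brain."""
        masters = set()
        for output in cat_master_output_by_pod.values():
            # `_cat/master` emits one line of space-separated fields; the check
            # above keys on field 1 (treat the exact field meaning as illustrative)
            fields = (output or '').split(' ')
            if len(fields) > 1:
                masters.add(fields[1])
        return masters

    reports = {
        "logging-es-pod-a": "x1 logging-es-1 10.128.0.4 logging-es-1",
        "logging-es-pod-b": "x1 logging-es-1 10.128.0.4 logging-es-1",
    }
    print(len(reported_masters(reports)) == 1)  # True: all pods agree on one master
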
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
index 627567293..b3485bf44 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
@@ -1,37 +1,30 @@
-"""
-Module for performing checks on an Fluentd logging deployment
-"""
+"""Check for an aggregated logging Fluentd deployment"""
import json
-from openshift_checks import get_var
from openshift_checks.logging.logging import LoggingCheck
class Fluentd(LoggingCheck):
- """Module that checks an integrated logging Fluentd deployment"""
+ """Check for an aggregated logging Fluentd deployment"""
+
name = "fluentd"
tags = ["health", "logging"]
- logging_namespace = None
-
- def run(self, tmp, task_vars):
+ def run(self):
"""Check various things and gather errors. Returns: result as hash"""
- self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
- self.execute_module,
self.logging_namespace,
"fluentd",
- task_vars,
)
if error:
return {"failed": True, "changed": False, "msg": error}
- check_error = self.check_fluentd(fluentd_pods, task_vars)
+ check_error = self.check_fluentd(fluentd_pods)
if check_error:
msg = ("The following Fluentd deployment issue was found:"
- "\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
@@ -53,10 +46,9 @@ class Fluentd(LoggingCheck):
).format(label=node_selector)
return fluentd_nodes, None
- @staticmethod
- def _check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars):
+ def _check_node_labeling(self, nodes_by_name, fluentd_nodes, node_selector):
"""Note if nodes are not labeled as expected. Returns: error string"""
- intended_nodes = get_var(task_vars, 'openshift_logging_fluentd_hosts', default=['--all'])
+ intended_nodes = self.get_var('openshift_logging_fluentd_hosts', default=['--all'])
if not intended_nodes or '--all' in intended_nodes:
intended_nodes = nodes_by_name.keys()
nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
@@ -114,13 +106,15 @@ class Fluentd(LoggingCheck):
))
return None
- def check_fluentd(self, pods, task_vars):
+ def check_fluentd(self, pods):
"""Verify fluentd is running everywhere. Returns: error string"""
- node_selector = get_var(task_vars, 'openshift_logging_fluentd_nodeselector',
- default='logging-infra-fluentd=true')
+ node_selector = self.get_var(
+ 'openshift_logging_fluentd_nodeselector',
+ default='logging-infra-fluentd=true'
+ )
- nodes_by_name, error = self.get_nodes_by_name(task_vars)
+ nodes_by_name, error = self.get_nodes_by_name()
if error:
return error
@@ -129,7 +123,7 @@ class Fluentd(LoggingCheck):
return error
error_msgs = []
- error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars)
+ error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector)
if error:
error_msgs.append(error)
error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
@@ -148,9 +142,13 @@ class Fluentd(LoggingCheck):
return '\n'.join(error_msgs)
- def get_nodes_by_name(self, task_vars):
+ def get_nodes_by_name(self):
"""Retrieve all the node definitions. Returns: dict(name: node), error string"""
- nodes_json = self._exec_oc("get nodes -o json", [], task_vars)
+ nodes_json = self.exec_oc(
+ self.logging_namespace,
+ "get nodes -o json",
+ []
+ )
try:
nodes = json.loads(nodes_json)
except ValueError: # no valid json - should not happen
@@ -161,10 +159,3 @@ class Fluentd(LoggingCheck):
node['metadata']['name']: node
for node in nodes['items']
}, None
-
- def _exec_oc(self, cmd_str, extra_args, task_vars):
- return super(Fluentd, self).exec_oc(self.execute_module,
- self.logging_namespace,
- cmd_str,
- extra_args,
- task_vars)
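
The node-labeling check above is essentially a set difference between intended and actually-labeled nodes; a standalone sketch:

    def nodes_missing_fluentd_label(nodes_by_name, fluentd_nodes,
                                    intended_nodes=('--all',)):
        """Mirror _check_node_labeling: which intended nodes lack the fluentd label?"""
        if not intended_nodes or '--all' in intended_nodes:
            intended_nodes = nodes_by_name.keys()
        return set(intended_nodes) - set(fluentd_nodes.keys())

    nodes = {"node-1": {}, "node-2": {}}
    labeled = {"node-1": {}}
    print(nodes_missing_fluentd_label(nodes, labeled))  # {'node-2'}
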
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
new file mode 100644
index 000000000..0970f0a63
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
@@ -0,0 +1,138 @@
+"""
+Module for performing checks on a Fluentd logging deployment configuration
+"""
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class FluentdConfig(LoggingCheck):
+ """Module that checks logging configuration of an integrated logging Fluentd deployment"""
+ name = "fluentd_config"
+ tags = ["health"]
+
+ def is_active(self):
+ logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False)
+
+ try:
+ version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
+ except ValueError:
+ # if failed to parse OpenShift version, perform check anyway (if logging enabled)
+ return logging_deployed
+
+ return logging_deployed and version < (3, 6)
+
+ def run(self):
+ """Check that Fluentd has running pods, and that its logging config matches Docker's logging config."""
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default=self.logging_namespace)
+ config_error = self.check_logging_config()
+ if config_error:
+ msg = ("The following Fluentd logging configuration problem was found:"
+ "\n{}".format(config_error))
+ return {"failed": True, "msg": msg}
+
+ return {}
+
+ def check_logging_config(self):
+ """Ensure that the configured Docker logging driver matches fluentd settings.
+ This means that, at least for now, if the following condition is met:
+
+ openshift_logging_fluentd_use_journal == True
+
+ then the value of the configured Docker logging driver should be "journald".
+ Otherwise, the value of the Docker logging driver should be "json-file".
+ Returns an error string if the above condition is not met, or None otherwise."""
+ use_journald = self.get_var("openshift_logging_fluentd_use_journal", default=True)
+
+ # if check is running on a master, retrieve all running pods
+ # and check any pod's container for the env var "USE_JOURNAL"
+ group_names = self.get_var("group_names")
+ if "masters" in group_names:
+ use_journald = self.check_fluentd_env_var()
+
+ docker_info = self.execute_module("docker_info", {})
+ try:
+ logging_driver = docker_info["info"]["LoggingDriver"]
+ except KeyError:
+ return "Unable to determine Docker logging driver."
+
+ recommended_logging_driver = "journald"
+ error = None
+
+ # If fluentd is set to use journald but Docker is not, recommend setting the `--log-driver`
+ # option as an inventory file variable, or adding the log driver value as part of the
+ # Docker configuration in /etc/docker/daemon.json. There is no global --log-driver flag that
+ # can be passed to the Docker binary; the only other recommendation that can be made, would be
+ # to pass the `--log-driver` flag to the "run" sub-command of the `docker` binary when running
+ # individual containers.
+ if use_journald and logging_driver != "journald":
+ error = ('Your Fluentd configuration is set to aggregate Docker container logs from "journald".\n'
+ 'This differs from your Docker configuration, which has been set to use "{driver}" '
+ 'as the default method of storing logs.\n'
+ 'This discrepancy in configuration will prevent Fluentd from receiving any logs '
+ 'from your Docker containers.').format(driver=logging_driver)
+ elif not use_journald and logging_driver != "json-file":
+ recommended_logging_driver = "json-file"
+ error = ('Your Fluentd configuration is set to aggregate Docker container logs from '
+ 'individual json log files per container.\n'
+ 'This differs from your Docker configuration, which has been set to use '
+ '"{driver}" as the default method of storing logs.\n'
+ 'This discrepancy in configuration will prevent Fluentd from receiving any logs '
+ 'from your Docker containers.').format(driver=logging_driver)
+
+ if error:
+ error += ('\nTo resolve this issue, add the following variable to your Ansible inventory file:\n\n'
+ ' openshift_docker_options="--log-driver={driver}"\n\n'
+ 'Alternatively, you can add the following option to your Docker configuration, located in '
+ '"/etc/docker/daemon.json":\n\n'
+ '{{ "log-driver": "{driver}" }}\n\n'
+ 'See https://docs.docker.com/engine/admin/logging/json-file '
+ 'for more information.').format(driver=recommended_logging_driver)
+
+ return error
+
+ def check_fluentd_env_var(self):
+ """Read and return the value of the 'USE_JOURNAL' environment variable on a fluentd pod."""
+ running_pods = self.running_fluentd_pods()
+
+ try:
+ pod_containers = running_pods[0]["spec"]["containers"]
+ except KeyError:
+ return "Unable to detect running containers on selected Fluentd pod."
+
+ if not pod_containers:
+ msg = ('There are no running containers on the selected Fluentd pod "{}".\n'
+ 'Unable to calculate expected logging driver.').format(running_pods[0]["metadata"].get("name", ""))
+ raise OpenShiftCheckException(msg)
+
+ pod_env = pod_containers[0].get("env")
+ if not pod_env:
+ msg = ('There are no environment variables set on the Fluentd container "{}".\n'
+ 'Unable to calculate expected logging driver.').format(pod_containers[0].get("name"))
+ raise OpenShiftCheckException(msg)
+
+ for env in pod_env:
+ if env["name"] == "USE_JOURNAL":
+ return env.get("value", "false") != "false"
+
+ return False
+
+ def running_fluentd_pods(self):
+ """Return a list of running fluentd pods."""
+ fluentd_pods, error = self.get_pods_for_component(
+ self.logging_namespace,
+ "fluentd",
+ )
+ if error:
+ msg = 'Unable to retrieve any pods for the "fluentd" logging component: {}'.format(error)
+ raise OpenShiftCheckException(msg)
+
+ running_fluentd_pods = [pod for pod in fluentd_pods if pod['status']['phase'] == 'Running']
+ if not running_fluentd_pods:
+ msg = ('No Fluentd pods were found to be in the "Running" state. '
+ 'At least one Fluentd pod is required in order to perform this check.')
+
+ raise OpenShiftCheckException(msg)
+
+ return running_fluentd_pods
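
The driver comparison at the heart of check_logging_config can be summarized in a few lines (a sketch, not the check itself):

    def expected_logging_driver(use_journald):
        """Fluentd reads either from journald or from per-container json files, never both."""
        return "journald" if use_journald else "json-file"

    def config_mismatch(use_journald, docker_logging_driver):
        return docker_logging_driver != expected_logging_driver(use_journald)

    print(config_mismatch(True, "json-file"))   # True: Fluentd would see no logs
    print(config_mismatch(False, "json-file"))  # False: configurations agree
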
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 551e8dfa0..efb14ab42 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -12,7 +12,6 @@ except ImportError:
from urllib.error import HTTPError, URLError
import urllib.request as urllib2
-from openshift_checks import get_var
from openshift_checks.logging.logging import LoggingCheck
@@ -22,35 +21,30 @@ class Kibana(LoggingCheck):
name = "kibana"
tags = ["health", "logging"]
- logging_namespace = None
-
- def run(self, tmp, task_vars):
+ def run(self):
"""Check various things and gather errors. Returns: result as hash"""
- self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
- kibana_pods, error = super(Kibana, self).get_pods_for_component(
- self.execute_module,
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
+ kibana_pods, error = self.get_pods_for_component(
self.logging_namespace,
"kibana",
- task_vars,
)
if error:
return {"failed": True, "changed": False, "msg": error}
check_error = self.check_kibana(kibana_pods)
if not check_error:
- check_error = self._check_kibana_route(task_vars)
+ check_error = self._check_kibana_route()
if check_error:
msg = ("The following Kibana deployment issue was found:"
- "\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
# TODO(lmeyer): run it all again for the ops cluster
return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
- def _verify_url_internal(self, url, task_vars):
+ def _verify_url_internal(self, url):
"""
Try to reach a URL from the host.
Returns: success (bool), reason (for failure)
@@ -62,7 +56,7 @@ class Kibana(LoggingCheck):
# TODO(lmeyer): give users option to validate certs
status_code=302,
)
- result = self.execute_module('uri', args, None, task_vars)
+ result = self.execute_module('uri', args)
if result.get('failed'):
return result['msg']
return None
@@ -114,14 +108,18 @@ class Kibana(LoggingCheck):
return None
- def _get_kibana_url(self, task_vars):
+ def _get_kibana_url(self):
"""
Get kibana route or report error.
Returns: url (or empty), reason for failure
"""
# Get logging url
- get_route = self._exec_oc("get route logging-kibana -o json", [], task_vars)
+ get_route = self.exec_oc(
+ self.logging_namespace,
+ "get route logging-kibana -o json",
+ [],
+ )
if not get_route:
return None, 'no_route_exists'
@@ -139,7 +137,7 @@ class Kibana(LoggingCheck):
return 'https://{}/'.format(host), None
- def _check_kibana_route(self, task_vars):
+ def _check_kibana_route(self):
"""
Check to see if kibana route is up and working.
Returns: error string
@@ -160,12 +158,12 @@ class Kibana(LoggingCheck):
),
)
- kibana_url, error = self._get_kibana_url(task_vars)
+ kibana_url, error = self._get_kibana_url()
if not kibana_url:
return known_errors.get(error, error)
# first, check that kibana is reachable from the master.
- error = self._verify_url_internal(kibana_url, task_vars)
+ error = self._verify_url_internal(kibana_url)
if error:
if 'urlopen error [Errno 111] Connection refused' in error:
error = (
@@ -190,7 +188,7 @@ class Kibana(LoggingCheck):
# in production we would like the kibana route to work from outside the
# cluster too; but that may not be the case, so allow disabling just this part.
- if not get_var(task_vars, "openshift_check_efk_kibana_external", default=True):
+ if not self.get_var("openshift_check_efk_kibana_external", default=True):
return None
error = self._verify_url_external(kibana_url)
if error:
@@ -220,10 +218,3 @@ class Kibana(LoggingCheck):
).format(error=error)
return error
return None
-
- def _exec_oc(self, cmd_str, extra_args, task_vars):
- return super(Kibana, self).exec_oc(self.execute_module,
- self.logging_namespace,
- cmd_str,
- extra_args,
- task_vars)
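
_verify_url_internal delegates to the Ansible uri module and expects a 302 redirect; a rough plain-urllib analogue for Python 3 (the URL is a placeholder, and "answers at all" is a simplification of the 302 expectation):

    import ssl
    from urllib.error import HTTPError, URLError
    from urllib.request import urlopen

    def reach_url(url):
        """Return None if the route answers at all, else a failure reason string."""
        ctx = ssl.create_default_context()
        ctx.check_hostname = False      # the check disables cert validation as well
        ctx.verify_mode = ssl.CERT_NONE
        try:
            urlopen(url, context=ctx, timeout=5)
        except HTTPError:
            return None                 # any HTTP status proves the route is reachable
        except URLError as exc:
            return str(exc.reason)
        return None

    print(reach_url("https://kibana.example.com/"))  # placeholder host
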
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index 02a094007..43ba6c406 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -5,40 +5,39 @@ Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana st
import json
import os
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class LoggingCheck(OpenShiftCheck):
- """Base class for logging component checks"""
+ """Base class for OpenShift aggregated logging component checks"""
+
+ # FIXME: this should not be listed as a check, since it is not meant to be
+ # run by itself.
name = "logging"
logging_namespace = "logging"
- @classmethod
- def is_active(cls, task_vars):
- logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=False)
- return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars) and logging_deployed
+ def is_active(self):
+ logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False)
+ return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
- @staticmethod
- def is_first_master(task_vars):
- """Run only on first master. Returns: bool"""
+ def is_first_master(self):
+ """Determine if running on first master. Returns: bool"""
# Note: It would be nice to use membership in the oo_first_master group; however, for now it
# seems best to avoid requiring that setup and just check that this is the first master.
- hostname = get_var(task_vars, "ansible_ssh_host") or [None]
- masters = get_var(task_vars, "groups", "masters", default=None) or [None]
- return masters and masters[0] == hostname
+ hostname = self.get_var("ansible_ssh_host") or [None]
+ masters = self.get_var("groups", "masters", default=None) or [None]
+ return masters[0] == hostname
- def run(self, tmp, task_vars):
- pass
+ def run(self):
+ return {}
- def get_pods_for_component(self, execute_module, namespace, logging_component, task_vars):
+ def get_pods_for_component(self, namespace, logging_component):
"""Get all pods for a given component. Returns: list of pods for component, error string"""
pod_output = self.exec_oc(
- execute_module,
namespace,
"get pods -l component={} -o json".format(logging_component),
[],
- task_vars
)
try:
pods = json.loads(pod_output)
@@ -64,14 +63,13 @@ class LoggingCheck(OpenShiftCheck):
)
]
- @staticmethod
- def exec_oc(execute_module=None, namespace="logging", cmd_str="", extra_args=None, task_vars=None):
+ def exec_oc(self, namespace="logging", cmd_str="", extra_args=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
or raises OpenShiftCheckException on error
"""
- config_base = get_var(task_vars, "openshift", "common", "config_base")
+ config_base = self.get_var("openshift", "common", "config_base")
args = {
"namespace": namespace,
"config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
@@ -79,7 +77,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = execute_module("ocutil", args, None, task_vars)
+ result = self.execute_module("ocutil", args)
if result.get("failed"):
msg = (
'Unexpected error using `oc` to validate the logging stack components.\n'
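
The args dict above feeds the role's internal ocutil module; approximately, the command it runs on the host looks like this (a sketch; the --config flag is our assumption about ocutil's internals):

    import os

    def build_oc_command(config_base, namespace, cmd_str, extra_args=None):
        """Approximate the `oc` invocation performed via ocutil for a check."""
        kubeconfig = os.path.join(config_base, "master", "admin.kubeconfig")
        return (["oc", "--config", kubeconfig, "-n", namespace]
                + cmd_str.split() + list(extra_args or []))

    print(build_oc_command("/etc/origin", "logging", "get pods -o json"))
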
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index 2ddd7549d..b24e88e05 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -7,7 +7,7 @@ import time
from uuid import uuid4
-from openshift_checks import get_var, OpenShiftCheckException
+from openshift_checks import OpenShiftCheckException
from openshift_checks.logging.logging import LoggingCheck
@@ -21,11 +21,11 @@ class LoggingIndexTime(LoggingCheck):
logging_namespace = "logging"
- def run(self, tmp, task_vars):
+ def run(self):
"""Add log entry by making unique request to Kibana. Check for unique entry in the ElasticSearch pod logs."""
try:
log_index_timeout = int(
- get_var(task_vars, "openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS)
+ self.get_var("openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS)
)
except ValueError:
return {
@@ -37,11 +37,9 @@ class LoggingIndexTime(LoggingCheck):
running_component_pods = dict()
# get all component pods
- self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default=self.logging_namespace)
+ self.logging_namespace = self.get_var("openshift_logging_namespace", default=self.logging_namespace)
for component, name in (['kibana', 'Kibana'], ['es', 'Elasticsearch']):
- pods, error = self.get_pods_for_component(
- self.execute_module, self.logging_namespace, component, task_vars,
- )
+ pods, error = self.get_pods_for_component(self.logging_namespace, component)
if error:
msg = 'Unable to retrieve pods for the {} logging component: {}'
@@ -56,29 +54,29 @@ class LoggingIndexTime(LoggingCheck):
running_component_pods[component] = running_pods
- uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0], task_vars)
- self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout, task_vars)
+ uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0])
+ self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout)
return {}
- def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs, task_vars):
+ def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs):
"""Retry an Elasticsearch query every second until query success, or a defined
length of time has passed."""
deadline = time.time() + timeout_secs
interval = 1
- while not self.query_es_from_es(es_pod, uuid, task_vars):
+ while not self.query_es_from_es(es_pod, uuid):
if time.time() + interval > deadline:
msg = "expecting match in Elasticsearch for message with uuid {}, but no matches were found after {}s."
raise OpenShiftCheckException(msg.format(uuid, timeout_secs))
time.sleep(interval)
- def curl_kibana_with_uuid(self, kibana_pod, task_vars):
+ def curl_kibana_with_uuid(self, kibana_pod):
"""curl Kibana with a unique uuid."""
uuid = self.generate_uuid()
pod_name = kibana_pod["metadata"]["name"]
exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}"
exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid)
- error_str = self.exec_oc(self.execute_module, self.logging_namespace, exec_cmd, [], task_vars)
+ error_str = self.exec_oc(self.logging_namespace, exec_cmd, [])
try:
error_code = json.loads(error_str)["statusCode"]
@@ -97,7 +95,7 @@ class LoggingIndexTime(LoggingCheck):
return uuid
- def query_es_from_es(self, es_pod, uuid, task_vars):
+ def query_es_from_es(self, es_pod, uuid):
"""curl the Elasticsearch pod and look for a unique uuid in its logs."""
pod_name = es_pod["metadata"]["name"]
exec_cmd = (
@@ -108,7 +106,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace, uuid=uuid)
- result = self.exec_oc(self.execute_module, self.logging_namespace, exec_cmd, [], task_vars)
+ result = self.exec_oc(self.logging_namespace, exec_cmd, [])
try:
count = json.loads(result)["count"]
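
wait_until_cmd_or_err above is a poll-until-deadline loop; stripped of the check specifics it amounts to:

    import time

    def wait_until(predicate, timeout_secs, interval=1):
        """Poll `predicate` every `interval` seconds until it succeeds or time runs out."""
        deadline = time.time() + timeout_secs
        while not predicate():
            if time.time() + interval > deadline:
                raise RuntimeError("timed out after {}s".format(timeout_secs))
            time.sleep(interval)

    attempts = iter([False, False, True])
    wait_until(lambda: next(attempts), timeout_secs=10)  # returns on the third poll
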
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index f4e31065f..765ba072d 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -1,5 +1,5 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+"""Check that recommended memory is available."""
+from openshift_checks import OpenShiftCheck
MIB = 2**20
GIB = 2**30
@@ -21,19 +21,18 @@ class MemoryAvailability(OpenShiftCheck):
# https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
memtotal_adjustment = 1 * GIB
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Skip hosts that do not have recommended memory requirements."""
- group_names = get_var(task_vars, "group_names", default=[])
- has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
- return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
+ group_names = self.get_var("group_names", default=[])
+ has_memory_recommendation = bool(set(group_names).intersection(self.recommended_memory_bytes))
+ return super(MemoryAvailability, self).is_active() and has_memory_recommendation
- def run(self, tmp, task_vars):
- group_names = get_var(task_vars, "group_names")
- total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * MIB
+ def run(self):
+ group_names = self.get_var("group_names")
+ total_memory_bytes = self.get_var("ansible_memtotal_mb") * MIB
recommended_min = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
- configured_min = float(get_var(task_vars, "openshift_check_min_host_memory_gb", default=0)) * GIB
+ configured_min = float(self.get_var("openshift_check_min_host_memory_gb", default=0)) * GIB
min_memory_bytes = configured_min or recommended_min
if total_memory_bytes + self.memtotal_adjustment < min_memory_bytes:
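
A worked example of the threshold comparison above, including the 1 GiB memtotal adjustment for kernel-reserved RAM:

    MIB = 2**20
    GIB = 2**30
    memtotal_adjustment = 1 * GIB  # physical RAM partly reserved from MemTotal

    def memory_too_low(ansible_memtotal_mb, min_memory_bytes):
        total_memory_bytes = ansible_memtotal_mb * MIB
        return total_memory_bytes + memtotal_adjustment < min_memory_bytes

    # a 15.5 GiB host still passes a 16 GiB recommendation thanks to the adjustment
    print(memory_too_low(15.5 * 1024, 16 * GIB))  # False
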
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 2cb2e21aa..3b2c64e6a 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -2,19 +2,16 @@
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
-from openshift_checks import get_var
-
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
# permanent # pylint: disable=too-few-public-methods
# Reason: The mixin is not intended to stand on its own as a class.
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Only run on non-containerized hosts."""
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
- return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+ is_containerized = self.get_var("openshift", "common", "is_containerized")
+ return super(NotContainerizedMixin, self).is_active() and not is_containerized
class DockerHostMixin(object):
@@ -22,28 +19,26 @@ class DockerHostMixin(object):
dependencies = []
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Only run on hosts that depend on Docker."""
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
- is_node = "nodes" in get_var(task_vars, "group_names", default=[])
- return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+ is_containerized = self.get_var("openshift", "common", "is_containerized")
+ is_node = "nodes" in self.get_var("group_names", default=[])
+ return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
- def ensure_dependencies(self, task_vars):
+ def ensure_dependencies(self):
"""
Ensure that docker-related packages exist, but not on atomic hosts
(which would not be able to install but should already have them).
Returns: msg, failed, changed
"""
- if get_var(task_vars, "openshift", "common", "is_atomic"):
+ if self.get_var("openshift", "common", "is_atomic"):
return "", False, False
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
result = self.execute_module(
- get_var(task_vars, "ansible_pkg_mgr", default="yum"),
+ self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
- task_vars=task_vars,
)
msg = result.get("msg", "")
if result.get("failed"):
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 2dd045f1f..d5e55bc25 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp
currently installed version of OpenShift.
"""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks.mixins import NotContainerizedMixin
@@ -21,58 +21,34 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
"3.4": "2.4",
}
- # map major release versions across releases
- # to a common major version
- openshift_major_release_version = {
- "1": "3",
- }
-
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Skip hosts that do not have package requirements."""
- group_names = get_var(task_vars, "group_names", default=[])
+ group_names = self.get_var("group_names", default=[])
master_or_node = 'masters' in group_names or 'nodes' in group_names
- return super(OvsVersion, cls).is_active(task_vars) and master_or_node
+ return super(OvsVersion, self).is_active() and master_or_node
- def run(self, tmp, task_vars):
+ def run(self):
args = {
"package_list": [
{
"name": "openvswitch",
- "version": self.get_required_ovs_version(task_vars),
+ "version": self.get_required_ovs_version(),
},
],
}
- return self.execute_module("rpm_version", args, task_vars=task_vars)
+ return self.execute_module("rpm_version", args)
- def get_required_ovs_version(self, task_vars):
+ def get_required_ovs_version(self):
"""Return the correct Open vSwitch version for the current OpenShift version"""
- openshift_version = self._get_openshift_version(task_vars)
+ openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
- if float(openshift_version) < 3.5:
+ if openshift_version_tuple < (3, 5):
return self.openshift_to_ovs_version["3.4"]
- ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ openshift_version = ".".join(str(x) for x in openshift_version_tuple)
+ ovs_version = self.openshift_to_ovs_version.get(openshift_version)
if ovs_version:
- return self.openshift_to_ovs_version[str(openshift_version)]
+ return self.openshift_to_ovs_version[openshift_version]
msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
raise OpenShiftCheckException(msg.format(openshift_version))
-
- def _get_openshift_version(self, task_vars):
- openshift_version = get_var(task_vars, "openshift_image_tag")
- if openshift_version and openshift_version[0] == 'v':
- openshift_version = openshift_version[1:]
-
- return self._parse_version(openshift_version)
-
- def _parse_version(self, version):
- components = version.split(".")
- if not components or len(components) < 2:
- msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(version))
-
- if components[0] in self.openshift_major_release_version:
- components[0] = self.openshift_major_release_version[components[0]]
-
- return '.'.join(components[:2])
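
get_major_minor_version, which this check now calls, is defined elsewhere and not shown in this diff; a plausible reconstruction of its contract, inferred from the deleted helpers above (treat the details as assumptions):

    def get_major_minor_version(image_tag):
        """Reduce an image tag like 'v3.5.0.1' to a comparable (major, minor) tuple.
        Origin 1.x releases map onto the enterprise 3.x numbering."""
        version = image_tag.lstrip("v").split("-")[0]
        major, minor = [int(x) for x in version.split(".")[:2]]
        if major == 1:
            major = 3
        return major, minor

    print(get_major_minor_version("v3.5.0.1"))  # (3, 5)
    print(get_major_minor_version("v1.5.1"))    # (3, 5): origin release mapped
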
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 0dd2b1286..a86180b00 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -1,5 +1,6 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+"""Check that required RPM packages are available."""
+
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -9,13 +10,13 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
name = "package_availability"
tags = ["preflight"]
- @classmethod
- def is_active(cls, task_vars):
- return super(PackageAvailability, cls).is_active(task_vars) and task_vars["ansible_pkg_mgr"] == "yum"
+ def is_active(self):
+ """Run only when yum is the package manager as the code is specific to it."""
+ return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
- def run(self, tmp, task_vars):
- rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
- group_names = get_var(task_vars, "group_names", default=[])
+ def run(self):
+ rpm_prefix = self.get_var("openshift", "common", "service_type")
+ group_names = self.get_var("group_names", default=[])
packages = set()
@@ -25,10 +26,11 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
+ return self.execute_module("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
+ """Return a list of RPMs that we expect a master install to have available."""
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-clients".format(rpm_prefix=rpm_prefix),
@@ -44,6 +46,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
@staticmethod
def node_packages(rpm_prefix):
+ """Return a list of RPMs that we expect a node install to have available."""
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-node".format(rpm_prefix=rpm_prefix),
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index f432380c6..1e9aecbe0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -1,14 +1,14 @@
-# pylint: disable=missing-docstring
+"""Check that a yum update would not run into conflicts with available packages."""
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
- """Check that there are no conflicts in RPM packages."""
+ """Check that a yum update would not run into conflicts with available packages."""
name = "package_update"
tags = ["preflight"]
- def run(self, tmp, task_vars):
+ def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
+ return self.execute_module("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 204752bd0..020786804 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,5 +1,5 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+"""Check that available RPM packages match the required versions."""
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks.mixins import NotContainerizedMixin
@@ -28,29 +28,28 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
"1": "3",
}
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
"""Skip hosts that do not have package requirements."""
- group_names = get_var(task_vars, "group_names", default=[])
+ group_names = self.get_var("group_names", default=[])
master_or_node = 'masters' in group_names or 'nodes' in group_names
- return super(PackageVersion, cls).is_active(task_vars) and master_or_node
+ return super(PackageVersion, self).is_active() and master_or_node
- def run(self, tmp, task_vars):
- rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
- openshift_release = get_var(task_vars, "openshift_release", default='')
- deployment_type = get_var(task_vars, "openshift_deployment_type")
+ def run(self):
+ rpm_prefix = self.get_var("openshift", "common", "service_type")
+ openshift_release = self.get_var("openshift_release", default='')
+ deployment_type = self.get_var("openshift_deployment_type")
check_multi_minor_release = deployment_type in ['openshift-enterprise']
args = {
"package_list": [
{
"name": "openvswitch",
- "version": self.get_required_ovs_version(task_vars),
+ "version": self.get_required_ovs_version(),
"check_multi": False,
},
{
"name": "docker",
- "version": self.get_required_docker_version(task_vars),
+ "version": self.get_required_docker_version(),
"check_multi": False,
},
{
@@ -71,13 +70,13 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args, tmp=tmp, task_vars=task_vars)
+ return self.execute_module("aos_version", args)
- def get_required_ovs_version(self, task_vars):
+ def get_required_ovs_version(self):
"""Return the correct Open vSwitch version for the current OpenShift version.
If the current OpenShift version is >= 3.5, ensure Open vSwitch version 2.6;
else ensure Open vSwitch version 2.4."""
- openshift_version = self.get_openshift_version(task_vars)
+ openshift_version = self.get_openshift_version()
if float(openshift_version) < 3.5:
return self.openshift_to_ovs_version["3.4"]
@@ -89,12 +88,12 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
raise OpenShiftCheckException(msg.format(openshift_version))
- def get_required_docker_version(self, task_vars):
+ def get_required_docker_version(self):
"""Return the correct Docker version for the current OpenShift version.
If the OpenShift version is 3.1, ensure Docker version 1.8.
If the OpenShift version is 3.2 or 3.3, ensure Docker version 1.10.
If the current OpenShift version is >= 3.4, ensure Docker version 1.12."""
- openshift_version = self.get_openshift_version(task_vars)
+ openshift_version = self.get_openshift_version()
if float(openshift_version) >= 3.4:
return self.openshift_to_docker_version["3.4"]
@@ -106,14 +105,16 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
raise OpenShiftCheckException(msg.format(openshift_version))
- def get_openshift_version(self, task_vars):
- openshift_version = get_var(task_vars, "openshift_image_tag")
+ def get_openshift_version(self):
+ """Return received image tag as a normalized X.Y minor version string."""
+ openshift_version = self.get_var("openshift_image_tag")
if openshift_version and openshift_version[0] == 'v':
openshift_version = openshift_version[1:]
return self.parse_version(openshift_version)
def parse_version(self, version):
+ """Return a normalized X.Y minor version string."""
components = version.split(".")
if not components or len(components) < 2:
msg = "An invalid version of OpenShift was found for this host: {}"
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index 9383b233c..2d068be3d 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -15,14 +15,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
name = _name
tags = _tags or []
- def __init__(self, execute_module=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None):
pass
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
return is_active
- def run(self, tmp, task_vars):
+ def run(self):
if run_exception is not None:
raise run_exception
return run_return
@@ -124,7 +123,7 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -138,7 +137,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test', 'changed': True}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -152,7 +151,7 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
check_return_value = {'failed': True}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -167,7 +166,7 @@ def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
exception_msg = 'fake check has an exception'
run_exception = OpenShiftCheckException(exception_msg)
check_class = fake_check(run_exception=run_exception)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -179,7 +178,7 @@ def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
result = plugin.run(tmp=None, task_vars=task_vars)
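
The test changes above track a matching signature change in the action plugin: load_known_checks is now expected to take the (tmp, task_vars) pair and hand them to each check's constructor, rather than each check's run() receiving them later. Roughly, as a hedged sketch rather than the plugin's actual code:

# Sketch of the plugin-side counterpart to these test stubs; the real plugin
# discovers check classes via openshift_checks.load_checks(), passed in here
# as an explicit check_classes argument to keep the sketch self-contained.
def load_known_checks(execute_module, tmp, task_vars, check_classes):
    """Instantiate every check once, with tmp/task_vars fixed at construction."""
    known_checks = {}
    for cls in check_classes:
        check = cls(execute_module=execute_module, task_vars=task_vars, tmp=tmp)
        known_checks[check.name] = check
    return known_checks

# e.g. with the fake_check factory above:
#   known = load_known_checks(None, None, {}, [fake_check()])
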
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 945b9eafc..e98d02c58 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -17,7 +17,7 @@ def test_is_active(group_names, is_active):
task_vars = dict(
group_names=group_names,
)
- assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+ assert DiskAvailability(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('ansible_mounts,extra_words', [
@@ -30,10 +30,9 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
group_names=['masters'],
ansible_mounts=ansible_mounts,
)
- check = DiskAvailability(execute_module=fake_execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ DiskAvailability(fake_execute_module, task_vars).run()
for word in 'determine disk availability'.split() + extra_words:
assert word in str(excinfo.value)
@@ -93,8 +92,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
ansible_mounts=ansible_mounts,
)
- check = DiskAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = DiskAvailability(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -168,8 +166,7 @@ def test_fails_with_insufficient_disk_space(group_names, configured_min, ansible
ansible_mounts=ansible_mounts,
)
- check = DiskAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = DiskAvailability(fake_execute_module, task_vars).run()
assert result['failed']
for word in 'below recommended'.split() + extra_words:
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 3b9e097fb..8d0a53df9 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -21,7 +21,7 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
openshift_deployment_type=deployment_type,
group_names=group_names,
)
- assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
+ assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
@@ -31,7 +31,7 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(False, True),
])
def test_all_images_available_locally(is_containerized, is_atomic):
- def execute_module(module_name, module_args, task_vars):
+ def execute_module(module_name, module_args, *_):
if module_name == "yum":
return {"changed": True}
@@ -42,7 +42,7 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ result = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
@@ -54,7 +54,7 @@ def test_all_images_available_locally(is_containerized, is_atomic):
openshift_deployment_type='origin',
openshift_image_tag='3.4',
group_names=['nodes', 'masters'],
- ))
+ )).run()
assert not result.get('failed', False)
@@ -64,12 +64,12 @@ def test_all_images_available_locally(is_containerized, is_atomic):
True,
])
def test_all_images_available_remotely(available_locally):
- def execute_module(module_name, module_args, task_vars):
+ def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
return {'changed': False}
- result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ result = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
@@ -81,13 +81,13 @@ def test_all_images_available_remotely(available_locally):
openshift_deployment_type='origin',
openshift_image_tag='v3.4',
group_names=['nodes', 'masters'],
- ))
+ )).run()
assert not result.get('failed', False)
def test_all_images_unavailable():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, *_):
if module_name == "command":
return {
'failed': True,
@@ -97,8 +97,7 @@ def test_all_images_unavailable():
'changed': False,
}
- check = DockerImageAvailability(execute_module=execute_module)
- actual = check.run(tmp=None, task_vars=dict(
+ actual = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
@@ -110,7 +109,7 @@ def test_all_images_unavailable():
openshift_deployment_type="openshift-enterprise",
openshift_image_tag='latest',
group_names=['nodes', 'masters'],
- ))
+ )).run()
assert actual['failed']
assert "required Docker images are not available" in actual['msg']
@@ -127,7 +126,7 @@ def test_all_images_unavailable():
),
])
def test_skopeo_update_failure(message, extra_words):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
@@ -137,7 +136,7 @@ def test_skopeo_update_failure(message, extra_words):
return {'changed': False}
- actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ actual = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
@@ -149,7 +148,7 @@ def test_skopeo_update_failure(message, extra_words):
openshift_deployment_type="openshift-enterprise",
openshift_image_tag='',
group_names=['nodes', 'masters'],
- ))
+ )).run()
assert actual["failed"]
for word in extra_words:
@@ -162,12 +161,12 @@ def test_skopeo_update_failure(message, extra_words):
("openshift-enterprise", []),
])
def test_registry_availability(deployment_type, registries):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, *_):
return {
'changed': False,
}
- actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ actual = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
@@ -179,7 +178,7 @@ def test_registry_availability(deployment_type, registries):
openshift_deployment_type=deployment_type,
openshift_image_tag='',
group_names=['nodes', 'masters'],
- ))
+ )).run()
assert not actual.get("failed", False)
@@ -258,7 +257,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
+ assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
def test_containerized_etcd():
@@ -272,4 +271,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
+ assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 99c529054..e0dccc062 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -4,12 +4,6 @@ from openshift_checks import OpenShiftCheckException
from openshift_checks.docker_storage import DockerStorage
-def dummy_check(execute_module=None):
- def dummy_exec(self, status, task_vars):
- raise Exception("dummy executor called")
- return DockerStorage(execute_module=execute_module or dummy_exec)
-
-
@pytest.mark.parametrize('is_containerized, group_names, is_active', [
(False, ["masters", "etcd"], False),
(False, ["masters", "nodes"], True),
@@ -20,7 +14,7 @@ def test_is_active(is_containerized, group_names, is_active):
openshift=dict(common=dict(is_containerized=is_containerized)),
group_names=group_names,
)
- assert DockerStorage.is_active(task_vars=task_vars) == is_active
+ assert DockerStorage(None, task_vars).is_active() == is_active
def non_atomic_task_vars():
@@ -99,17 +93,17 @@ def non_atomic_task_vars():
),
])
def test_check_storage_driver(docker_info, failed, expect_msg):
- def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ def execute_module(module_name, *_):
if module_name == "yum":
return {}
if module_name != "docker_info":
raise ValueError("not expecting module " + module_name)
return docker_info
- check = dummy_check(execute_module=execute_module)
- check.check_dm_usage = lambda status, task_vars: dict() # stub out for this test
- check.check_overlay_usage = lambda info, task_vars: dict() # stub out for this test
- result = check.run(tmp=None, task_vars=non_atomic_task_vars())
+ check = DockerStorage(execute_module, non_atomic_task_vars())
+ check.check_dm_usage = lambda status: dict() # stub out for this test
+ check.check_overlay_usage = lambda info: dict() # stub out for this test
+ result = check.run()
if failed:
assert result["failed"]
@@ -168,9 +162,9 @@ not_enough_space = {
),
])
def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
- check = dummy_check()
- check.get_vg_free = lambda pool, task_vars: vg_free
- result = check.check_dm_usage(driver_status, task_vars)
+ check = DockerStorage(None, task_vars)
+ check.get_vg_free = lambda pool: vg_free
+ result = check.check_dm_usage(driver_status)
result_success = not result.get("failed")
assert result_success is success
@@ -210,18 +204,18 @@ def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
)
])
def test_vg_free(pool, command_returns, raises, returns):
- def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ def execute_module(module_name, *_):
if module_name != "command":
raise ValueError("not expecting module " + module_name)
return command_returns
- check = dummy_check(execute_module=execute_module)
+ check = DockerStorage(execute_module)
if raises:
with pytest.raises(OpenShiftCheckException) as err:
- check.get_vg_free(pool, {})
+ check.get_vg_free(pool)
assert raises in str(err.value)
else:
- ret = check.get_vg_free(pool, {})
+ ret = check.get_vg_free(pool)
assert ret == returns
@@ -298,13 +292,13 @@ ansible_mounts_zero_size = [{
),
])
def test_overlay_usage(ansible_mounts, threshold, expect_fail, expect_msg):
- check = dummy_check()
task_vars = non_atomic_task_vars()
task_vars["ansible_mounts"] = ansible_mounts
if threshold is not None:
task_vars["max_overlay_usage_percent"] = threshold
+ check = DockerStorage(None, task_vars)
docker_info = dict(DockerRootDir="/var/lib/docker", Driver="overlay")
- result = check.check_overlay_usage(docker_info, task_vars)
+ result = check.check_overlay_usage(docker_info)
assert expect_fail == bool(result.get("failed"))
for msg in expect_msg:
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index b9d375d8c..67408609a 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -6,14 +6,6 @@ from openshift_checks.logging.elasticsearch import Elasticsearch
task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
-def canned_elasticsearch(exec_oc=None):
- """Create an Elasticsearch check object with canned exec_oc method"""
- check = Elasticsearch("dummy") # fails if a module is actually invoked
- if exec_oc:
- check._exec_oc = exec_oc
- return check
-
-
def assert_error(error, expect_error):
if expect_error:
assert error
@@ -50,10 +42,10 @@ split_es_pod = {
def test_check_elasticsearch():
- assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
+ assert 'No logging Elasticsearch pods' in Elasticsearch().check_elasticsearch([])
# canned oc responses to match so all the checks pass
- def _exec_oc(cmd, args, task_vars):
+ def _exec_oc(ns, cmd, args):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -65,7 +57,9 @@ def test_check_elasticsearch():
else:
raise Exception(cmd)
- assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
+ check = Elasticsearch(None, {})
+ check.exec_oc = _exec_oc
+ assert not check.check_elasticsearch([plain_es_pod])
def pods_by_name(pods):
@@ -88,9 +82,9 @@ def pods_by_name(pods):
])
def test_check_elasticsearch_masters(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
-
- errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
+ check = Elasticsearch(None, task_vars_config_base)
+ check.execute_module = lambda cmd, args: {'result': test_pods.pop(0)['_test_master_name_str']}
+ errors = check._check_elasticsearch_masters(pods_by_name(pods))
assert_error(''.join(errors), expect_error)
@@ -124,9 +118,10 @@ es_node_list = {
),
])
def test_check_elasticsearch_node_list(pods, node_list, expect_error):
- check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
+ check = Elasticsearch(None, task_vars_config_base)
+ check.execute_module = lambda cmd, args: {'result': json.dumps(node_list)}
- errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
+ errors = check._check_elasticsearch_node_list(pods_by_name(pods))
assert_error(''.join(errors), expect_error)
@@ -149,9 +144,10 @@ def test_check_elasticsearch_node_list(pods, node_list, expect_error):
])
def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
+ check = Elasticsearch(None, task_vars_config_base)
+ check.execute_module = lambda cmd, args: {'result': json.dumps(test_health_data.pop(0))}
- errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
+ errors = check._check_es_cluster_health(pods_by_name(pods))
assert_error(''.join(errors), expect_error)
@@ -174,7 +170,8 @@ def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
),
])
def test_check_elasticsearch_diskspace(disk_data, expect_error):
- check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
+ check = Elasticsearch(None, task_vars_config_base)
+ check.execute_module = lambda cmd, args: {'result': disk_data}
- errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
+ errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
assert_error(''.join(errors), expect_error)
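
With the canned_elasticsearch factory gone, these tests construct the check directly and overwrite collaborators per instance, which works because Python's attribute lookup finds the instance attribute before the class method. A minimal illustration of the pattern, using a stand-in class rather than the real check:

# Stand-in illustration of per-instance stubbing as used in the tests above.
class StubCheck(object):
    def exec_oc(self, ns, cmd, args):
        raise AssertionError("should be stubbed out by the test")

check = StubCheck()
check.exec_oc = lambda ns, cmd, args: "name logging-es"  # instance attribute wins
assert check.exec_oc("logging", "_cat/master", []) == "name logging-es"
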
diff --git a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
index df9d52d41..e3d6706fa 100644
--- a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
+++ b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
@@ -51,10 +51,10 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
task_vars = dict(
ansible_mounts=ansible_mounts,
)
- check = EtcdImageDataSize(execute_module=fake_execute_module)
+ check = EtcdImageDataSize(fake_execute_module, task_vars)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ check.run()
for word in 'determine valid etcd mountpath'.split() + extra_words:
assert word in str(excinfo.value)
@@ -111,14 +111,14 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
)
])
def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size_limit, should_fail, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
+ def execute_module(module_name, module_args, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
client = fake_etcd_client(tree)
- s, limit_exceeded = check_etcd_key_size(client, tree["key"], args["size_limit_bytes"])
+ s, limit_exceeded = check_etcd_key_size(client, tree["key"], module_args["size_limit_bytes"])
return {"size_limit_exceeded": limit_exceeded}
@@ -133,7 +133,7 @@ def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size
if size_limit is None:
task_vars.pop("etcd_max_image_data_size_bytes")
- check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = EtcdImageDataSize(execute_module, task_vars).run()
if should_fail:
assert check["failed"]
@@ -267,14 +267,14 @@ def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size
),
])
def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_path, expected_size, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
+ def execute_module(module_name, module_args, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
client = fake_etcd_client(tree)
- size, limit_exceeded = check_etcd_key_size(client, root_path, args["size_limit_bytes"])
+ size, limit_exceeded = check_etcd_key_size(client, root_path, module_args["size_limit_bytes"])
assert size == expected_size
return {
@@ -289,12 +289,12 @@ def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_
)
)
- check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = EtcdImageDataSize(execute_module, task_vars).run()
assert not check.get("failed", False)
def test_etcdkeysize_module_failure():
- def execute_module(module_name, tmp=None, task_vars=None):
+ def execute_module(module_name, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
@@ -317,7 +317,7 @@ def test_etcdkeysize_module_failure():
)
)
- check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = EtcdImageDataSize(execute_module, task_vars).run()
assert check["failed"]
for word in "Failed to retrieve stats":
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index 287175e29..f4316c423 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -21,7 +21,7 @@ def test_is_active(group_names, version, is_active):
common=dict(short_version=version),
),
)
- assert EtcdTraffic.is_active(task_vars=task_vars) == is_active
+ assert EtcdTraffic(task_vars=task_vars).is_active() == is_active
@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
@@ -30,7 +30,7 @@ def test_is_active(group_names, version, is_active):
(["etcd"], False, False, []),
])
def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, *_):
return {
"matched": matched,
"failed": failed,
@@ -43,8 +43,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
)
)
- check = EtcdTraffic(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = EtcdTraffic(execute_module, task_vars).run()
for word in extra_words:
assert word in result.get("msg", "")
@@ -63,7 +62,7 @@ def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_va
)
)
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, args, *_):
assert module_name == "search_journalctl"
matchers = args["log_matchers"]
@@ -72,9 +71,4 @@ def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_va
return {"failed": False}
- check = EtcdTraffic(execute_module=execute_module)
- check.run(tmp=None, task_vars=task_vars)
-
-
-def fake_execute_module(*args):
- raise AssertionError('this function should not be called')
+ EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/etcd_volume_test.py b/roles/openshift_health_checker/test/etcd_volume_test.py
index 917045526..0b255136e 100644
--- a/roles/openshift_health_checker/test/etcd_volume_test.py
+++ b/roles/openshift_health_checker/test/etcd_volume_test.py
@@ -11,10 +11,9 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
task_vars = dict(
ansible_mounts=ansible_mounts,
)
- check = EtcdVolume(execute_module=fake_execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ EtcdVolume(fake_execute_module, task_vars).run()
for word in 'Unable to find etcd storage mount point'.split() + extra_words:
assert word in str(excinfo.value)
@@ -76,8 +75,7 @@ def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
if task_vars["etcd_device_usage_threshold_percent"] is None:
task_vars.pop("etcd_device_usage_threshold_percent")
- check = EtcdVolume(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = EtcdVolume(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -137,8 +135,7 @@ def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts,
if task_vars["etcd_device_usage_threshold_percent"] is None:
task_vars.pop("etcd_device_usage_threshold_percent")
- check = EtcdVolume(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = EtcdVolume(fake_execute_module, task_vars).run()
assert result['failed']
for word in extra_words:
diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py
new file mode 100644
index 000000000..8a2d8b72b
--- /dev/null
+++ b/roles/openshift_health_checker/test/fluentd_config_test.py
@@ -0,0 +1,357 @@
+import pytest
+
+from openshift_checks.logging.fluentd_config import FluentdConfig, OpenShiftCheckException
+
+
+def canned_fluentd_pod(containers):
+ return {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {
+ "host": "node1",
+ "nodeName": "node1",
+ "containers": containers,
+ },
+ "status": {
+ "phase": "Running",
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+ }
+
+
+fluentd_pod = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {
+ "host": "node1",
+ "nodeName": "node1",
+ "containers": [
+ {
+ "name": "container1",
+ "env": [
+ {
+ "name": "USE_JOURNAL",
+ "value": "true",
+ }
+ ],
+ }
+ ],
+ },
+ "status": {
+ "phase": "Running",
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+not_running_fluentd_pod = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-2",
+ },
+ "status": {
+ "phase": "Unknown",
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+
+@pytest.mark.parametrize('name, use_journald, logging_driver, extra_words', [
+ (
+ 'test success with use_journald=false, and docker config set to use "json-file"',
+ False,
+ "json-file",
+ [],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_non_master(name, use_journald, logging_driver, extra_words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["nodes", "etcd"],
+ openshift_logging_fluentd_use_journal=use_journald,
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ error = check.check_logging_config()
+
+ assert error is None
+
+
+@pytest.mark.parametrize('name, use_journald, logging_driver, words', [
+ (
+ 'test failure with use_journald=false, but docker config set to use "journald"',
+ False,
+ "journald",
+ ['json log files', 'has been set to use "journald"'],
+ ),
+ (
+ 'test failure with use_journald=false, but docker config set to use an "unsupported" driver',
+ False,
+ "unsupported",
+ ["json log files", 'has been set to use "unsupported"'],
+ ),
+ (
+ 'test failure with use_journald=true, but docker config set to use "json-file"',
+ True,
+ "json-file",
+ ['logs from "journald"', 'has been set to use "json-file"'],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_non_master_failed(name, use_journald, logging_driver, words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["nodes", "etcd"],
+ openshift_logging_fluentd_use_journal=use_journald,
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ error = check.check_logging_config()
+
+ assert error is not None
+ for word in words:
+ assert word in error
+
+
+@pytest.mark.parametrize('name, pods, logging_driver, extra_words', [
+ # use_journald returns false (not using journald), but check succeeds
+ # since docker is set to use json-file
+ (
+ 'test success with use_journald=false, and docker config set to use default driver "json-file"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "USE_JOURNAL",
+ "value": "false",
+ }],
+ },
+ ]
+ )],
+ "json-file",
+ [],
+ ),
+ (
+ 'test success with USE_JOURNAL env var missing and docker config set to use default driver "json-file"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "RANDOM",
+ "value": "value",
+ }],
+ },
+ ]
+ )],
+ "json-file",
+ [],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master(name, pods, logging_driver, extra_words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["masters"],
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ def get_pods(namespace, logging_component):
+ return pods, None
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ check.get_pods_for_component = get_pods
+ error = check.check_logging_config()
+
+ assert error is None
+
+
+@pytest.mark.parametrize('name, pods, logging_driver, words', [
+ (
+ 'test failure with use_journald=false, but docker config set to use "journald"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "USE_JOURNAL",
+ "value": "false",
+ }],
+ },
+ ]
+ )],
+ "journald",
+ ['json log files', 'has been set to use "journald"'],
+ ),
+ (
+ 'test failure with use_journald=true, but docker config set to use "json-file"',
+ [fluentd_pod],
+ "json-file",
+ ['logs from "journald"', 'has been set to use "json-file"'],
+ ),
+ (
+ 'test failure with use_journald=false, but docker set to use an "unsupported" driver',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "USE_JOURNAL",
+ "value": "false",
+ }],
+ },
+ ]
+ )],
+ "unsupported",
+ ["json log files", 'has been set to use "unsupported"'],
+ ),
+ (
+ 'test failure with USE_JOURNAL env var missing and docker config set to use "journald"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "RANDOM",
+ "value": "value",
+ }],
+ },
+ ]
+ )],
+ "journald",
+ ["configuration is set to", "json log files"],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master_failed(name, pods, logging_driver, words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["masters"],
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ def get_pods(namespace, logging_component):
+ return pods, None
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ check.get_pods_for_component = get_pods
+ error = check.check_logging_config()
+
+ assert error is not None
+ for word in words:
+ assert word in error
+
+
+@pytest.mark.parametrize('name, pods, response, logging_driver, extra_words', [
+ (
+ 'test OpenShiftCheckException with no running containers',
+ [canned_fluentd_pod([])],
+ {
+ "failed": True,
+ "result": "unexpected",
+ },
+ "json-file",
+ ['no running containers'],
+ ),
+ (
+ 'test OpenShiftCheckException one container and no env vars set',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [],
+ },
+ ]
+ )],
+ {
+ "failed": True,
+ "result": "unexpected",
+ },
+ "json-file",
+ ['no environment variables'],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, response, logging_driver, extra_words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["masters"],
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ def get_pods(namespace, logging_component):
+ return pods, None
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.get_pods_for_component = get_pods
+
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.check_logging_config()
+
+ assert error is not None
+ for word in extra_words:
+ assert word in str(error)
diff --git a/roles/openshift_health_checker/test/fluentd_test.py b/roles/openshift_health_checker/test/fluentd_test.py
index d151c0b19..a84d89cef 100644
--- a/roles/openshift_health_checker/test/fluentd_test.py
+++ b/roles/openshift_health_checker/test/fluentd_test.py
@@ -4,14 +4,6 @@ import json
from openshift_checks.logging.fluentd import Fluentd
-def canned_fluentd(exec_oc=None):
- """Create a Fluentd check object with canned exec_oc method"""
- check = Fluentd("dummy") # fails if a module is actually invoked
- if exec_oc:
- check._exec_oc = exec_oc
- return check
-
-
def assert_error(error, expect_error):
if expect_error:
assert error
@@ -103,7 +95,7 @@ fluentd_node3_unlabeled = {
),
])
def test_get_fluentd_pods(pods, nodes, expect_error):
- check = canned_fluentd(lambda cmd, args, task_vars: json.dumps(dict(items=nodes)))
-
- error = check.check_fluentd(pods, {})
+ check = Fluentd()
+ check.exec_oc = lambda ns, cmd, args: json.dumps(dict(items=nodes))
+ error = check.check_fluentd(pods)
assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 40a5d19d8..0bf492511 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -11,14 +11,6 @@ except ImportError:
from openshift_checks.logging.kibana import Kibana
-def canned_kibana(exec_oc=None):
- """Create a Kibana check object with canned exec_oc method"""
- check = Kibana("dummy") # fails if a module is actually invoked
- if exec_oc:
- check._exec_oc = exec_oc
- return check
-
-
def assert_error(error, expect_error):
if expect_error:
assert error
@@ -68,7 +60,7 @@ not_running_kibana_pod = {
),
])
def test_check_kibana(pods, expect_error):
- check = canned_kibana()
+ check = Kibana()
error = check.check_kibana(pods)
assert_error(error, expect_error)
@@ -137,9 +129,10 @@ def test_check_kibana(pods, expect_error):
),
])
def test_get_kibana_url(route, expect_url, expect_error):
- check = canned_kibana(lambda cmd, args, task_vars: json.dumps(route) if route else "")
+ check = Kibana()
+ check.exec_oc = lambda ns, cmd, args: json.dumps(route) if route else ""
- url, error = check._get_kibana_url({})
+ url, error = check._get_kibana_url()
if expect_url:
assert url == expect_url
else:
@@ -169,10 +162,10 @@ def test_get_kibana_url(route, expect_url, expect_error):
),
])
def test_verify_url_internal_failure(exec_result, expect):
- check = Kibana(execute_module=lambda module_name, args, tmp, task_vars: dict(failed=True, msg=exec_result))
- check._get_kibana_url = lambda task_vars: ('url', None)
+ check = Kibana(execute_module=lambda *_: dict(failed=True, msg=exec_result))
+ check._get_kibana_url = lambda: ('url', None)
- error = check._check_kibana_route({})
+ error = check._check_kibana_route()
assert_error(error, expect)
@@ -210,9 +203,9 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
raise lib_result
monkeypatch.setattr(urllib2, 'urlopen', urlopen)
- check = canned_kibana()
- check._get_kibana_url = lambda task_vars: ('url', None)
- check._verify_url_internal = lambda url, task_vars: None
+ check = Kibana()
+ check._get_kibana_url = lambda: ('url', None)
+ check._verify_url_internal = lambda url: None
- error = check._check_kibana_route({})
+ error = check._check_kibana_route()
assert_error(error, expect)
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 4f71fbf52..6f1697ee6 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -11,7 +11,7 @@ logging_namespace = "logging"
def canned_loggingcheck(exec_oc=None):
"""Create a LoggingCheck object with canned exec_oc method"""
- check = LoggingCheck("dummy") # fails if a module is actually invoked
+ check = LoggingCheck() # fails if a module is actually invoked
check.logging_namespace = 'logging'
if exec_oc:
check.exec_oc = exec_oc
@@ -90,15 +90,15 @@ plain_curator_pod = {
("Permission denied", "Unexpected error using `oc`"),
])
def test_oc_failure(problem, expect):
- def execute_module(module_name, args, tmp, task_vars):
+ def execute_module(module_name, *_):
if module_name == "ocutil":
return dict(failed=True, result=problem)
return dict(changed=False)
- check = LoggingCheck({})
+ check = LoggingCheck(execute_module, task_vars_config_base)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.exec_oc(execute_module, logging_namespace, 'get foo', [], task_vars=task_vars_config_base)
+ check.exec_oc(logging_namespace, 'get foo', [])
assert expect in str(excinfo)
@@ -121,7 +121,7 @@ def test_is_active(groups, logging_deployed, is_active):
openshift_hosted_logging_deploy=logging_deployed,
)
- assert LoggingCheck.is_active(task_vars=task_vars) == is_active
+ assert LoggingCheck(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
@@ -137,12 +137,10 @@ def test_is_active(groups, logging_deployed, is_active):
),
])
def test_get_pods_for_component(pod_output, expect_pods, expect_error):
- check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: pod_output)
+ check = canned_loggingcheck(lambda namespace, cmd, args: pod_output)
pods, error = check.get_pods_for_component(
- lambda name, args, task_vars: {},
logging_namespace,
"es",
- {}
)
assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 79e7c7d4c..178d7cd84 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -10,7 +10,7 @@ SAMPLE_UUID = "unique-test-uuid"
def canned_loggingindextime(exec_oc=None):
"""Create a check object with a canned exec_oc method"""
- check = LoggingIndexTime("dummy") # fails if a module is actually invoked
+ check = LoggingIndexTime() # fails if a module is actually invoked
if exec_oc:
check.exec_oc = exec_oc
return check
@@ -64,7 +64,7 @@ not_running_kibana_pod = {
)
])
def test_check_running_pods(pods, expect_pods):
- check = canned_loggingindextime(None)
+ check = canned_loggingindextime()
pods = check.running_pods(pods)
assert pods == expect_pods
@@ -81,11 +81,8 @@ def test_check_running_pods(pods, expect_pods):
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout, extra_words):
- def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
- return json.dumps(json_response)
-
- check = canned_loggingindextime(exec_oc)
- check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout, None)
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@pytest.mark.parametrize('name, json_response, uuid, timeout, extra_words', [
@@ -116,12 +113,9 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout, extr
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, uuid, timeout, extra_words):
- def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
- return json.dumps(json_response)
-
- check = canned_loggingindextime(exec_oc)
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
- check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout, None)
+ check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
for word in extra_words:
assert word in str(error)
@@ -138,13 +132,10 @@ def test_wait_until_cmd_or_err(name, json_response, uuid, timeout, extra_words):
),
], ids=lambda argval: argval[0])
def test_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
- def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
- return json.dumps(json_response)
-
- check = canned_loggingindextime(exec_oc)
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
check.generate_uuid = lambda: uuid
- result = check.curl_kibana_with_uuid(plain_running_kibana_pod, None)
+ result = check.curl_kibana_with_uuid(plain_running_kibana_pod)
for word in extra_words:
assert word in result
@@ -169,14 +160,11 @@ def test_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
- def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
- return json.dumps(json_response)
-
- check = canned_loggingindextime(exec_oc)
+ check = canned_loggingindextime(lambda *_: json.dumps(json_response))
check.generate_uuid = lambda: uuid
with pytest.raises(OpenShiftCheckException) as error:
- check.curl_kibana_with_uuid(plain_running_kibana_pod, None)
+ check.curl_kibana_with_uuid(plain_running_kibana_pod)
for word in extra_words:
assert word in str(error)
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index 4fbaea0a9..aee2f0416 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -17,7 +17,7 @@ def test_is_active(group_names, is_active):
task_vars = dict(
group_names=group_names,
)
- assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+ assert MemoryAvailability(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
@@ -59,8 +59,7 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m
ansible_memtotal_mb=ansible_memtotal_mb,
)
- check = MemoryAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = MemoryAvailability(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -117,8 +116,7 @@ def test_fails_with_insufficient_memory(group_names, configured_min, ansible_mem
ansible_memtotal_mb=ansible_memtotal_mb,
)
- check = MemoryAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = MemoryAvailability(fake_execute_module, task_vars).run()
assert result.get('failed', False)
for word in 'below recommended'.split() + extra_words:
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
index 2d83e207d..b1a41ca3c 100644
--- a/roles/openshift_health_checker/test/mixins_test.py
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -14,10 +14,10 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
(dict(openshift=dict(common=dict(is_containerized=True))), False),
])
def test_is_active(task_vars, expected):
- assert NotContainerizedCheck.is_active(task_vars) == expected
+ assert NotContainerizedCheck(None, task_vars).is_active() == expected
def test_is_active_missing_task_vars():
with pytest.raises(OpenShiftCheckException) as excinfo:
- NotContainerizedCheck.is_active(task_vars={})
+ NotContainerizedCheck().is_active()
assert 'is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index e3153979c..43aa875f4 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -1,7 +1,7 @@
import pytest
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
-from openshift_checks import load_checks, get_var
+from openshift_checks import load_checks
# Fixtures
@@ -28,34 +28,23 @@ def test_OpenShiftCheck_init():
name = "test_check"
run = NotImplemented
- # initialization requires at least one argument (apart from self)
- with pytest.raises(TypeError) as excinfo:
- TestCheck()
+ # execute_module required at init if it will be used
+ with pytest.raises(RuntimeError) as excinfo:
+ TestCheck().execute_module("foo")
assert 'execute_module' in str(excinfo.value)
- assert 'module_executor' in str(excinfo.value)
execute_module = object()
# initialize with positional argument
check = TestCheck(execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check._execute_module == execute_module
- # initialize with keyword argument, recommended name
+ # initialize with keyword argument
check = TestCheck(execute_module=execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check._execute_module == execute_module
- # initialize with keyword argument, deprecated name
- check = TestCheck(module_executor=execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check.task_vars == {}
+ assert check.tmp is None
def test_subclasses():
@@ -81,19 +70,27 @@ def test_load_checks():
assert modules
+def dummy_check(task_vars):
+ class TestCheck(OpenShiftCheck):
+ name = "dummy"
+ run = NotImplemented
+
+ return TestCheck(task_vars=task_vars)
+
+
@pytest.mark.parametrize("keys,expected", [
(("foo",), 42),
(("bar", "baz"), "openshift"),
])
def test_get_var_ok(task_vars, keys, expected):
- assert get_var(task_vars, *keys) == expected
+ assert dummy_check(task_vars).get_var(*keys) == expected
def test_get_var_error(task_vars, missing_keys):
with pytest.raises(OpenShiftCheckException):
- get_var(task_vars, *missing_keys)
+ dummy_check(task_vars).get_var(*missing_keys)
def test_get_var_default(task_vars, missing_keys):
default = object()
- assert get_var(task_vars, *missing_keys, default=default) == default
+ assert dummy_check(task_vars).get_var(*missing_keys, default=default) == default
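
Taken together, the dummy_check fixture and these three tests pin down the instance get_var behavior: nested lookup, an exception on a missing path, and a default= escape hatch. Usage reads like this (a sketch, assuming the dummy_check defined above):

# Usage sketch of the instance-based get_var, per the tests above.
check = dummy_check({"openshift": {"common": {"service_type": "origin"}}})
assert check.get_var("openshift", "common", "service_type") == "origin"
assert check.get_var("no", "such", "key", default="fallback") == "fallback"
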
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6494e1c06..b6acef5a6 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -4,7 +4,7 @@ from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
def test_openshift_version_not_supported():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(*_):
return {}
openshift_release = '111.7.0'
@@ -16,15 +16,14 @@ def test_openshift_version_not_supported():
openshift_deployment_type='origin',
)
- check = OvsVersion(execute_module=execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ OvsVersion(execute_module, task_vars).run()
assert "no recommended version of Open vSwitch" in str(excinfo.value)
def test_invalid_openshift_release_format():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(*_):
return {}
task_vars = dict(
@@ -33,9 +32,8 @@ def test_invalid_openshift_release_format():
openshift_deployment_type='origin',
)
- check = OvsVersion(execute_module=execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ OvsVersion(execute_module, task_vars).run()
assert "invalid version" in str(excinfo.value)
@@ -54,7 +52,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
)
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
assert "package_list" in module_args
@@ -64,8 +62,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
return return_value
- check = OvsVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = OvsVersion(execute_module, task_vars).run()
assert result is return_value
@@ -86,4 +83,4 @@ def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized
group_names=group_names,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
- assert OvsVersion.is_active(task_vars=task_vars) == is_active
+ assert OvsVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index f7e916a46..1fe648b75 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -14,7 +14,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
ansible_pkg_mgr=pkg_mgr,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
- assert PackageAvailability.is_active(task_vars=task_vars) == is_active
+ assert PackageAvailability(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
@@ -51,13 +51,12 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
return return_value
- check = PackageAvailability(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = PackageAvailability(execute_module, task_vars).run()
assert result is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 5e000cff5..06489b0d7 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -4,13 +4,12 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
return return_value
- check = PackageUpdate(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=None)
+ result = PackageUpdate(execute_module).run()
assert result is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 1bb6371ae..1ddb9cecb 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -8,7 +8,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
('0.0.0', ["no recommended version of Docker"]),
])
def test_openshift_version_not_supported(openshift_release, extra_words):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(*_):
return {}
task_vars = dict(
@@ -18,16 +18,16 @@ def test_openshift_version_not_supported(openshift_release, extra_words):
openshift_deployment_type='origin',
)
- check = PackageVersion(execute_module=execute_module)
+ check = PackageVersion(execute_module, task_vars)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ check.run()
for word in extra_words:
assert word in str(excinfo.value)
def test_invalid_openshift_release_format():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(*_):
return {}
task_vars = dict(
@@ -36,9 +36,9 @@ def test_invalid_openshift_release_format():
openshift_deployment_type='origin',
)
- check = PackageVersion(execute_module=execute_module)
+ check = PackageVersion(execute_module, task_vars)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ check.run()
assert "invalid version" in str(excinfo.value)
@@ -57,7 +57,7 @@ def test_package_version(openshift_release):
)
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
assert "package_list" in module_args
@@ -67,8 +67,8 @@ def test_package_version(openshift_release):
return return_value
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ check = PackageVersion(execute_module, task_vars)
+ result = check.run()
assert result is return_value
@@ -89,7 +89,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
)
return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
assert "package_list" in module_args
@@ -99,8 +99,8 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
return return_value
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ check = PackageVersion(execute_module, task_vars)
+ result = check.run()
assert result is return_value
@@ -121,4 +121,4 @@ def test_package_version_skip_when_not_master_nor_node(group_names, is_container
group_names=group_names,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
- assert PackageVersion.is_active(task_vars=task_vars) == is_active
+ assert PackageVersion(None, task_vars).is_active() == is_active
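The test updates above track the refactored check API: checks are now constructed with `(execute_module, task_vars)`, and `run()`/`is_active()` take no arguments. As a hedged sketch of how these checks are typically driven, assuming the `openshift_health_check` action plugin accepts a list of check names (illustrative, not part of this diff):

```yaml
# Sketch only: invoke the refactored package checks from a playbook.
# Assumes the openshift_health_check action plugin and these check
# names; adjust to the checks actually installed.
- hosts: OSEv3
  roles:
    - openshift_health_checker
  post_tasks:
    - action: openshift_health_check
      args:
        checks:
          - package_availability
          - package_update
          - package_version
```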
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 089054e2f..0391e5602 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -29,7 +29,7 @@ openshift_hosted_routers:
openshift_hosted_router_certificate: {}
openshift_hosted_registry_cert_expire_days: 730
-openshift_hosted_router_create_certificate: False
+openshift_hosted_router_create_certificate: True
os_firewall_allow:
- service: Docker Registry Port
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index c60b67862..dd485a64a 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -23,8 +23,8 @@
signer_key: "{{ openshift_master_config_dir }}/ca.key"
signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
hostnames:
- - "{{ openshift_master_default_subdomain }}"
- - "*.{{ openshift_master_default_subdomain }}"
+ - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+ - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
with_items: "{{ openshift_hosted_routers }}"
@@ -37,7 +37,7 @@
cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
# End Block
- when: openshift_hosted_router_create_certificate | bool
+ when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
- name: Get the certificate contents for router
copy:
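With `openshift_hosted_router_create_certificate` now defaulting to `True`, the role generates a wildcard certificate for the default subdomain unless the inventory supplies one; the tightened `when` above skips generation whenever `openshift_hosted_router_certificate` is non-empty. A hedged group_vars sketch (paths are illustrative):

```yaml
# Sketch only: supplying a custom router certificate suppresses the
# generated one under the new condition.
openshift_hosted_router_create_certificate: true
openshift_hosted_router_certificate:
  certfile: /path/to/router.crt
  keyfile: /path/to/router.key
  cafile: /path/to/router-ca.crt
```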
diff --git a/roles/openshift_loadbalancer/README.md b/roles/openshift_loadbalancer/README.md
index bea4c509b..330895f20 100644
--- a/roles/openshift_loadbalancer/README.md
+++ b/roles/openshift_loadbalancer/README.md
@@ -25,6 +25,7 @@ From this role:
| openshift_loadbalancer_default_maxconn | 20000 | Maximum per-process number of concurrent connections. |
| openshift_loadbalancer_frontends | none | List of frontends. See example below. |
| openshift_loadbalancer_backends | none | List of backends. See example below. |
+| openshift_image_tag | none | Image tag for the containerized haproxy image. |
Dependencies
------------
@@ -64,6 +65,7 @@ Example Playbook
- name: master3
address: "192.168.122.223:8443"
opts: check
+ openshift_image_tag: v3.6.153
```
License
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index d2ef7cc71..84ead3548 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -57,6 +57,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
- `openshift_logging_fluentd_buffer_queue_limit`: Buffer queue limit for Fluentd. Defaults to 1024.
- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.
+- `openshift_logging_fluentd_file_buffer_limit`: The file buffer limit for Fluentd. Defaults to '1Gi' per destination.
- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
@@ -134,16 +135,23 @@ Elasticsearch OPS too, if using an OPS cluster:
secure_forward forwarder for the node agent Fluentd daemonsets running in the
cluster. This can be used to reduce the number of connections to the
OpenShift API server, by using `mux` and configuring each node Fluentd to
- send raw logs to mux and turn off the k8s metadata plugin.
+ send raw logs to mux and turn off the k8s metadata plugin. This requires the
+ use of `openshift_logging_mux_client_mode` (see below).
- `openshift_logging_mux_allow_external`: Default `False`. If this is `True`,
the `mux` service will be deployed, and it will be configured to allow
Fluentd clients running outside of the cluster to send logs using
secure_forward. This allows OpenShift logging to be used as a central
logging service for clients other than OpenShift, or other OpenShift
clusters.
-- `openshift_logging_use_mux_client`: Default `False`. If this is `True`, the
- node agent Fluentd services will be configured to send logs to the mux
- service rather than directly to Elasticsearch.
+- `openshift_logging_mux_client_mode`: One of `minimal` or `maximal`.
+ Default is unset. Setting this value causes the Fluentd node agent to
+ send logs to mux rather than directly to Elasticsearch. The value
+ `maximal` means that Fluentd will do as much processing as possible at the
+ node before sending the records to mux. Due to current mux scaling
+ issues, `maximal` is the recommended mode.
+ The value `minimal` means that Fluentd will do *no* processing at all and
+ send the raw logs to mux for processing. We do not currently recommend
+ this mode, and Ansible will warn you if you select it.
- `openshift_logging_mux_hostname`: Default is "mux." +
`openshift_master_default_subdomain`. This is the hostname *external*_
clients will use to connect to mux, and will be used in the TLS server cert
@@ -160,3 +168,18 @@ Elasticsearch OPS too, if using an OPS cluster:
need to set this
- `openshift_logging_mux_buffer_queue_limit`: Default `[1024]` - Buffer queue limit for Mux.
- `openshift_logging_mux_buffer_size_limit`: Default `[1m]` - Buffer chunk limit for Mux.
+- `openshift_logging_mux_file_buffer_limit`: Default `[2Gi]` per destination - the
+ file buffer limit for Mux.
+- `openshift_logging_mux_file_buffer_storage_type`: Default `[emptydir]` - Storage
+ type for the file buffer. One of [`emptydir`, `pvc`, `hostmount`].
+
+- `openshift_logging_mux_file_buffer_pvc_size`: The requested size for the file buffer
+ PVC; when not provided, the role will not generate any PVCs. Defaults to `4Gi`.
+- `openshift_logging_mux_file_buffer_pvc_dynamic`: Whether or not to add the dynamic
+ PVC annotation for any generated PVCs. Defaults to 'False'.
+- `openshift_logging_mux_file_buffer_pvc_pv_selector`: A key/value map added to a PVC
+ in order to select specific PVs. Defaults to 'None'.
+- `openshift_logging_mux_file_buffer_pvc_prefix`: The prefix for the generated PVCs.
+ Defaults to 'logging-mux'.
+- `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux.
+ Defaults to '65534'.
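Taken together, the README options above map directly to inventory variables. A hedged group_vars sketch combining the new mux settings (values are illustrative, not recommendations):

```yaml
# Sketch only: enable mux with the node agents doing most of the
# processing, and give mux a modest file buffer.
openshift_logging_use_mux: true
openshift_logging_mux_client_mode: maximal
openshift_logging_mux_file_buffer_limit: 2Gi
openshift_logging_mux_file_buffer_storage_type: emptydir
```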
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 1c243f934..8b0f4cb62 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -157,8 +157,6 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
-openshift_logging_use_mux_client: False
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
openshift_logging_mux_cpu_limit: 500m
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 5c5bbf84c..464e8594f 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -60,6 +60,9 @@
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
when: openshift_logging_es_pvc_prefix == ""
+- set_fact:
+ elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"
+
# We don't allow scaling down of ES nodes currently
- include_role:
name: openshift_logging_elasticsearch
@@ -70,7 +73,7 @@
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
@@ -91,7 +94,7 @@
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
@@ -110,6 +113,11 @@
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
when: openshift_logging_es_ops_pvc_prefix == ""
+- set_fact:
+ elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}"
+ when:
+ - openshift_logging_use_ops | bool
+
- include_role:
name: openshift_logging_elasticsearch
vars:
@@ -120,7 +128,7 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -149,7 +157,7 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
index 92e68a0a3..49e8a18af 100644
--- a/roles/openshift_logging/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.6') }}"
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index ae7e48caa..3113fb3c9 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -91,7 +91,7 @@
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
- replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
+ curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index 1bf9b9de2..6431f86d9 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(1)}}
+ replicas: {{curator_replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 68726aa78..0548e3c40 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -206,7 +206,7 @@
storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- - not openshift_logging_elasticsearch_pvc_dynamic
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
# Storageclasses are used by default if configured
- name: Creating ES storage template - dynamic
@@ -220,7 +220,7 @@
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- - openshift_logging_elasticsearch_pvc_dynamic
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
- name: Set ES storage
oc_obj:
@@ -257,7 +257,7 @@
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
- replicas: 1
+ es_replicas: 1
- name: Set ES dc
oc_obj:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 7424db6f6..cbe6b89f2 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -8,7 +8,7 @@ metadata:
deployment: "{{deploy_name}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(1)}}
+ replicas: {{es_replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -78,7 +78,7 @@ spec:
name: "INSTANCE_RAM"
value: "{{openshift_logging_elasticsearch_memory_limit}}"
-
- name: "HEAP_DUMP_LOCATION"
+ name: "HEAP_DUMP_LOCATION"
value: "/elasticsearch/persistent/heapdump.hprof"
-
name: "NODE_QUORUM"
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index ce7cfc433..a53bbd2df 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -48,7 +48,6 @@ openshift_logging_fluentd_aggregating_strict: "no"
openshift_logging_fluentd_aggregating_cert_path: none
openshift_logging_fluentd_aggregating_key_path: none
openshift_logging_fluentd_aggregating_passphrase: none
-openshift_logging_use_mux_client: False
### Deprecating in 3.6
openshift_logging_fluentd_es_copy: false
@@ -57,3 +56,5 @@ openshift_logging_fluentd_es_copy: false
#fluentd_config_contents:
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
+
+openshift_logging_fluentd_file_buffer_limit: 1Gi
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 55de2ae8d..9dfc6fc86 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -23,6 +23,14 @@
msg: openshift_hosted_logging_use_journal is deprecated. Fluentd will automatically detect which logging driver is being used.
when: openshift_hosted_logging_use_journal is defined
+- fail:
+ msg: Invalid openshift_logging_mux_client_mode [{{ openshift_logging_mux_client_mode }}] - must be one of {{ __allowed_mux_client_modes }}
+ when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode not in __allowed_mux_client_modes
+
+- debug:
+ msg: "WARNING: Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues"
+ when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal'
+
- include: determine_version.yaml
# allow passing in a tempdir
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 970e5c2a5..39dffba19 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -62,7 +62,9 @@ spec:
- name: dockerdaemoncfg
mountPath: /etc/docker
readOnly: true
-{% if openshift_logging_use_mux_client | bool %}
+ - name: filebufferstorage
+ mountPath: /var/lib/fluentd
+{% if openshift_logging_mux_client_mode is defined %}
- name: muxcerts
mountPath: /etc/fluent/muxkeys
readOnly: true
@@ -110,8 +112,12 @@ spec:
resourceFieldRef:
containerName: "{{ daemonset_container_name }}"
resource: limits.memory
- - name: "USE_MUX_CLIENT"
- value: "{{ openshift_logging_use_mux_client | default('false') | lower }}"
+ - name: "FILE_BUFFER_LIMIT"
+ value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
+{% if openshift_logging_mux_client_mode is defined %}
+ - name: "MUX_CLIENT_MODE"
+ value: "{{ openshift_logging_mux_client_mode }}"
+{% endif %}
volumes:
- name: runlogjournal
hostPath:
@@ -140,8 +146,11 @@ spec:
- name: dockerdaemoncfg
hostPath:
path: /etc/docker
-{% if openshift_logging_use_mux_client | bool %}
+{% if openshift_logging_mux_client_mode is defined %}
- name: muxcerts
secret:
secretName: logging-mux
{% endif %}
+ - name: filebufferstorage
+ hostPath:
+ path: "/var/lib/fluentd"
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index ad3fb0bdd..ec8e565c3 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -2,3 +2,4 @@
__latest_fluentd_version: "3_5"
__allowed_fluentd_versions: ["3_5", "3_6"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
+__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 93cb82793..62bc26e37 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -233,7 +233,7 @@
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
- replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
+ kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
- name: Set Kibana DC
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index f8043812b..512d99d06 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{ component }}"
logging-infra: "{{ logging_component }}"
spec:
- replicas: {{ replicas | default(1) }}
+ replicas: {{ kibana_replicas | default(1) }}
selector:
provider: openshift
component: "{{ component }}"
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 797a27c1b..7a3da9b4c 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -28,6 +28,7 @@ openshift_logging_mux_journal_source: "{{ openshift_hosted_logging_journal_sourc
openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
# the namespace to use for undefined projects should come first, followed by any
@@ -47,3 +48,20 @@ openshift_logging_mux_ops_ca: /etc/fluent/keys/ca
#mux_config_contents:
#mux_throttle_contents:
#mux_secureforward_contents:
+
+# One of ['emptydir', 'pvc', 'hostmount']
+openshift_logging_mux_file_buffer_storage_type: "emptydir"
+
+# pvc options
+# the name of the PVC we will bind to -- create it if it does not exist
+openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc"
+
+# required if the PVC does not already exist
+openshift_logging_mux_file_buffer_pvc_size: 4Gi
+openshift_logging_mux_file_buffer_pvc_dynamic: false
+openshift_logging_mux_file_buffer_pvc_pv_selector: {}
+openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce']
+openshift_logging_mux_file_buffer_storage_group: '65534'
+
+openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux"
+openshift_logging_mux_file_buffer_limit: 2Gi
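The PVC-related defaults above only take effect once the storage type is switched to `pvc`; note that `openshift_logging_mux_file_buffer_pvc_storage_class_name`, referenced by the Create Mux PVC task below, has no default here. A hedged sketch of opting in (the storage class name is illustrative):

```yaml
# Sketch only: use a PVC-backed file buffer for mux.
openshift_logging_mux_file_buffer_storage_type: pvc
openshift_logging_mux_file_buffer_pvc_name: logging-mux-pvc
openshift_logging_mux_file_buffer_pvc_size: 4Gi
openshift_logging_mux_file_buffer_pvc_dynamic: true
openshift_logging_mux_file_buffer_pvc_storage_class_name: gluster-storage
```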
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 54af40070..8ec93de7d 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -172,11 +172,23 @@
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
- replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
+ mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
check_mode: no
changed_when: no
+- name: Create Mux PVC
+ oc_pvc:
+ state: present
+ name: "{{ openshift_logging_mux_file_buffer_pvc_name }}"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ volume_capacity: "{{ openshift_logging_mux_file_buffer_pvc_size }}"
+ access_modes: "{{ openshift_logging_mux_file_buffer_pvc_access_modes | list }}"
+ selector: "{{ openshift_logging_mux_file_buffer_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_mux_file_buffer_pvc_storage_class_name | default('', true) }}"
+ when:
+ - openshift_logging_mux_file_buffer_storage_type == "pvc"
+
- name: Set logging-mux DC
oc_obj:
state: present
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 226294847..70afe5cee 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(1)}}
+ replicas: {{mux_replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -66,6 +66,8 @@ spec:
- name: muxcerts
mountPath: /etc/fluent/muxkeys
readOnly: true
+ - name: filebufferstorage
+ mountPath: /var/lib/fluentd
env:
- name: "K8S_HOST_URL"
value: "{{openshift_logging_mux_master_url}}"
@@ -99,8 +101,6 @@ spec:
value: "{{ openshift_logging_mux_port }}"
- name: USE_MUX
value: "true"
- - name: MUX_ALLOW_EXTERNAL
- value: "{{ openshift_logging_mux_allow_external | default('false') | lower }}"
- name: "BUFFER_QUEUE_LIMIT"
value: "{{ openshift_logging_mux_buffer_queue_limit }}"
- name: "BUFFER_SIZE_LIMIT"
@@ -115,6 +115,8 @@ spec:
resourceFieldRef:
containerName: "mux"
resource: limits.memory
+ - name: "FILE_BUFFER_LIMIT"
+ value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}"
volumes:
- name: config
configMap:
@@ -131,3 +133,13 @@ spec:
- name: muxcerts
secret:
secretName: logging-mux
+ - name: filebufferstorage
+{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_logging_mux_file_buffer_pvc_name }}
+{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %}
+ hostPath:
+ path: "/var/log/fluentd"
+{% else %}
+ emptyDir: {}
+{% endif %}
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 9b7125240..1f182a25c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -133,12 +133,18 @@
- block:
- name: check whether our docker-registry setting exists in the env file
command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
- ignore_errors: true
+ failed_when: false
changed_when: false
register: already_set
- set_fact:
- openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+ openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout is defined and already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+
+- name: Set fact of all etcd host IPs
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
- name: Install the systemd units
include: systemd_units.yml
@@ -200,6 +206,10 @@
delay: 60
notify: Verify API Server
+- name: Dump logs from master service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master
+ when: start_result | failed
+
- name: Stop and disable non-HA master when running HA
systemd:
name: "{{ openshift.common.service_type }}-master"
@@ -233,6 +243,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-api if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ when: start_result | failed
+
- set_fact:
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
@@ -252,6 +266,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-api if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ when: start_result | failed
+
- set_fact:
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
@@ -288,6 +306,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-controllers if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ when: start_result | failed
+
- name: Wait for master controller service to start on first master
pause:
seconds: 15
@@ -304,6 +326,10 @@
retries: 1
delay: 60
+- name: Dump logs from master-controllers if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ when: start_result | failed
+
- set_fact:
master_controllers_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index dfc255b3d..d71ad3459 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -23,7 +23,7 @@
when: openshift.common.is_containerized | bool and not openshift.common.is_master_system_container | bool
# workaround for missing systemd unit files
-- name: Create the systemd unit files
+- name: "Create the {{ openshift.common.service_type }} systemd unit file"
template:
src: "master_docker/master.docker.service.j2"
dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
@@ -32,7 +32,7 @@
- not openshift.common.is_master_system_container | bool
register: create_master_unit_file
-- name: Install Master service file
+- name: "Install {{ openshift.common.service_type }} systemd unit file"
copy:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
src: "{{ openshift.common.service_type }}-master.service"
@@ -44,7 +44,7 @@
- command: systemctl daemon-reload
when: create_master_unit_file | changed
-- name: Create the ha systemd unit files
+- name: Create the ha systemd unit files for api and controller services
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 850fae0e4..b931f1414 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,6 +1,9 @@
OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
CONFIG_FILE={{ openshift_master_config_file }}
-{% if openshift_push_via_dns | default(false) %}
+{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
+{% if openshift_master_is_scaleup_host %}
+{{ openshift_master_default_registry_value }}
+{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
{% if openshift.common.is_containerized | bool %}
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index af3ebc6d2..7964bbb48 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -164,16 +164,16 @@ masterClients:
externalKubernetesClientConnectionOverrides:
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
contentType: application/vnd.kubernetes.protobuf
- burst: 400
- qps: 200
+ burst: {{ openshift_master_external_ratelimit_burst | default(400) }}
+ qps: {{ openshift_master_external_ratelimit_qps | default(200) }}
{% endif %}
externalKubernetesKubeConfig: ""
{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
openshiftLoopbackClientConnectionOverrides:
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
contentType: application/vnd.kubernetes.protobuf
- burst: 600
- qps: 300
+ burst: {{ openshift_master_loopback_ratelimit_burst | default(600) }}
+ qps: {{ openshift_master_loopback_ratelimit_qps | default(300) }}
{% endif %}
openshiftLoopbackKubeConfig: openshift-master.kubeconfig
masterPublicURL: {{ openshift.master.public_api_url }}
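The template change above turns the previously hard-coded client rate limits into tunables, keeping the old values as defaults. A hedged group_vars sketch for raising them (values are illustrative):

```yaml
# Sketch only: double the master client rate limits introduced above.
openshift_master_external_ratelimit_qps: 400
openshift_master_external_ratelimit_burst: 800
openshift_master_loopback_ratelimit_qps: 600
openshift_master_loopback_ratelimit_burst: 1200
```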
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index c05a27559..63eb3ea1b 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,6 +1,9 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
-{% if openshift_push_via_dns | default(false) %}
+{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
+{% if openshift_master_is_scaleup_host %}
+{{ openshift_master_default_registry_value_api }}
+{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
{% if openshift.common.is_containerized | bool %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index a153fb33d..0adfd05b6 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,13 +1,16 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
-{% if openshift_push_via_dns | default(false) %}
+{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
+{% if openshift_master_is_scaleup_host %}
+{{ openshift_master_default_registry_value_controllers }}
+{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
-{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_access_key is defined %}
+{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %}
AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index c5ba20409..7745d014f 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,4 @@ openshift_master_valid_grant_methods:
- deny
l_is_ha: "{{ openshift.master.ha is defined and openshift.master.ha | bool }}"
+openshift_master_is_scaleup_host: False
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 8d7ee00ed..31129a6ac 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -26,7 +26,6 @@
- name: generate htpasswd file for hawkular metrics
local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
- no_log: true
become: false
- name: copy local generated passwords to target
diff --git a/roles/openshift_metrics/tasks/generate_rolebindings.yaml b/roles/openshift_metrics/tasks/generate_rolebindings.yaml
index e050c8eb2..9882b1eb5 100644
--- a/roles/openshift_metrics/tasks/generate_rolebindings.yaml
+++ b/roles/openshift_metrics/tasks/generate_rolebindings.yaml
@@ -13,3 +13,36 @@
- kind: ServiceAccount
name: hawkular
changed_when: no
+
+- name: generate hawkular-metrics cluster role binding for the hawkular service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: hawkular-namespace-watcher
+ labels:
+ metrics-infra: hawkular
+ roleRef:
+ kind: ClusterRole
+ name: hawkular-metrics
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular
+ namespace: "{{openshift_metrics_project}}"
+ changed_when: no
+
+- name: generate the hawkular cluster role
+ template:
+ src: hawkular_metrics_role.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml"
+ changed_when: no
+
+- name: Set hawkular cluster roles
+ oc_obj:
+ name: hawkular-metrics
+ namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
+ kind: clusterrole
+ files:
+ - "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml"
+ delete_after: true
diff --git a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml
index e9d70f74f..db27680fe 100644
--- a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml
+++ b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml
@@ -13,3 +13,15 @@
- name: cassandra
secret: hawkular-cassandra-secrets
changed_when: no
+
+- name: Set serviceaccounts for hawkular metrics/cassandra
+ oc_obj:
+ name: "{{ item }}"
+ kind: serviceaccount
+ namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
+ files:
+ - "{{ mktemp.stdout }}/templates/metrics-{{ item }}-sa.yaml"
+ delete_after: true
+ with_items:
+ - hawkular
+ - cassandra
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 9a5d52eb6..403b1252c 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -6,7 +6,7 @@
command: >
{{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found --selector=metrics-infra
- all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
register: delete_metrics
changed_when: delete_metrics.stdout != 'No resources found'
@@ -16,4 +16,5 @@
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
+ clusterrolebinding/hawkular-metrics
changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_role.j2 b/roles/openshift_metrics/templates/hawkular_metrics_role.j2
new file mode 100644
index 000000000..6c9dbf5d6
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_metrics_role.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: hawkular-metrics
+ labels:
+ metrics-infra: hawkular-metrics
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - list
+ - get
+ - watch
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
index 08ca87288..423ab54a3 100644
--- a/roles/openshift_metrics/templates/route.j2
+++ b/roles/openshift_metrics/templates/route.j2
@@ -17,7 +17,7 @@ spec:
tls:
termination: {{ tls.termination }}
{% if tls.ca_certificate is defined and tls.ca_certificate | length > 0 %}
- CACertificate: |
+ caCertificate: |
{{ tls.ca_certificate|indent(6, true) }}
{% endif %}
{% if tls.key is defined and tls.key | length > 0 %}
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index b20957550..f0bdac7d2 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 5904ca9bc..47073ee0f 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -8,7 +8,7 @@ os_firewall_allow:
port: 443/tcp
- service: OpenShift OVS sdn
port: 4789/udp
- when: openshift.common.use_openshift_sdn | bool
+ when: openshift.common.use_openshift_sdn | default(true) | bool
- service: Calico BGP Port
port: 179/tcp
when: openshift.common.use_calico | bool
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 6b38da7f8..f2c45a4bd 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,7 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 3b7e8126a..4fb841add 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -12,6 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_node_facts
- role: lib_openshift
- role: openshift_common
- role: openshift_clock
@@ -30,7 +31,7 @@ dependencies:
os_firewall_allow:
- service: OpenShift OVS sdn
port: 4789/udp
- when: openshift.common.use_openshift_sdn | bool
+ when: openshift.common.use_openshift_sdn | default(true) | bool
- role: os_firewall
os_firewall_allow:
- service: Calico BGP Port
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
new file mode 100644
index 000000000..1186062eb
--- /dev/null
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -0,0 +1,16 @@
+---
+- name: Configure Node settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+ - regex: '^IMAGE_VERSION='
+ line: "IMAGE_VERSION={{ openshift_image_tag }}"
+ notify:
+ - restart node
diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
new file mode 100644
index 000000000..d60794305
--- /dev/null
+++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
@@ -0,0 +1,17 @@
+---
+- name: Configure Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^HTTP_PROXY='
+ line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+ - regex: '^HTTPS_PROXY='
+ line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+ - regex: '^NO_PROXY='
+ line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
+ notify:
+ - restart node
diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
new file mode 100644
index 000000000..ee91a88ab
--- /dev/null
+++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node dependencies docker service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ src: openshift.docker.node.dep.service
+ notify:
+ - reload systemd units
+ - restart node
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
new file mode 100644
index 000000000..f92ff79b5
--- /dev/null
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node docker service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: openshift.docker.node.service
+ notify:
+ - reload systemd units
+ - restart node
diff --git a/roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml
new file mode 100644
index 000000000..c2c5ea1d4
--- /dev/null
+++ b/roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install OpenvSwitch docker service file
+ template:
+ dest: "/etc/systemd/system/openvswitch.service"
+ src: openvswitch.docker.service
+ notify:
+ - reload systemd units
+ - restart openvswitch
diff --git a/roles/openshift_node/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node/tasks/config/install-ovs-service-env-file.yml
new file mode 100644
index 000000000..1d75a3355
--- /dev/null
+++ b/roles/openshift_node/tasks/config/install-ovs-service-env-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Create the openvswitch service env file
+ template:
+ src: openvswitch.sysconfig.j2
+ dest: /etc/sysconfig/openvswitch
+ notify:
+ - reload systemd units
+ - restart openvswitch
diff --git a/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
new file mode 100644
index 000000000..5df1abc79
--- /dev/null
+++ b/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
@@ -0,0 +1,13 @@
+---
+# May be a temporary workaround.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
+- name: Create OpenvSwitch service.d directory
+ file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+
+- name: Install OpenvSwitch service OOM fix
+ template:
+ dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
+ src: openvswitch-avoid-oom.conf
+ notify:
+ - reload systemd units
+ - restart openvswitch
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 879f6c207..87b1f6537 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,34 +6,6 @@
(not ansible_selinux or ansible_selinux.status != 'enabled') and
deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
-- name: Set node facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- # Reset node labels to an empty dictionary.
- - role: node
- local_facts:
- labels: {}
- - role: node
- local_facts:
- annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
- set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
- node_image: "{{ osn_image | default(None) }}"
- ovs_image: "{{ osn_ovs_image | default(None) }}"
- proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
- local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
- env_vars: "{{ openshift_node_env_vars | default(None) }}"
-
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
command: grep "^[^#].*swap" /etc/fstab
@@ -90,7 +62,9 @@
package:
name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
+ when:
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ - not openshift.common.is_containerized | bool
- name: Install conntrack-tools package
package:
@@ -119,7 +93,9 @@
enabled: yes
state: started
daemon_reload: yes
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.use_openshift_sdn | default(true) | bool
register: ovs_start_result
until: not ovs_start_result | failed
retries: 3
@@ -230,7 +206,7 @@
ignore_errors: true
- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 {{ openshift.common.service_type }}-node
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
when: node_start_result | failed
- name: Abort if node failed to start
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 2ccc28461..b86bb1549 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -2,15 +2,8 @@
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
-- name: Install Node dependencies docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
- src: openshift.docker.node.dep.service
- register: install_node_dep_result
+- include: config/install-node-deps-docker-service-file.yml
when: openshift.common.is_containerized | bool
- notify:
- - reload systemd units
- - restart node
- block:
- name: Pre-pull node image
@@ -19,14 +12,7 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- - name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- register: install_node_result
- notify:
- - reload systemd units
- - restart node
+ - include: config/install-node-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
@@ -35,21 +21,13 @@
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: "node.service.j2"
- register: install_node_result
when: not openshift.common.is_containerized | bool
notify:
- reload systemd units
- restart node
-- name: Create the openvswitch service env file
- template:
- src: openvswitch.sysconfig.j2
- dest: /etc/sysconfig/openvswitch
+- include: config/install-ovs-service-env-file.yml
when: openshift.common.is_containerized | bool
- register: install_ovs_sysconfig
- notify:
- - reload systemd units
- - restart openvswitch
- name: Install Node system container
include: node_system_container.yml
@@ -64,22 +42,9 @@
- openshift.common.is_containerized | bool
- openshift.common.is_openvswitch_system_container | bool
-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
- file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+- include: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift.common.use_openshift_sdn | default(true) | bool
-- name: Install OpenvSwitch service OOM fix
- template:
- dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
- src: openvswitch-avoid-oom.conf
- when: openshift.common.use_openshift_sdn | default(true) | bool
- register: install_oom_fix_result
- notify:
- - reload systemd units
- - restart openvswitch
-
- block:
- name: Pre-pull openvswitch image
command: >
@@ -87,47 +52,11 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- - name: Install OpenvSwitch docker service file
- template:
- dest: "/etc/systemd/system/openvswitch.service"
- src: openvswitch.docker.service
- notify:
- - reload systemd units
- - restart openvswitch
+ - include: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- openshift.common.use_openshift_sdn | default(true) | bool
- not openshift.common.is_openvswitch_system_container | bool
-- name: Configure Node settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
- notify:
- - restart node
-
-- name: Configure Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
- when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
- notify:
- - restart node
+- include: config/configure-node-settings.yml
+- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 1dbe58439..e12a52c15 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -24,8 +24,8 @@ WorkingDirectory=/var/lib/origin/
SyslogIdentifier={{ openshift.common.service_type }}-node
Restart=always
RestartSec=5s
+TimeoutStartSec=300
OOMScoreAdjust=-999
-KillMode=process
[Install]
WantedBy=multi-user.target
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 924226d09..4aab8f2e9 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -37,6 +37,8 @@ if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
UPSTREAM_DNS_TMP_SORTED=`mktemp`
CURRENT_UPSTREAM_DNS_SORTED=`mktemp`
NEW_RESOLV_CONF=`mktemp`
+ NEW_NODE_RESOLV_CONF=`mktemp`
+
######################################################################
# couldn't find an existing method to determine if the interface owns the
@@ -60,12 +62,14 @@ EOF
fi
######################################################################
- # Generate a new origin dns config file
+ # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
+ # and /etc/origin/node/resolv.conf in their respective formats
for ns in ${IP4_NAMESERVERS}; do
if [[ ! -z $ns ]]; then
- echo "server=${ns}"
+ echo "server=${ns}" >> $UPSTREAM_DNS_TMP
+ echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
fi
- done > $UPSTREAM_DNS_TMP
+ done
# Sort it in case DNS servers arrived in a different order
sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
@@ -74,7 +78,6 @@ EOF
# Compare to the current config file (sorted)
NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
-
if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
# DNS has changed, copy the temp file to the proper location (-Z
# sets default selinux context) and set the restart flag
@@ -82,6 +85,13 @@ EOF
NEEDS_RESTART=1
fi
+ # compare /etc/origin/node/resolv.conf checksum and replace it if different
+ NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF} | awk '{print $1}'`
+ OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf | awk '{print $1}'`
+ if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
+ cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
+ fi
+
if ! `systemctl -q is-active dnsmasq.service`; then
NEEDS_RESTART=1
fi
@@ -91,17 +101,14 @@ EOF
systemctl restart dnsmasq
fi
- # Only if dnsmasq is running properly make it our only nameserver, copy
- # original resolv.conf to /etc/origin/node/resolv.conf for node service to
- # bypass dnsmasq
+ # Only if dnsmasq is running properly make it our only nameserver and place
+ # a watermark on /etc/resolv.conf
if `systemctl -q is-active dnsmasq.service`; then
- if ! grep -q '99-origin-dns.sh' ${NEW_RESOLV_CONF}; then
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
- cp /etc/resolv.conf /etc/origin/node/resolv.conf
fi
- sed -e '/^nameserver.*$/d' /etc/resolv.conf > ${NEW_RESOLV_CONF}
+ sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
-
if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
fi
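Editor's note: NetworkManager runs executables placed in /etc/NetworkManager/dispatcher.d/ on interface events such as up and dhcp4-change, which is how this script gets invoked. A hedged sketch of how a role typically installs such a dispatcher script (task shape assumed, not part of this patch):

- name: Install origin-dns dispatcher script (illustrative)
  copy:
    src: networkmanager/99-origin-dns.sh
    dest: /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
    owner: root
    group: root
    mode: '0755'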
diff --git a/roles/openshift_node_facts/meta/main.yml b/roles/openshift_node_facts/meta/main.yml
new file mode 100644
index 000000000..59bf680ce
--- /dev/null
+++ b/roles/openshift_node_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Andrew Butcher
+ description: OpenShift Node Facts
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
new file mode 100644
index 000000000..c268c945e
--- /dev/null
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+- set_fact:
+ openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
+ when:
+ - openshift_node_debug_level is not defined
+ - lookup('oo_option', 'openshift_node_debug_level') != ""
+
+- name: Set node facts
+ openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ # Reset node labels to an empty dictionary.
+ - role: node
+ local_facts:
+ labels: {}
+ - role: node
+ local_facts:
+ annotations: "{{ openshift_node_annotations | default(none) }}"
+ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+ iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
+ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+ storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
+ set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
+ node_image: "{{ osn_image | default(None) }}"
+ ovs_image: "{{ osn_ovs_image | default(None) }}"
+ proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
+ local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+ env_vars: "{{ openshift_node_env_vars | default(None) }}"
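Editor's note: a hedged sketch of how this new role would be consumed. The role name and its openshift_facts dependency come from this patch; the play shape and group name are illustrative:

- hosts: oo_nodes_to_config
  roles:
  - openshift_node_facts
  - openshift_node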
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index 110dfe5ce..d31b899cf 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -3,7 +3,10 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ when:
+ - not skip_node_svc_handlers | default(False) | bool
+ - not (ovs_service_status_changed | default(false) | bool)
+ - openshift.common.use_openshift_sdn | default(true) | bool
register: l_openshift_node_upgrade_stop_openvswitch_result
until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
retries: 3
@@ -26,3 +29,8 @@
when:
- (not skip_node_svc_handlers | default(False) | bool)
- not (node_service_status_changed | default(false) | bool)
+
+# TODO(jchaloup): once it is verified the systemd module works as expected
+# switch to it: http://docs.ansible.com/ansible/latest/systemd_module.html
+- name: reload systemd units
+ command: systemctl daemon-reload
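Editor's note: the TODO above refers to Ansible's systemd module, which can perform the daemon reload natively. A minimal sketch of the equivalent handler once that switch is verified:

- name: reload systemd units
  systemd:
    daemon_reload: yes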
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
new file mode 100644
index 000000000..1186062eb
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
@@ -0,0 +1,16 @@
+---
+- name: Configure Node settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+ - regex: '^IMAGE_VERSION='
+ line: "IMAGE_VERSION={{ openshift_image_tag }}"
+ notify:
+ - restart node
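Editor's note: as an illustration, with default values this task would leave /etc/sysconfig/origin-node containing lines like the following (service type, log level, and image tag are assumed, not taken from this patch):

# illustrative values only
OPTIONS=--loglevel=2
CONFIG_FILE=/etc/origin/node/node-config.yaml
IMAGE_VERSION=v3.6.0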
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
new file mode 100644
index 000000000..d60794305
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
@@ -0,0 +1,17 @@
+---
+- name: Configure Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^HTTP_PROXY='
+ line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+ - regex: '^HTTPS_PROXY='
+ line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+ - regex: '^NO_PROXY='
+ line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
+ notify:
+ - restart node
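Editor's note: this task runs only when an HTTP proxy is configured, and NO_PROXY is assembled by comma-joining the user-supplied list, the cluster's portal_net, and the first master's SDN cluster network CIDR, so intra-cluster traffic bypasses the proxy. With typical values the rendered line would look something like (addresses assumed):

# illustrative values only
NO_PROXY=.example.com,172.30.0.0/16,10.128.0.0/14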
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
new file mode 100644
index 000000000..ee91a88ab
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node dependencies docker service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ src: openshift.docker.node.dep.service
+ notify:
+ - reload systemd units
+ - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
new file mode 100644
index 000000000..f92ff79b5
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node docker service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: openshift.docker.node.service
+ notify:
+ - reload systemd units
+ - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
new file mode 100644
index 000000000..c2c5ea1d4
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install OpenvSwitch docker service file
+ template:
+ dest: "/etc/systemd/system/openvswitch.service"
+ src: openvswitch.docker.service
+ notify:
+ - reload systemd units
+ - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
new file mode 100644
index 000000000..1d75a3355
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Create the openvswitch service env file
+ template:
+ src: openvswitch.sysconfig.j2
+ dest: /etc/sysconfig/openvswitch
+ notify:
+ - reload systemd units
+ - restart openvswitch
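Editor's note: the notify entries in these include files must match handler names defined by the role. The restart handler's shape, per the handlers hunk earlier in this patch, uses the systemd module, roughly:

- name: restart openvswitch
  systemd:
    name: openvswitch
    state: restarted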
diff --git a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
new file mode 100644
index 000000000..5df1abc79
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
@@ -0,0 +1,13 @@
+---
+# May be a temporary workaround.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
+- name: Create OpenvSwitch service.d directory
+ file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+
+- name: Install OpenvSwitch service OOM fix
+ template:
+ dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
+ src: openvswitch-avoid-oom.conf
+ notify:
+ - reload systemd units
+ - restart openvswitch
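Editor's note: systemd merges any *.conf files under a unit's .service.d/ directory into the unit, overriding only the directives they set, which is why this workaround can adjust OOM behavior without replacing the whole openvswitch unit. The template body is not part of this patch; a plausible shape, expressed as an illustrative task (content assumed):

- name: Install OpenvSwitch OOM drop-in (illustrative; the role uses a template)
  copy:
    dest: /etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf
    content: |
      # assumed body; the real openvswitch-avoid-oom.conf template is not shown here
      [Service]
      OOMScoreAdjust=-1000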
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index ac9ea32cb..f984a04b2 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -43,7 +43,9 @@
docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.use_openshift_sdn | default(true) | bool
- include: docker/upgrade.yml
vars:
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index e8f017445..4e9550150 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -18,87 +18,20 @@
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
-- name: Install Node dependencies docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
- src: openshift.docker.node.dep.service
- register: install_node_dep_result
+- include: config/install-node-deps-docker-service-file.yml
when: openshift.common.is_containerized | bool
-- name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- register: install_node_result
+- include: config/install-node-docker-service-file.yml
when: openshift.common.is_containerized | bool
-- name: Create the openvswitch service env file
- template:
- src: openvswitch.sysconfig.j2
- dest: /etc/sysconfig/openvswitch
+- include: config/install-ovs-service-env-file.yml
when: openshift.common.is_containerized | bool
- register: install_ovs_sysconfig
- notify:
- - restart openvswitch
-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
- file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+- include: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift.common.use_openshift_sdn | default(true) | bool
-- name: Install OpenvSwitch service OOM fix
- template:
- dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
- src: openvswitch-avoid-oom.conf
- when: openshift.common.use_openshift_sdn | default(true) | bool
- register: install_oom_fix_result
- notify:
- - restart openvswitch
-
-- name: Install OpenvSwitch docker service file
- template:
- dest: "/etc/systemd/system/openvswitch.service"
- src: openvswitch.docker.service
+- include: config/install-ovs-docker-service-file.yml
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool
- notify:
- - restart openvswitch
-
-- name: Configure Node settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
- notify:
- - restart node
-
-- name: Configure Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
- when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
- notify:
- - restart node
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed
- notify:
- - restart node
+- include: config/configure-node-settings.yml
+- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2
index 1dbe58439..e12a52c15 100644
--- a/roles/openshift_node_upgrade/templates/node.service.j2
+++ b/roles/openshift_node_upgrade/templates/node.service.j2
@@ -24,8 +24,8 @@ WorkingDirectory=/var/lib/origin/
SyslogIdentifier={{ openshift.common.service_type }}-node
Restart=always
RestartSec=5s
+TimeoutStartSec=300
OOMScoreAdjust=-999
-KillMode=process
[Install]
WantedBy=multi-user.target
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
index c30c15778..71e21a269 100644
--- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -99,7 +99,6 @@ objects:
- ""
resources:
- secrets
- - podpresets
verbs:
- create
- update
@@ -149,6 +148,11 @@ objects:
- podpresets
verbs:
- create
+ - update
+ - delete
+ - get
+ - list
+ - watch
- kind: ClusterRoleBinding
apiVersion: v1
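Editor's note: the first hunk removes podpresets from a rule on the core API group (apiGroups: ""), where the settings.k8s.io PodPreset resource was never actually matched, and the second extends the dedicated podpresets rule from create-only to the full verb set. The resulting rule would read roughly (shape assumed beyond the context shown):

- resources:
  - podpresets
  verbs:
  - create
  - update
  - delete
  - get
  - list
  - watch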
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index a846889ca..2823a7610 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -1,6 +1,5 @@
---
openshift_storage_glusterfs_timeout: 300
-openshift_storage_glusterfs_namespace: 'glusterfs'
openshift_storage_glusterfs_is_native: True
openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
@@ -25,6 +24,7 @@ openshift_storage_glusterfs_heketi_ssh_port: 22
openshift_storage_glusterfs_heketi_ssh_user: 'root'
openshift_storage_glusterfs_heketi_ssh_sudo: False
openshift_storage_glusterfs_heketi_ssh_keyfile: '/dev/null'
+openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
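Editor's note: this hunk explains the removal at the top of the file; the namespace default moved rather than disappeared. Now defined after the heketi variables it references, it evaluates lazily: when either the GlusterFS pods or heketi run natively (on the cluster), resources land in the dedicated glusterfs namespace; otherwise they fall back to default.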
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 600d8f676..19eb3cdf7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -15,7 +15,7 @@
oc_project:
state: present
name: "{{ glusterfs_namespace }}"
- when: glusterfs_is_native or glusterfs_heketi_is_native
+ when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
- name: Delete pre-existing heketi resources
oc_obj:
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index ce6e54664..ca5e7dc1a 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -122,6 +122,8 @@ def write_inventory_vars(base_inventory, lb):
if CFG.deployment.variables['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
+ base_inventory.write('openshift_override_hostname_check=true\n')
+
if lb is not None:
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))