From 801779eeb6f6308f81ae7c48409de7686c04a0aa Mon Sep 17 00:00:00 2001 From: Michael Gugino Date: Wed, 13 Dec 2017 12:42:32 -0500 Subject: Relocate filter plugins to lib_utils This commit relocates filter_plugins to lib_utils and changes the namespacing to prevent unintended use of older versions that may be present in the filter_plugins/ directory on existing installs. Add lib_utils to meta depends for roles. Also consolidate some plugins into lib_utils from various other areas. Update rpm spec, obsolete plugin rpms. --- roles/openshift_logging_fluentd/meta/main.yaml | 1 + roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'roles/openshift_logging_fluentd') diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml index 89c98204f..62f076780 100644 --- a/roles/openshift_logging_fluentd/meta/main.yaml +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml index 12b4f5bfd..1cef6c25e 100644 --- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -4,7 +4,7 @@ name: "{{ node }}" kind: node state: add - labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" + labels: "{{ openshift_logging_fluentd_nodeselector | lib_utils_oo_dict_to_list_of_dict }}" # wait half a second between labels - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} -- cgit v1.2.3 From e3cf9edff6d0186b09b1a112592f283fab6857d0 Mon Sep 17 00:00:00 2001 From: Michael Gugino Date: Tue, 19 Dec 2017 16:36:47 -0500 Subject: Remove references to deployment_type Move the openshift_deployment_type check into the sanity_checks action plugin. Remove compatibility for deployment_type, which has been deprecated for some time now.
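For illustration, a minimal sketch of the net effect on an inventory (the same rename the .papr.inventory and hosts.example hunks below make):

[OSEv3:vars]
# deployment_type=origin        <- deprecated name, replaced by the line below
openshift_deployment_type=origin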
--- .papr.inventory | 2 +- inventory/hosts.example | 2 +- playbooks/byo/rhel_subscribe.yml | 2 +- .../upgrades/pre/verify_cluster.yml | 5 --- .../upgrades/pre/verify_upgrade_targets.yml | 2 +- .../openshift-cluster/upgrades/v3_6/upgrade.yml | 2 +- .../upgrades/v3_6/upgrade_control_plane.yml | 2 +- .../upgrades/v3_6/upgrade_nodes.yml | 2 +- playbooks/init/base_packages.yml | 37 +++++++++++++++++++ playbooks/init/facts.yml | 41 +++++----------------- playbooks/init/repos.yml | 2 +- playbooks/openshift-glusterfs/README.md | 2 +- .../openshift-master/private/additional_config.yml | 2 +- playbooks/openshift-master/private/config.yml | 2 +- playbooks/prerequisites.yml | 4 ++- roles/ansible_service_broker/tasks/install.yml | 2 +- roles/container_runtime/meta/main.yml | 1 + roles/contiv_facts/tasks/main.yml | 2 +- roles/lib_utils/action_plugins/sanity_checks.py | 18 +++++++++- roles/openshift_facts/defaults/main.yml | 4 +-- roles/openshift_logging_curator/tasks/main.yaml | 2 +- .../tasks/main.yaml | 4 +-- .../openshift_logging_eventrouter/tasks/main.yaml | 4 +-- roles/openshift_logging_fluentd/tasks/main.yaml | 4 +-- roles/openshift_logging_kibana/tasks/main.yaml | 4 +-- roles/openshift_logging_mux/tasks/main.yaml | 4 +-- roles/openshift_metrics/tasks/main.yaml | 4 +-- roles/openshift_node/tasks/main.yml | 2 +- roles/openshift_node/tasks/upgrade.yml | 1 - roles/openshift_prometheus/tasks/main.yaml | 2 +- roles/openshift_repos/tasks/main.yaml | 2 +- roles/openshift_sanitize_inventory/tasks/main.yml | 26 -------------- roles/openshift_sanitize_inventory/vars/main.yml | 3 -- roles/openshift_service_catalog/tasks/install.yml | 4 +-- .../openshift_storage_glusterfs/defaults/main.yml | 8 ++--- roles/template_service_broker/tasks/install.yml | 4 +-- .../package_availability_missing_required.yml | 2 +- .../playbooks/package_availability_succeeds.yml | 2 +- .../playbooks/package_version_matches.yml | 2 +- .../playbooks/package_version_mismatches.yml | 2 +- 40 files changed, 109 insertions(+), 113 deletions(-) create mode 100644 playbooks/init/base_packages.yml (limited to 'roles/openshift_logging_fluentd') diff --git a/.papr.inventory b/.papr.inventory index aa4324c21..c678e76aa 100644 --- a/.papr.inventory +++ b/.papr.inventory @@ -6,7 +6,7 @@ etcd [OSEv3:vars] ansible_ssh_user=root ansible_python_interpreter=/usr/bin/python3 -deployment_type=origin +openshift_deployment_type=origin openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}" openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io" openshift_check_min_host_disk_gb=1.5 diff --git a/inventory/hosts.example b/inventory/hosts.example index d857cd1a7..b009b4fc8 100644 --- a/inventory/hosts.example +++ b/inventory/hosts.example @@ -941,7 +941,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used -# by deployment_type=origin +# by openshift_deployment_type=origin #openshift_enable_origin_repo=false # Validity of the auto-generated OpenShift certificates in days. 
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index dc9d0a139..f70f05bac 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -6,7 +6,7 @@ roles: - role: rhel_subscribe when: - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - ansible_distribution == "RedHat" - rhsub_user is defined - rhsub_pass is defined diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml index 4713f8633..693ab2d96 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml @@ -5,11 +5,6 @@ hosts: oo_first_master gather_facts: no tasks: - - fail: - msg: > - This upgrade is only supported for origin and openshift-enterprise - deployment types - when: deployment_type not in ['origin','openshift-enterprise'] # Error out in situations where the user has older versions specified in their # inventory in any of the openshift_release, openshift_image_tag, and diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 95c37c38c..b0b5a7e4b 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -49,5 +49,5 @@ fail: msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" when: - - deployment_type == 'origin' + - openshift_deployment_type == 'origin' - openshift.common.version is version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index a5ad3801d..d520c6aee 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -13,7 +13,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 1498db4c5..a956fdde5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -20,7 +20,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 6958652d8..4febe76ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -15,7 +15,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type 
== 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml new file mode 100644 index 000000000..f7007087c --- /dev/null +++ b/playbooks/init/base_packages.yml @@ -0,0 +1,37 @@ +--- +- name: Ensure that all non-node hosts are accessible + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config + any_errors_fatal: true + tasks: + - when: + - not openshift_is_atomic | bool + block: + - name: Ensure openshift-ansible installer package deps are installed + package: + name: "{{ item }}" + state: present + with_items: + - iproute + - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" + - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" + - yum-utils + register: result + until: result is succeeded + + - name: Ensure various deps for running system containers are installed + package: + name: "{{ item }}" + state: present + with_items: + - atomic + - ostree + - runc + when: + - > + (openshift_use_system_containers | default(False)) | bool + or (openshift_use_etcd_system_container | default(False)) | bool + or (openshift_use_openvswitch_system_container | default(False)) | bool + or (openshift_use_node_system_container | default(False)) | bool + or (openshift_use_master_system_container | default(False)) | bool + register: result + until: result is succeeded diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml index 1a5e3b513..9e411a551 100644 --- a/playbooks/init/facts.yml +++ b/playbooks/init/facts.yml @@ -21,6 +21,14 @@ path: /run/ostree-booted register: ostree_booted + # TODO(michaelgugino) remove this line once CI is updated. + - name: set openshift_deployment_type if unset + set_fact: + openshift_deployment_type: "{{ deployment_type }}" + when: + - openshift_deployment_type is undefined + - deployment_type is defined + - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized set_fact: openshift_is_atomic: "{{ ostree_booted.stat.exists }}" @@ -48,39 +56,6 @@ - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=') msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. 
- - when: - - not openshift_is_atomic | bool - block: - - name: Ensure openshift-ansible installer package deps are installed - package: - name: "{{ item }}" - state: present - with_items: - - iproute - - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - - yum-utils - register: result - until: result is succeeded - - - name: Ensure various deps for running system containers are installed - package: - name: "{{ item }}" - state: present - with_items: - - atomic - - ostree - - runc - when: - - > - (openshift_use_system_containers | default(False)) | bool - or (openshift_use_etcd_system_container | default(False)) | bool - or (openshift_use_openvswitch_system_container | default(False)) | bool - or (openshift_use_node_system_container | default(False)) | bool - or (openshift_use_master_system_container | default(False)) | bool - register: result - until: result is succeeded - - name: Gather Cluster facts openshift_facts: role: common diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml index 66786a41a..866c889b6 100644 --- a/playbooks/init/repos.yml +++ b/playbooks/init/repos.yml @@ -8,7 +8,7 @@ name: rhel_subscribe when: - ansible_distribution == 'RedHat' - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - rhsub_user is defined - rhsub_pass is defined - name: initialize openshift repos diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md index 107bbfff6..19c381490 100644 --- a/playbooks/openshift-glusterfs/README.md +++ b/playbooks/openshift-glusterfs/README.md @@ -63,7 +63,7 @@ glusterfs [OSEv3:vars] ansible_ssh_user=root -deployment_type=origin +openshift_deployment_type=origin [masters] master diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index 81bb8cc5c..85be0e600 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -31,7 +31,7 @@ - role: cockpit when: - not openshift_is_atomic | bool - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' - role: flannel_register diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml index 3093444b4..e53a6f093 100644 --- a/playbooks/openshift-master/private/config.yml +++ b/playbooks/openshift-master/private/config.yml @@ -47,7 +47,7 @@ state: absent when: - rpmgenerated_config.stat.exists == true - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' with_items: - master - node diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml index 113d68e0f..7802f83d9 100644 --- a/playbooks/prerequisites.yml +++ b/playbooks/prerequisites.yml @@ -3,11 +3,13 @@ vars: skip_verison: True -- import_playbook: validate_hostnames.yml +- import_playbook: init/validate_hostnames.yml when: not (skip_validate_hostnames | default(False)) - import_playbook: init/repos.yml +- import_playbook: init/base_packages.yml + # This is required for container runtime for crio, only needs to run once. 
- name: Configure os_firewall hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 4ca47d074..ba2f7293b 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -4,7 +4,7 @@ - name: Set default image variables based on deployment type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: set ansible_service_broker facts diff --git a/roles/container_runtime/meta/main.yml b/roles/container_runtime/meta/main.yml index 5c4c569de..3bc2607fb 100644 --- a/roles/container_runtime/meta/main.yml +++ b/roles/container_runtime/meta/main.yml @@ -12,3 +12,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils +- role: openshift_facts diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index c6f8ad1d6..ced04759d 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -70,4 +70,4 @@ when: has_rpm - include_tasks: fedora-install.yml - when: not is_atomic and ansible_distribution == "Fedora" + when: not openshift_is_atomic and ansible_distribution == "Fedora" diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py index 2ddcf77e4..1bf332678 100644 --- a/roles/lib_utils/action_plugins/sanity_checks.py +++ b/roles/lib_utils/action_plugins/sanity_checks.py @@ -5,6 +5,9 @@ appropriately and no conflicting options have been provided. from ansible.plugins.action import ActionBase from ansible import errors +# Valid values for openshift_deployment_type +VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise') + # Tuple of variable names and default values if undefined. 
NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True), ('openshift_use_flannel', False), @@ -16,7 +19,10 @@ NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True), def to_bool(var_to_check): """Determine a boolean value given the multiple ways bools can be specified in ansible.""" - yes_list = (True, 1, "True", "1", "true", "Yes", "yes") + # http://yaml.org/type/bool.html + yes_list = (True, 1, "True", "1", "true", "TRUE", + "Yes", "yes", "Y", "y", "YES", + "on", "ON", "On") return var_to_check in yes_list @@ -30,6 +36,15 @@ class ActionModule(ActionBase): return None return self._templar.template(res) + def check_openshift_deployment_type(self, hostvars, host): + """Ensure a valid openshift_deployment_type is set""" + openshift_deployment_type = self.template_var(hostvars, host, + 'openshift_deployment_type') + if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES: + type_strings = ", ".join(VALID_DEPLOYMENT_TYPES) + msg = "openshift_deployment_type must be defined and one of {}".format(type_strings) + raise errors.AnsibleModuleError(msg) + def check_python_version(self, hostvars, host, distro): """Ensure python version is 3 for Fedora and python 2 for others""" ansible_python = self.template_var(hostvars, host, 'ansible_python') @@ -73,6 +88,7 @@ class ActionModule(ActionBase): def run_checks(self, hostvars, host): """Execute the hostvars validations against host""" distro = self.template_var(hostvars, host, 'ansible_distribution') + self.check_openshift_deployment_type(hostvars, host) self.check_python_version(hostvars, host, distro) self.network_plugin_check(hostvars, host) self.check_hostname_vars(hostvars, host) diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index a4252afb0..980350d14 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -5,8 +5,8 @@ openshift_cli_image_dict: origin: 'openshift/origin' openshift-enterprise: 'openshift3/ose' -repoquery_cmd: "{{ ansible_pkg_mgr == 'dnf' | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}" -repoquery_installed: "{{ ansible_pkg_mgr == 'dnf' | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}" +repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}" +repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}" openshift_hosted_images_dict: origin: 'openshift/origin-${component}:${version}' diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index e7ef5ff22..524e239b7 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -2,7 +2,7 @@ - name: Set default image variables based on deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 7790dc435..6ddeb122e 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -15,10 +15,10 @@ elasticsearch_name: "{{ 
'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml index 96b181d61..31780a343 100644 --- a/roles/openshift_logging_eventrouter/tasks/main.yaml +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 87eedfb4b..08d7561ac 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -34,10 +34,10 @@ msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index a00248d11..3c3bd902e 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -1,9 +1,9 @@ --- # fail is we don't have an endpoint for ES to connect to? 
-- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 68948bce2..59a6301d7 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -7,10 +7,10 @@ msg: Operations logs destination is required when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == '' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 9dfe360bb..b67077bca 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -9,10 +9,10 @@ - "'not installed' not in passlib_result.stdout" msg: "python-passlib rpm must be installed on control host" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: Set metrics image facts diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 2daa6c75f..eb362816a 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -3,7 +3,7 @@ msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." 
when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - not openshift_use_crio - include_tasks: dnsmasq_install.yml diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index f62bde784..02e417937 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -5,7 +5,6 @@ # - node_config_hook # - openshift_pkg_version # - openshift_is_containerized -# - deployment_type # - openshift_release # tasks file for openshift_node_upgrade diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 38798e1f5..b859eb111 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -1,5 +1,5 @@ --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - "{{ openshift_deployment_type }}.yml" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 35206049f..911005bb6 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -40,7 +40,7 @@ - include_tasks: rhel_repos.yml when: - ansible_distribution == 'RedHat' - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - rhsub_user is defined - rhsub_pass is defined diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 651d896cf..62d460272 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -3,37 +3,11 @@ # the user would also be aware of any deprecated variables they should note to adjust - include_tasks: deprecations.yml -- name: Abort when conflicting deployment type variables are set - when: - - deployment_type is defined - - openshift_deployment_type is defined - - openshift_deployment_type != deployment_type - fail: - msg: |- - openshift_deployment_type is set to "{{ openshift_deployment_type }}". - deployment_type is set to "{{ deployment_type }}". - To avoid unexpected results, this conflict is not allowed. - deployment_type is deprecated in favor of openshift_deployment_type. - Please specify only openshift_deployment_type, or make both the same. - - name: Standardize on latest variable names set_fact: - # goal is to deprecate deployment_type in favor of openshift_deployment_type. - # both will be accepted for now, but code should refer to the new name. - # TODO: once this is well-documented, add deprecation notice if using old name. - deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" - openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" -- name: Abort when deployment type is invalid - # this variable is required; complain early and clearly if it is invalid. 
- when: openshift_deployment_type not in known_openshift_deployment_types - fail: - msg: |- - Please set openshift_deployment_type to one of: - {{ known_openshift_deployment_types | join(', ') }} - - name: Normalize openshift_release set_fact: # Normalize release if provided, e.g. "v3.5" => "3.5" diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index 0fc2372d2..df15948d2 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,7 +1,4 @@ --- -# origin uses community packages named 'origin' -# openshift-enterprise uses Red Hat packages named 'atomic-openshift' -known_openshift_deployment_types: ['origin', 'openshift-enterprise'] __deprecation_header: "[DEPRECATION WARNING]:" diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 452d869f6..cfecaa12c 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -6,10 +6,10 @@ register: mktemp changed_when: False -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: Set service_catalog image facts diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index da34fab2a..4cbe262d2 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -6,16 +6,16 @@ openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_gluste openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_storageclass_default: False -openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" +openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_block_deploy: True -openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}" +openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}" openshift_storage_glusterfs_block_version: 'latest' openshift_storage_glusterfs_block_host_vol_create: True openshift_storage_glusterfs_block_host_vol_size: 100 openshift_storage_glusterfs_block_host_vol_max: 15 openshift_storage_glusterfs_s3_deploy: True -openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" +openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" openshift_storage_glusterfs_s3_version: 'latest' openshift_storage_glusterfs_s3_account: "{{ omit }}" 
openshift_storage_glusterfs_s3_user: "{{ omit }}" @@ -29,7 +29,7 @@ openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is openshift_storage_glusterfs_heketi_is_missing: True openshift_storage_glusterfs_heketi_deploy_is_missing: True openshift_storage_glusterfs_heketi_cli: 'heketi-cli' -openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" +openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" openshift_storage_glusterfs_heketi_version: 'latest' openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}" openshift_storage_glusterfs_heketi_user_key: "{{ omit }}" diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 2fc9779d6..765263db5 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -1,9 +1,9 @@ --- # Fact setting -- name: Set default image variables based on deployment type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: set template_service_broker facts diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml index 006a71bd9..451ac0972 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml @@ -4,7 +4,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: openshift-enterprise + openshift_deployment_type: openshift-enterprise - name: Fail as required packages cannot be installed hosts: all diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml index b4f18e3b5..e37487f13 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml @@ -3,7 +3,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: origin + openshift_deployment_type: origin - name: Succeeds as Origin packages are public hosts: all diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml index 4e2b8a50c..9c845e1e5 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml @@ -3,7 +3,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: openshift-enterprise + openshift_deployment_type: openshift-enterprise openshift_release: 3.2 - name: Success when AOS version matches openshift_release diff --git 
a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml index e1f8d74e6..9ae811939 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml @@ -4,7 +4,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: openshift-enterprise + openshift_deployment_type: openshift-enterprise openshift_release: 3.2 - name: Failure when AOS version doesn't match openshift_release -- cgit v1.2.3 From 6e7b7448e0b9d88d649b3ae70c1e91b06ad7d97e Mon Sep 17 00:00:00 2001 From: Noriko Hosoi Date: Mon, 18 Dec 2017 13:45:33 -0800 Subject: Bug 1527178 - installation of logging stack failed: Invalid version specified for Elasticsearch openshift_logging_{curator,elasticsearch,fluentd,kibana,mux}/vars/main.yml: - adding "3_8" to __allowed_.*_versions - replacing the value of __latest_.*_version "3_6" with "3_8". --- roles/openshift_logging_curator/vars/main.yml | 4 ++-- roles/openshift_logging_elasticsearch/vars/main.yml | 4 ++-- roles/openshift_logging_fluentd/vars/main.yml | 4 ++-- roles/openshift_logging_kibana/vars/main.yml | 4 ++-- roles/openshift_logging_mux/vars/main.yml | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'roles/openshift_logging_fluentd') diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 95bf462d1..5bee58725 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_6" -__allowed_curator_versions: ["3_5", "3_6", "3_7"] +__latest_curator_version: "3_8" +__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8"] diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index c8e995146..0e56a6eac 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_6" -__allowed_es_versions: ["3_5", "3_6", "3_7"] +__latest_es_version: "3_8" +__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index 92a426952..762e3d4d0 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_6" -__allowed_fluentd_versions: ["3_5", "3_6", "3_7"] +__latest_fluentd_version: "3_8" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index 241877a02..a2c54d8e4 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_6" -__allowed_kibana_versions: ["3_5", "3_6", "3_7"] +__latest_kibana_version: "3_8" +__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8"] diff --git
a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index e7b57f4b5..1da053b4a 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_6" -__allowed_mux_versions: ["3_5", "3_6", "3_7"] +__latest_mux_version: "3_8" +__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8"] -- cgit v1.2.3 From 8cb27ae800df71ee816852df56cd2c861a0f0a0a Mon Sep 17 00:00:00 2001 From: Eric Wolinetz Date: Wed, 25 Oct 2017 20:45:34 -0500 Subject: Update logging_facts to pull values from configmap YAML files, use diffs to keep custom changes, and whitelist certain settings when creating diffs --- .../lib_openshift/library/conditional_set_fact.py | 74 ++++++++++++++ .../filter_plugins/openshift_logging.py | 25 ++++- roles/openshift_logging/library/logging_patch.py | 112 +++++++++++++++++++++ .../library/openshift_logging_facts.py | 13 ++- roles/openshift_logging/tasks/install_logging.yaml | 3 + .../tasks/patch_configmap_file.yaml | 35 +++++++ .../tasks/patch_configmap_files.yaml | 31 ++++++ .../tasks/set_defaults_from_current.yml | 34 +++++++ roles/openshift_logging_curator/tasks/main.yaml | 15 +-- .../tasks/main.yaml | 32 +++--- roles/openshift_logging_fluentd/tasks/main.yaml | 36 +++---- roles/openshift_logging_mux/tasks/main.yaml | 24 ++--- .../library/conditional_set_fact.py | 68 ------------- roles/openshift_sanitize_inventory/meta/main.yml | 1 + 14 files changed, 374 insertions(+), 129 deletions(-) create mode 100644 roles/lib_openshift/library/conditional_set_fact.py create mode 100644 roles/openshift_logging/library/logging_patch.py create mode 100644 roles/openshift_logging/tasks/patch_configmap_file.yaml create mode 100644 roles/openshift_logging/tasks/patch_configmap_files.yaml create mode 100644 roles/openshift_logging/tasks/set_defaults_from_current.yml delete mode 100644 roles/openshift_sanitize_inventory/library/conditional_set_fact.py (limited to 'roles/openshift_logging_fluentd') diff --git a/roles/lib_openshift/library/conditional_set_fact.py b/roles/lib_openshift/library/conditional_set_fact.py new file mode 100644 index 000000000..363399f33 --- /dev/null +++ b/roles/lib_openshift/library/conditional_set_fact.py @@ -0,0 +1,74 @@ +#!/usr/bin/python + +""" Ansible module to help with setting facts conditionally based on other facts """ + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: conditional_set_fact + +short_description: This will set a fact if the value is defined + +description: + - "To avoid constant set_fact & when conditions for each var we can use this" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + fact2: defined_variable + +- name: Conditionally set fact falling back on default + conditional_set_fact: + fact1: not_defined_var | defined_variable + +''' + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined.
If it is then we use that value as for the original key """ + + module = AnsibleModule( + argument_spec=dict( + facts=dict(type='dict', required=True), + vars=dict(required=False, type='dict', default=[]) + ), + supports_check_mode=True + ) + + local_facts = dict() + is_changed = False + + for param in module.params['vars']: + other_vars = module.params['vars'][param].replace(" ", "") + + for other_var in other_vars.split('|'): + if other_var in module.params['facts']: + local_facts[param] = module.params['facts'][other_var] + if not is_changed: + is_changed = True + break + + return module.exit_json(changed=is_changed, # noqa: F405 + ansible_facts=local_facts) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index e1a5ea726..ba412b5a6 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -102,6 +102,28 @@ def serviceaccount_namespace(qualified_sa, default=None): return seg[-1] +def flatten_dict(data, parent_key=None): + """ This filter plugin will flatten a dict and its sublists into a single dict + """ + if not isinstance(data, dict): + raise RuntimeError("flatten_dict failed, expects to flatten a dict") + + merged = dict() + + for key in data: + if parent_key is not None: + insert_key = '.'.join((parent_key, key)) + else: + insert_key = key + + if isinstance(data[key], dict): + merged.update(flatten_dict(data[key], insert_key)) + else: + merged[insert_key] = data[key] + + return merged + + # pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' @@ -117,5 +139,6 @@ class FilterModule(object): 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, 'serviceaccount_namespace': serviceaccount_namespace, - 'walk': walk + 'walk': walk, + "flatten_dict": flatten_dict } diff --git a/roles/openshift_logging/library/logging_patch.py b/roles/openshift_logging/library/logging_patch.py new file mode 100644 index 000000000..d2c0bc456 --- /dev/null +++ b/roles/openshift_logging/library/logging_patch.py @@ -0,0 +1,112 @@ +#!/usr/bin/python + +""" Ansible module to help with creating context patch file with whitelisting for logging """ + +import difflib +import re + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: logging_patch + +short_description: This will create a context patch file while giving ability + to whitelist some lines (excluding them from comparison) + +description: + - "To create configmap patches for logging" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- logging_patch: + original_file: "{{ tempdir }}/current.yml" + new_file: "{{ configmap_new_file }}" + whitelist: "{{ configmap_protected_lines | default([]) }}" + +''' + + +def account_for_whitelist(file_contents, white_list=None): + """ This method will remove lines that contain whitelist values from the content + of the file so that we aren't build a patch based on that line + + Usage: + + for file_contents: + + index: + number_of_shards: {{ es_number_of_shards | default ('1') }} + number_of_replicas: {{ es_number_of_replicas | default ('0') }} + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + + + and white_list: + + ['number_of_shards', 'number_of_replicas'] + + + We would end up 
with: + + index: + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + + """ + + for line in white_list: + file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents) + + return file_contents + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined. If it is then we use that value as for the original key """ + + module = AnsibleModule( + argument_spec=dict( + original_file=dict(type='str', required=True), + new_file=dict(type='str', required=True), + whitelist=dict(required=False, type='list', default=[]) + ), + supports_check_mode=True + ) + + original_fh = open(module.params['original_file'], "r") + original_contents = original_fh.read() + original_fh.close() + + original_contents = account_for_whitelist(original_contents, module.params['whitelist']) + + new_fh = open(module.params['new_file'], "r") + new_contents = new_fh.read() + new_fh.close() + + new_contents = account_for_whitelist(new_contents, module.params['whitelist']) + + uni_diff = difflib.unified_diff(new_contents.splitlines(), + original_contents.splitlines(), + lineterm='') + + return module.exit_json(changed=False, # noqa: F405 + raw_patch="\n".join(uni_diff)) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 98d0d1c4f..302a9b4c9 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -204,6 +204,14 @@ class OpenshiftLoggingFacts(OCBaseCommand): if comp is not None: self.add_facts_for(comp, "services", name, dict()) + # pylint: disable=too-many-arguments + def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None): + '''Extracts facts in logging namespace from configmap''' + if yaml_file is not None: + config_facts = yaml.load(yaml_file) + self.facts[comp][kind][name][config_key] = config_facts + self.facts[comp][kind][name]["raw"] = yaml_file + def facts_for_configmaps(self, namespace): ''' Gathers facts for configmaps in logging namespace ''' self.default_keys_for("configmaps") @@ -214,7 +222,10 @@ class OpenshiftLoggingFacts(OCBaseCommand): name = item["metadata"]["name"] comp = self.comp(name) if comp is not None: - self.add_facts_for(comp, "configmaps", name, item["data"]) + self.add_facts_for(comp, "configmaps", name, dict(item["data"])) + if comp in ["elasticsearch", "elasticsearch_ops"]: + for config_key in item["data"]: + self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key]) def facts_for_oauthclients(self, namespace): ''' Gathers facts for oauthclients used with logging ''' diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 11f59652c..913478027 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -4,6 +4,9 @@ oc_bin: "{{openshift_client_binary}}" openshift_logging_namespace: "{{openshift_logging_namespace}}" +## This is include vs import because we need access to group/inventory variables +- include_tasks: set_defaults_from_current.yml + - name: Set logging project oc_project: state: present diff --git a/roles/openshift_logging/tasks/patch_configmap_file.yaml b/roles/openshift_logging/tasks/patch_configmap_file.yaml new file 
mode 100644 index 000000000..30087fe6a --- /dev/null +++ b/roles/openshift_logging/tasks/patch_configmap_file.yaml @@ -0,0 +1,35 @@ +--- +## The purpose of this task file is to get a patch that is based on the diff +## between configmap_current_file and configmap_new_file. The module +## logging_patch takes the paths of two files to compare and also a list of +## variables whose line we exclude from the diffs. +## We then patch the new configmap file so that we can build a configmap +## using that file later. We then use oc apply to idempotenly modify any +## existing configmap. + +## The following variables are expected to be provided when including this task: +# __configmap_output -- This is provided to us from patch_configmap_files.yaml +# it is a dict of the configmap where configmap_current_file exists +# configmap_current_file -- The name of the data file in the __configmap_output +# configmap_new_file -- The path to the file that we intend to oc apply later +# we apply our generated patch to this file. +# configmap_protected_lines -- The list of variables to exclude from the diff + +- copy: + content: "{{ __configmap_output.results.results[0]['data'][configmap_current_file] }}" + dest: "{{ tempdir }}/current.yml" + +- logging_patch: + original_file: "{{ tempdir }}/current.yml" + new_file: "{{ configmap_new_file }}" + whitelist: "{{ configmap_protected_lines | default([]) }}" + register: patch_output + +- copy: + content: "{{ patch_output.raw_patch }}\n" + dest: "{{ tempdir }}/patch.patch" + when: patch_output.raw_patch | length > 0 + +- command: > + patch --force --quiet -u "{{ configmap_new_file }}" "{{ tempdir }}/patch.patch" + when: patch_output.raw_patch | length > 0 diff --git a/roles/openshift_logging/tasks/patch_configmap_files.yaml b/roles/openshift_logging/tasks/patch_configmap_files.yaml new file mode 100644 index 000000000..74a9cc287 --- /dev/null +++ b/roles/openshift_logging/tasks/patch_configmap_files.yaml @@ -0,0 +1,31 @@ +--- +## The purpose of this task file is to take in a list of configmap files provided +## in the variable configmap_file_names, which correspond to the data sections +## within a configmap. We iterate over each of these files and create a patch +## from the diff between current_file and new_file to try to maintain any custom +## changes that a user may have made to a currently deployed configmap while +## trying to idempotently update with any role provided files. 
+ +## The following variables are expected to be provided when including this task: +# configmap_name -- This is the name of the configmap that the files exist in +# configmap_namespace -- The namespace that the configmap lives in +# configmap_file_names -- This is expected to be passed in as a dict +# current_file -- The name of the data entry within the configmap +# new_file -- The file path to the file we are comparing to current_file +# protected_lines -- List of variables whose line will be excluded when creating a diff + +- oc_configmap: + name: "{{ configmap_name }}" + state: list + namespace: "{{ configmap_namespace }}" + register: __configmap_output + +- when: __configmap_output.results.stderr is undefined + include_tasks: patch_configmap_file.yaml + vars: + configmap_current_file: "{{ configmap_files.current_file }}" + configmap_new_file: "{{ configmap_files.new_file }}" + configmap_protected_lines: "{{ configmap_files.protected_lines | default([]) }}" + with_items: "{{ configmap_file_names }}" + loop_control: + loop_var: configmap_files diff --git a/roles/openshift_logging/tasks/set_defaults_from_current.yml b/roles/openshift_logging/tasks/set_defaults_from_current.yml new file mode 100644 index 000000000..dde362abe --- /dev/null +++ b/roles/openshift_logging/tasks/set_defaults_from_current.yml @@ -0,0 +1,34 @@ +--- + +## We are pulling default values from configmaps if they exist already +## Using conditional_set_fact allows us to set the value of a variable based on +## the value of another one, if it is already defined. Else we don't set the +## left hand side (it stays undefined as well). + +## conditional_set_fact allows us to specify a fact source, so first we try to +## set variables in the logging-elasticsearch & logging-elasticsearch-ops configmaps +## afterwards we set the value of the variable based on the value in the inventory +## but fall back to using the value from a configmap as a default. If neither is set +## then the variable remains undefined and the role default will be used. 
+ +- conditional_set_fact: + facts: "{{ openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch']['elasticsearch.yml'] | flatten_dict }}" + vars: + __openshift_logging_es_number_of_shards: index.number_of_shards + __openshift_logging_es_number_of_replicas: index.number_of_replicas + when: openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch'] is defined + +- conditional_set_fact: + facts: "{{ openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops']['elasticsearch.yml'] | flatten_dict }}" + vars: + __openshift_logging_es_ops_number_of_shards: index.number_of_shards + __openshift_logging_es_ops_number_of_replicas: index.number_of_replicas + when: openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops'] is defined + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + openshift_logging_es_number_of_shards: openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards + openshift_logging_es_number_of_replicas: openshift_logging_es_number_of_replicas | __openshift_logging_es_number_of_replicas + openshift_logging_es_ops_number_of_shards: openshift_logging_es_ops_number_of_shards | __openshift_logging_es_ops_number_of_shards + openshift_logging_es_ops_number_of_replicas: openshift_logging_es_ops_number_of_replicas | __openshift_logging_es_ops_number_of_replicas diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index 524e239b7..53b464113 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -54,14 +54,17 @@ - copy: src: curator.yml dest: "{{ tempdir }}/curator.yml" - when: curator_config_contents is undefined changed_when: no -- copy: - content: "{{ curator_config_contents }}" - dest: "{{ tempdir }}/curator.yml" - when: curator_config_contents is defined - changed_when: no +- include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-curator" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "config.yaml" + new_file: "{{ tempdir }}/curator.yml" - name: Set Curator configmap oc_configmap: diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 6ddeb122e..9e7646379 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -168,33 +168,31 @@ when: es_logging_contents is undefined changed_when: no -- set_fact: - __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}" - __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}" - - template: src: elasticsearch.yml.j2 dest: "{{ tempdir }}/elasticsearch.yml" vars: allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}" + es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}" when: es_config_contents is 

-- copy:
-    content: "{{ es_logging_contents }}"
-    dest: "{{ tempdir }}/elasticsearch-logging.yml"
-  when: es_logging_contents is defined
-  changed_when: no
-
-- copy:
-    content: "{{ es_config_contents }}"
-    dest: "{{ tempdir }}/elasticsearch.yml"
-  when: es_config_contents is defined
-  changed_when: no
+# create diff between current configmap files and our current files
+- include_role:
+    name: openshift_logging
+    tasks_from: patch_configmap_files.yaml
+  vars:
+    configmap_name: "logging-elasticsearch"
+    configmap_namespace: "logging"
+    configmap_file_names:
+      - current_file: "elasticsearch.yml"
+        new_file: "{{ tempdir }}/elasticsearch.yml"
+        protected_lines: ["number_of_shards", "number_of_replicas"]
+      - current_file: "logging.yml"
+        new_file: "{{ tempdir }}/elasticsearch-logging.yml"

 - name: Set ES configmap
   oc_configmap:
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 08d7561ac..486cfb8bc 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -108,38 +108,28 @@
     dest: "{{ tempdir }}/fluent.conf"
   vars:
     deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
-  when: fluentd_config_contents is undefined
-  changed_when: no

 - copy:
     src: fluentd-throttle-config.yaml
     dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
-  when: fluentd_throttle_contents is undefined
-  changed_when: no

 - copy:
     src: secure-forward.conf
     dest: "{{ tempdir }}/secure-forward.conf"
-  when: fluentd_secureforward_contents is undefined
-  changed_when: no
-
-- copy:
-    content: "{{ fluentd_config_contents }}"
-    dest: "{{ tempdir }}/fluent.conf"
-  when: fluentd_config_contents is defined
-  changed_when: no
-
-- copy:
-    content: "{{ fluentd_throttle_contents }}"
-    dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
-  when: fluentd_throttle_contents is defined
-  changed_when: no
-
-- copy:
-    content: "{{ fluentd_secureforward_contents }}"
-    dest: "{{ tempdir }}/secure-forward.conf"
-  when: fluentd_secureforward_contents is defined
-  changed_when: no
+- include_role:
+    name: openshift_logging
+    tasks_from: patch_configmap_files.yaml
+  vars:
+    configmap_name: "logging-fluentd"
+    configmap_namespace: "logging"
+    configmap_file_names:
+      - current_file: "fluent.conf"
+        new_file: "{{ tempdir }}/fluent.conf"
+      - current_file: "throttle-config.yaml"
+        new_file: "{{ tempdir }}/fluentd-throttle-config.yaml"
+      - current_file: "secure-forward.conf"
+        new_file: "{{ tempdir }}/secure-forward.conf"

 - name: Set Fluentd configmap
   oc_configmap:
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 59a6301d7..a281c6a53 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -88,26 +88,24 @@
 - copy:
     src: fluent.conf
     dest: "{{mktemp.stdout}}/fluent-mux.conf"
-  when: fluentd_mux_config_contents is undefined
   changed_when: no

 - copy:
     src: secure-forward.conf
     dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
-  when: fluentd_mux_securefoward_contents is undefined
   changed_when: no

-- copy:
-    content: "{{fluentd_mux_config_contents}}"
-    dest: "{{mktemp.stdout}}/fluent-mux.conf"
-  when: fluentd_mux_config_contents is defined
-  changed_when: no
-
-- copy:
-    content: "{{fluentd_mux_secureforward_contents}}"
-    dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
-  when: fluentd_mux_secureforward_contents is defined
-  changed_when: no
+- include_role:
+    name: openshift_logging
+    tasks_from: patch_configmap_files.yaml
+  vars:
+    configmap_name: "logging-mux"
+    configmap_namespace: "{{ openshift_logging_mux_namespace }}"
+    configmap_file_names:
+      - current_file: "fluent.conf"
+        new_file: "{{ mktemp.stdout }}/fluent-mux.conf"
+      - current_file: "secure-forward.conf"
+        new_file: "{{ mktemp.stdout }}/secure-forward-mux.conf"

 - name: Set Mux configmap
   oc_configmap:
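All four logging roles above now route their configmap updates through one shared entry point instead of per-file copy/when pairs. The patch_configmap_files.yaml task file itself is not part of this diff; judging from the call sites, it takes a configmap name and namespace plus current_file/new_file pairs, pulls the live configmap, and reconciles each live file with the freshly generated one (leaving anything in protected_lines alone). A deliberately simplified sketch of that contract — the task logic and result structure here are assumptions, not the actual file:

```yaml
# Hypothetical sketch of the shared patch_configmap_files.yaml contract.
# Inputs assumed from the call sites above: configmap_name,
# configmap_namespace, configmap_file_names.
- name: Fetch the live configmap so existing user edits can be considered
  oc_configmap:
    state: list
    name: "{{ configmap_name }}"
    namespace: "{{ configmap_namespace }}"
  register: __current_configmap

# The real task file diffs live content against the generated file and
# honors protected_lines; this sketch only shows the simplest case of
# carrying a live file forward when one already exists.
- name: Reuse each live file in place of its freshly generated replacement
  copy:
    content: "{{ __current_configmap.results.results[0].data[item.current_file] }}"
    dest: "{{ item.new_file }}"
  with_items: "{{ configmap_file_names }}"
  when: item.current_file in (__current_configmap.results.results[0].data | default({}))
```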
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
deleted file mode 100644
index f61801714..000000000
--- a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/python
-
-""" Ansible module to help with setting facts conditionally based on other facts """
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-DOCUMENTATION = '''
----
-module: conditional_set_fact
-
-short_description: This will set a fact if the value is defined
-
-description:
-    - "To avoid constant set_fact & when conditions for each var we can use this"
-
-author:
-    - Eric Wolinetz ewolinet@redhat.com
-'''
-
-
-EXAMPLES = '''
-- name: Conditionally set fact
-  conditional_set_fact:
-    fact1: not_defined_variable
-
-- name: Conditionally set fact
-  conditional_set_fact:
-    fact1: not_defined_variable
-    fact2: defined_variable
-
-'''
-
-
-def run_module():
-    """ The body of the module, we check if the variable name specified as the value
-        for the key is defined. If it is then we use that value as for the original key """
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            facts=dict(type='dict', required=True),
-            vars=dict(required=False, type='dict', default=[])
-        ),
-        supports_check_mode=True
-    )
-
-    local_facts = dict()
-    is_changed = False
-
-    for param in module.params['vars']:
-        other_var = module.params['vars'][param]
-
-        if other_var in module.params['facts']:
-            local_facts[param] = module.params['facts'][other_var]
-            if not is_changed:
-                is_changed = True
-
-    return module.exit_json(changed=is_changed,  # noqa: F405
-                            ansible_facts=local_facts)
-
-
-def main():
-    """ main """
-    run_module()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/roles/openshift_sanitize_inventory/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml
index 324ba06d8..cde3eccb6 100644
--- a/roles/openshift_sanitize_inventory/meta/main.yml
+++ b/roles/openshift_sanitize_inventory/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
 - system
 dependencies:
 - role: lib_utils
+- role: lib_openshift
--
cgit v1.2.3


From eacc12897ca86a255f89b8a4537ce2b7004cf319 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Fri, 5 Jan 2018 12:44:56 -0500
Subject: Migrate to import_role for static role inclusion

In Ansible 2.2, the include_role directive came into existence as a
Tech Preview. It is still a Tech Preview through Ansible 2.4 (and in
the current devel branch), but with a notable change. The default
behavior switched from static: true to static: false because that
functionality moved to the newly introduced import_role directive (in
order to stay consistent with include* being dynamic in nature and
import* being static in nature).

The dynamic include is considerably more memory intensive, as it
dynamically creates a role import for every host in the inventory.
(Also worth noting, there is at the time of this writing an object
allocation inefficiency in the dynamic include that can, in certain
situations, amplify this effect considerably.)

This change is meant to mitigate the memory pressure on the Ansible
control host. We still need to evaluate where dynamic inclusion is
genuinely required and revert to it only in those places.
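At each call site the migration is a one-word swap; what changes is when
Ansible resolves the role. A minimal sketch of the two forms (role name
taken from the diffs below):

```yaml
# Dynamic: expanded at run time, once per host in the play.
- include_role:
    name: openshift_facts

# Static: expanded a single time, when the playbook is parsed.
- import_role:
    name: openshift_facts
```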
---
 docs/proposals/crt_management_proposal.md          |  8 ++++----
 docs/proposals/role_decomposition.md               | 14 ++++++-------
 openshift-ansible.spec                             |  8 ++++----
 playbooks/adhoc/openshift_hosted_logging_efk.yaml  |  2 +-
 playbooks/aws/openshift-cluster/install.yml        |  4 ++--
 playbooks/aws/openshift-cluster/provision.yml      |  2 +-
 .../aws/openshift-cluster/provision_instance.yml   |  2 +-
 .../aws/openshift-cluster/provision_nodes.yml      |  2 +-
 .../aws/openshift-cluster/provision_sec_group.yml  |  2 +-
 .../openshift-cluster/provision_ssh_keypair.yml    |  2 +-
 playbooks/aws/openshift-cluster/provision_vpc.yml  |  2 +-
 playbooks/aws/openshift-cluster/seal_ami.yml       |  2 +-
 .../upgrades/docker/docker_upgrade.yml             |  2 +-
 .../openshift-cluster/upgrades/pre/config.yml      |  2 +-
 .../upgrades/pre/verify_upgrade_targets.yml        |  2 +-
 .../upgrades/upgrade_control_plane.yml             |  6 +++---
 .../openshift-cluster/upgrades/upgrade_nodes.yml   |  6 +++---
 .../upgrades/upgrade_scale_group.yml               |  4 ++--
 playbooks/container-runtime/private/config.yml     |  6 +++---
 .../container-runtime/private/setup_storage.yml    |  2 +-
 playbooks/gcp/provision.yml                        |  2 +-
 playbooks/init/facts.yml                           |  2 +-
 playbooks/init/repos.yml                           |  4 ++--
 playbooks/openshift-etcd/private/ca.yml            |  2 +-
 .../openshift-etcd/private/certificates-backup.yml |  6 +++---
 .../openshift-etcd/private/embedded2external.yml   | 24 +++++++++++-----------
 playbooks/openshift-etcd/private/migrate.yml       | 14 ++++++-------
 playbooks/openshift-etcd/private/redeploy-ca.yml   |  8 ++++----
 playbooks/openshift-etcd/private/restart.yml       |  4 ++--
 playbooks/openshift-etcd/private/scaleup.yml       |  4 ++--
 .../openshift-etcd/private/server_certificates.yml |  2 +-
 .../openshift-etcd/private/upgrade_backup.yml      |  2 +-
 .../private/upgrade_image_members.yml              |  2 +-
 playbooks/openshift-etcd/private/upgrade_main.yml  |  2 +-
 .../openshift-etcd/private/upgrade_rpm_members.yml |  2 +-
 playbooks/openshift-etcd/private/upgrade_step.yml  |  4 ++--
 playbooks/openshift-glusterfs/private/config.yml   | 10 ++++-----
 .../openshift-hosted/private/install_docker_gc.yml |  2 +-
 .../private/openshift_hosted_create_projects.yml   |  2 +-
 .../private/openshift_hosted_registry.yml          |  2 +-
 .../private/openshift_hosted_registry_storage.yml  |  2 +-
 .../private/openshift_hosted_router.yml            |  2 +-
 .../private/openshift_hosted_wait_for_pods.yml     |  4 ++--
 .../private/redeploy-router-certificates.yml       |  2 +-
 playbooks/openshift-logging/private/config.yml     |  2 +-
 .../add_many_container_providers.yml               |  2 +-
 .../private/add_container_provider.yml             |  2 +-
 playbooks/openshift-management/private/config.yml  |  2 +-
 .../openshift-management/private/uninstall.yml     |  2 +-
 playbooks/openshift-master/private/config.yml      |  4 ++--
 .../private/tasks/restart_services.yml             |  2 +-
 playbooks/openshift-metrics/private/config.yml     |  2 +-
 .../openshift-node/private/additional_config.yml   |  2 +-
 playbooks/openshift-node/private/image_prep.yml    |  2 +-
 .../openstack/openshift-cluster/prerequisites.yml  |  4 ++--
 .../openstack/openshift-cluster/provision.yml      | 12 +++++------
 roles/calico/tasks/main.yml                        |  2 +-
 roles/container_runtime/README.md                  |  4 ++--
 roles/container_runtime/tasks/common/post.yml      |  2 +-
 roles/container_runtime/tasks/main.yml             |  2 +-
 .../callback_plugins/openshift_quick_installer.py  |  4 ++--
 roles/openshift_aws/README.md                      |  6 +++---
 roles/openshift_cluster_autoscaler/README.md       |  2 +-
 .../tasks/main.yml                                 |  2 +-
 roles/openshift_hosted/tasks/main.yml              |  4 ++--
 roles/openshift_logging/tasks/delete_logging.yaml  |  2 +-
 roles/openshift_logging/tasks/install_logging.yaml | 22 ++++++++++----------
 roles/openshift_logging_curator/tasks/main.yaml    |  2 +-
 .../tasks/main.yaml                                |  2 +-
 roles/openshift_logging_fluentd/tasks/main.yaml    |  2 +-
 roles/openshift_logging_mux/tasks/main.yaml        |  2 +-
 .../tasks/add_container_provider.yml               |  2 +-
 roles/openshift_management/tasks/main.yml          |  2 +-
 roles/openshift_management/tasks/storage/nfs.yml   |  6 +++---
 roles/openshift_nfs/tasks/create_export.yml        |  2 +-
 roles/os_firewall/README.md                        |  4 ++--
 76 files changed, 152 insertions(+), 152 deletions(-)
(limited to 'roles/openshift_logging_fluentd')

diff --git a/docs/proposals/crt_management_proposal.md b/docs/proposals/crt_management_proposal.md
index 5fc1ad08d..bf4048744 100644
--- a/docs/proposals/crt_management_proposal.md
+++ b/docs/proposals/crt_management_proposal.md
@@ -30,7 +30,7 @@ configure, restart, or change the container runtime as much as feasible.
 ## Design

 The container_runtime role should be comprised of 3 'pseudo-roles' which will be
-consumed using include_role; each component area should be enabled/disabled with
+consumed using import_role; each component area should be enabled/disabled with
 a boolean value, defaulting to true.

 I call them 'pseudo-roles' because they are more or less independent functional
@@ -46,15 +46,15 @@ an abundance of roles), and make things as modular as possible.
 # container_runtime_setup.yml
 - hosts: "{{ openshift_runtime_manage_hosts | default('oo_nodes_to_config') }}"
   tasks:
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: install.yml
       when: openshift_container_runtime_install | default(True) | bool
-    - include_role:
+    - import_role:
        name: container_runtime
        tasks_from: storage.yml
      when: openshift_container_runtime_storage | default(True) | bool
-    - include_role:
+    - import_role:
        name: container_runtime
        tasks_from: configure.yml
      when: openshift_container_runtime_configure | default(True) | bool
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 37d080d5c..61690e8bd 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -115,12 +115,12 @@ providing the location of the generated certificates to the individual roles.
     generated_certs_dir: "{{openshift.common.config_base}}/logging"

 ## Elasticsearch
-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"

-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -130,7 +130,7 @@

 ## Kibana
-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -144,7 +144,7 @@ providing the location of the generated certificates to the individual roles.
     openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
     openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"

-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -173,7 +173,7 @@ providing the location of the generated certificates to the individual roles.

 ## Curator
-- include_role:
+- import_role:
     name: openshift_logging_curator
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -183,7 +183,7 @@ providing the location of the generated certificates to the individual roles.
     openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
     openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"

-- include_role:
+- import_role:
     name: openshift_logging_curator
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -201,7 +201,7 @@ providing the location of the generated certificates to the individual roles.

 ## Fluentd
-- include_role:
+- import_role:
     name: openshift_logging_fluentd
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 80b5c23d0..06f5d3669 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -408,7 +408,7 @@ Atomic OpenShift Utilities includes
 - Update prometheus to 2.0.0 GA (zgalor@redhat.com)
 - remove schedulable from openshift_facts (mgugino@redhat.com)
 - inventory: Add example for service catalog vars (smilner@redhat.com)
-- Correct usage of include_role (rteague@redhat.com)
+- Correct usage of import_role (rteague@redhat.com)
 - Remove openshift.common.cli_image (mgugino@redhat.com)
 - Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
 - Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
@@ -1001,7 +1001,7 @@ Atomic OpenShift Utilities includes
 - Renaming csr to bootstrap for consistency. (kwoodson@redhat.com)
 - Add master config upgrade hook to upgrade-all plays (mgugino@redhat.com)
 - Remove 'Not Started' status from playbook checkpoint (rteague@redhat.com)
-- Force include_role to static for loading openshift_facts module
+- Force import_role to static for loading openshift_facts module
   (rteague@redhat.com)
 - Make openshift-ansible depend on all subpackages (sdodson@redhat.com)
 - Refactor health check playbooks (rteague@redhat.com)
@@ -3729,9 +3729,9 @@ Atomic OpenShift Utilities includes
 - run node upgrade if master is node as part of the control plan upgrade only
   (jchaloup@redhat.com)
 - Appease yamllint (sdodson@redhat.com)
-- Adding include_role to block to resolve when eval (ewolinet@redhat.com)
+- Adding import_role to block to resolve when eval (ewolinet@redhat.com)
 - Updating oc_apply to use command instead of shell (ewolinet@redhat.com)
-- Wrap openshift_hosted_logging include_role within a block.
+- Wrap openshift_hosted_logging import_role within a block.
   (abutcher@redhat.com)
 - Adding unit test. Fixed redudant calls to get. (kwoodson@redhat.com)
 - Fixing doc and generating new label with updated base. (kwoodson@redhat.com)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 69b2541bb..faeb332ad 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -10,7 +10,7 @@
   - set_fact:
       openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_logging
      tasks_from: update_master_config
    when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index b03fb0b7f..a3fc82f9a 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -2,7 +2,7 @@
 - name: Setup the master node group
   hosts: localhost
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_aws
       tasks_from: setup_master_group.yml

@@ -11,7 +11,7 @@
   gather_facts: no
   remote_user: root
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_aws
       tasks_from: master_facts.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 4b5bd22ea..7dde60b7d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -12,6 +12,6 @@
       msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"

   - name: provision cluster
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
index 6e843453c..6c7c1f069 100644
--- a/playbooks/aws/openshift-cluster/provision_instance.yml
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -7,6 +7,6 @@
   gather_facts: no
   tasks:
   - name: create an instance and prepare for ami
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 44c686e08..82f147865 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -13,6 +13,6 @@
       msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"

   - name: create the node groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
index 7d74a691a..a0d4ec728 100644
--- a/playbooks/aws/openshift-cluster/provision_sec_group.yml
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -7,7 +7,7 @@
   gather_facts: no
   tasks:
   - name: create security groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: security_group.yml
     when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
index 3ec683958..d86ff9f9b 100644
--- a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -4,7 +4,7 @@
   gather_facts: no
   tasks:
   - name: create an instance and prepare for ami
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: ssh_keys.yml
     vars:
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
index 0a23a6d32..cf72f6c87 100644
--- a/playbooks/aws/openshift-cluster/provision_vpc.yml
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -4,7 +4,7 @@
   gather_facts: no
   tasks:
   - name: create a vpc
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: vpc.yml
     when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
index 8239a64fb..f315db604 100644
--- a/playbooks/aws/openshift-cluster/seal_ami.yml
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -7,6 +7,6 @@
   become: no
   tasks:
   - name: seal the ami
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: seal_ami.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 28ddc3ded..ffb11670d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -16,7 +16,7 @@
         msg: Cannot upgrade Docker on Atomic operating systems.
       when: openshift_is_atomic | bool

-  - include_role:
+  - import_role:
       name: container_runtime
       tasks_from: docker_upgrade_check.yml
     when: docker_upgrade is not defined or docker_upgrade | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index de74c8ab8..cfc0c8745 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -72,6 +72,6 @@
 - name: Verify docker upgrade targets
   hosts: "{{ l_upgrade_docker_target_hosts }}"
   tasks:
-  - include_role:
+  - import_role:
       name: container_runtime
       tasks_from: docker_upgrade_check.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index b0b5a7e4b..4c1156f4b 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -5,7 +5,7 @@
     when: openshift.common.version is not defined

 - name: Update oreg_auth docker login credentials if necessary
-  include_role:
+  import_role:
     name: container_runtime
     tasks_from: registry_auth.yml
   when: oreg_auth_user is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0263e721d..91d496ff4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -50,7 +50,7 @@
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_facts

   # Run the pre-upgrade hook if defined:
@@ -60,7 +60,7 @@
   - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined

-  - include_role:
+  - import_role:
       name: openshift_master
       tasks_from: upgrade.yml

@@ -301,7 +301,7 @@
   roles:
   - openshift_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
     vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index ece69a3d5..aba179c2b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,7 +4,7 @@
   roles:
   - role: openshift_facts
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_node
       tasks_from: upgrade_pre.yml
     vars:
@@ -43,7 +43,7 @@
       delay: 60

   post_tasks:
-  - include_role:
+  - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
     vars:
@@ -62,7 +62,7 @@
 - name: Re-enable excluders
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_excluder
     vars:
       r_openshift_excluder_action: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index a90082760..6d59bfd0b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -3,7 +3,7 @@
   hosts: localhost
   tasks:
   - name: build upgrade scale groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: upgrade_node_group.yml

@@ -61,6 +61,6 @@
   hosts: localhost
   tasks:
   - name: clean up scale group
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: remove_scale_group.yml
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index d8fc93710..dd13fa4a2 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -8,19 +8,19 @@
   roles:
     - role: container_runtime
   tasks:
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: package_docker.yml
       when:
         - not openshift_docker_use_system_container | bool
         - not openshift_use_crio_only | bool
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: systemcontainer_docker.yml
       when:
         - openshift_docker_use_system_container | bool
         - not openshift_use_crio_only | bool
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: systemcontainer_crio.yml
       when:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 54fa5ca66..357f67f0c 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -8,7 +8,7 @@
   roles:
     - role: container_runtime
   tasks:
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: docker_storage_setup_overlay.yml
       when:
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/provision.yml
index 6016e6a78..b6edf9961 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -6,7 +6,7 @@
   tasks:

   - name: provision a GCP cluster in the specified project
-    include_role:
+    import_role:
       name: openshift_gcp

 - name: run the cluster deploy
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 9e411a551..6759240c9 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -13,7 +13,7 @@
     # TODO: Should this role be refactored into health_checks??
     - name: Run openshift_sanitize_inventory to set variables
-      include_role:
+      import_role:
         name: openshift_sanitize_inventory

     - name: Detecting Operating System from ostree_booted
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 866c889b6..667f38ddd 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -4,7 +4,7 @@
   gather_facts: no
   tasks:
   - name: subscribe instances to Red Hat Subscription Manager
-    include_role:
+    import_role:
       name: rhel_subscribe
     when:
     - ansible_distribution == 'RedHat'
@@ -12,5 +12,5 @@
     - rhsub_user is defined
     - rhsub_pass is defined
   - name: initialize openshift repos
-    include_role:
+    import_role:
       name: openshift_repos
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index f3bb3c2d1..72c39d546 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -5,7 +5,7 @@
   - role: openshift_clock
   - role: openshift_etcd_facts
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: ca.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index ce21a1f96..2f9bef799 100644
--- a/playbooks/openshift-etcd/private/certificates-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -3,10 +3,10 @@
   hosts: oo_first_etcd
   any_errors_fatal: true
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_generated_certificates.yml
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: remove_generated_certificates.yml

@@ -14,6 +14,6 @@
   hosts: oo_etcd_to_config
   any_errors_fatal: true
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index be177b714..b71eaacd0 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -18,7 +18,7 @@
   - role: openshift_facts
   tasks:
   - name: Check the master API is ready
-    include_role:
+    import_role:
       name: openshift_master
       tasks_from: check_master_api_is_ready.yml
   - set_fact:
@@ -31,8 +31,8 @@
       name: "{{ master_service }}"
       state: stopped
   # 2. backup embedded etcd
-  # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
-  - include_role:
+  # Can't use with_items with import_role: https://github.com/ansible/ansible/issues/21285
+  - import_role:
       name: etcd
       tasks_from: backup.yml
     vars:
@@ -40,7 +40,7 @@
       r_etcd_common_embedded_etcd: "{{ true }}"
       r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"

-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.archive.yml
     vars:
@@ -56,7 +56,7 @@
 - name: Backup etcd client certificates for master host
   hosts: oo_first_master
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_master_etcd_certificates.yml

@@ -73,10 +73,10 @@
   hosts: oo_etcd_to_config[0]
   gather_facts: no
   pre_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: disable_etcd.yml
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: clean_data.yml

@@ -91,7 +91,7 @@
     changed_when: False
     become: no

-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.fetch.yml
     vars:
@@ -101,7 +101,7 @@
       r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
     delegate_to: "{{ groups.oo_first_master[0] }}"

-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.copy.yml
     vars:
@@ -122,14 +122,14 @@
 - name: Force new etcd cluster
   hosts: oo_etcd_to_config[0]
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.unarchive.yml
     vars:
       r_etcd_common_backup_tag: pre-migrate
       r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"

-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.force_new_cluster.yml
     vars:
@@ -143,7 +143,7 @@
 - name: Configure master to use external etcd
   hosts: oo_first_master
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_master
       tasks_from: configure_external_etcd.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index cad0ebcaa..0a2ac7f1a 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -15,7 +15,7 @@
 - name: Run pre-checks
   hosts: oo_etcd_to_migrate
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.pre_check.yml
     vars:
@@ -43,7 +43,7 @@
   roles:
   - role: openshift_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.yml
     vars:
@@ -70,7 +70,7 @@
   hosts: oo_etcd_to_migrate
   gather_facts: no
   pre_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: disable_etcd.yml

@@ -78,7 +78,7 @@
   hosts: oo_etcd_to_migrate[0]
   gather_facts: no
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.yml
     vars:
@@ -90,7 +90,7 @@
   hosts: oo_etcd_to_migrate[1:]
   gather_facts: no
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: clean_data.yml
     vars:
@@ -126,7 +126,7 @@
 - name: Add TTLs on the first master
   hosts: oo_first_master[0]
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.add_ttls.yml
     vars:
@@ -138,7 +138,7 @@
 - name: Configure masters if etcd data migration is succesfull
   hosts: oo_masters_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.configure_master.yml
     when: etcd_migration_failed | length == 0
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index 0995945cc..7b0d99255 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -14,10 +14,10 @@
 - name: Backup existing etcd CA certificate directories
   hosts: oo_etcd_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_ca_certificates.yml
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: remove_ca_certificates.yml

@@ -37,7 +37,7 @@
 - name: Distribute etcd CA to etcd hosts
   hosts: oo_etcd_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: distribute_ca.yml
     vars:
@@ -54,7 +54,7 @@
 - name: Retrieve etcd CA certificate
   hosts: oo_first_etcd
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: retrieve_ca_certificates.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/restart.yml b/playbooks/openshift-etcd/private/restart.yml
index 0751480e2..a2a53651b 100644
--- a/playbooks/openshift-etcd/private/restart.yml
+++ b/playbooks/openshift-etcd/private/restart.yml
@@ -3,7 +3,7 @@
   hosts: oo_etcd_to_config
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: restart.yml
     when:
@@ -12,7 +12,7 @@
 - name: Restart etcd
   hosts: oo_etcd_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: restart.yml
     when:
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index dc667958f..8a9811a25 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -30,7 +30,7 @@
     retries: 3
     delay: 10
     until: etcd_add_check.rc == 0
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: server_certificates.yml
     vars:
@@ -76,6 +76,6 @@
   roles:
   - role: openshift_master_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: openshift_master
       tasks_from: update_etcd_client_urls.yml
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 695b53990..ebcf4a5ff 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -5,7 +5,7 @@
   roles:
   - role: openshift_etcd_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: server_certificates.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 0d8943d93..97b6edba5 100644
--- a/playbooks/openshift-etcd/private/upgrade_backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -4,7 +4,7 @@
   roles:
   - role: openshift_etcd_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index d4386249e..f9e50e748 100644
--- a/playbooks/openshift-etcd/private/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -6,7 +6,7 @@
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: upgrade_image.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index e373a4a4c..8997680f9 100644
--- a/playbooks/openshift-etcd/private/upgrade_main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -14,7 +14,7 @@
 - name: Drop etcdctl profiles
   hosts: oo_etcd_hosts_to_upgrade
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: drop_etcdctl.yml
diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index f7fe6cd9c..e78cc5826 100644
--- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -6,7 +6,7 @@
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: upgrade_rpm.yml
     vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index 05c543d62..6aec838d4 100644
--- a/playbooks/openshift-etcd/private/upgrade_step.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -2,7 +2,7 @@
 - name: Determine etcd version
   hosts: oo_etcd_hosts_to_upgrade
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: version_detect.yml

@@ -54,7 +54,7 @@
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: upgrade_image.yml
     vars:
diff --git a/playbooks/openshift-glusterfs/private/config.yml b/playbooks/openshift-glusterfs/private/config.yml
index 19e14ab3e..9a5bc143d 100644
--- a/playbooks/openshift-glusterfs/private/config.yml
+++ b/playbooks/openshift-glusterfs/private/config.yml
@@ -14,12 +14,12 @@
 - name: Open firewall ports for GlusterFS nodes
   hosts: glusterfs
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_storage_glusterfs
       tasks_from: firewall.yml
     when:
    - openshift_storage_glusterfs_is_native | default(True) | bool
-  - include_role:
+  - import_role:
       name: openshift_storage_glusterfs
       tasks_from: kernel_modules.yml
     when:
@@ -28,12 +28,12 @@
 - name: Open firewall ports for GlusterFS registry nodes
   hosts: glusterfs_registry
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_storage_glusterfs
       tasks_from: firewall.yml
     when:
    - openshift_storage_glusterfs_registry_is_native | default(True) | bool
-  - include_role:
+  - import_role:
       name: openshift_storage_glusterfs
       tasks_from: kernel_modules.yml
     when:
@@ -43,7 +43,7 @@
   hosts: oo_first_master
   tasks:
   - name: setup glusterfs
-    include_role:
+    import_role:
       name: openshift_storage_glusterfs
     when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/openshift-hosted/private/install_docker_gc.yml b/playbooks/openshift-hosted/private/install_docker_gc.yml
index 1e3dfee07..03eb542d3 100644
--- a/playbooks/openshift-hosted/private/install_docker_gc.yml
+++ b/playbooks/openshift-hosted/private/install_docker_gc.yml
@@ -3,5 +3,5 @@
   hosts: oo_first_master
   gather_facts: false
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_docker_gc
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
index d5ca5185c..b09432da2 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
@@ -2,6 +2,6 @@
 - name: Create Hosted Resources - openshift projects
   hosts: oo_first_master
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_hosted
       tasks_from: create_projects.yml
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
index 2a91a827c..659c95eda 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
@@ -5,7 +5,7 @@
   - set_fact:
       openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
}}" when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - include_role: + - import_role: name: openshift_hosted tasks_from: registry.yml when: diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml index 9a407b69e..cfc47c9b2 100644 --- a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml +++ b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml @@ -5,7 +5,7 @@ - name: Poll for hosted pod deployments hosts: oo_first_master tasks: - - include_role: + - import_role: name: openshift_hosted tasks_from: registry_storage.yml when: diff --git a/playbooks/openshift-hosted/private/openshift_hosted_router.yml b/playbooks/openshift-hosted/private/openshift_hosted_router.yml index bcb5a34a4..353377189 100644 --- a/playbooks/openshift-hosted/private/openshift_hosted_router.yml +++ b/playbooks/openshift-hosted/private/openshift_hosted_router.yml @@ -5,7 +5,7 @@ - set_fact: openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - include_role: + - import_role: name: openshift_hosted tasks_from: router.yml when: diff --git a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml index 204cb1781..1f6868c2a 100644 --- a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml +++ b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml @@ -5,7 +5,7 @@ - name: Poll for hosted pod deployments hosts: oo_first_master tasks: - - include_role: + - import_role: name: openshift_hosted tasks_from: wait_for_pod.yml vars: @@ -15,7 +15,7 @@ - openshift_hosted_manage_router | default(True) | bool - openshift_hosted_router_registryurl is defined - - include_role: + - import_role: name: openshift_hosted tasks_from: wait_for_pod.yml vars: diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index c19147d41..0df748f47 100644 --- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml @@ -115,7 +115,7 @@ - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - - include_role: + - import_role: name: openshift_hosted tasks_from: main vars: diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml index bc59bd95a..d5256f55c 100644 --- a/playbooks/openshift-logging/private/config.yml +++ b/playbooks/openshift-logging/private/config.yml @@ -20,7 +20,7 @@ hosts: oo_masters:!oo_first_master tasks: - block: - - include_role: + - import_role: name: openshift_logging tasks_from: update_master_config diff --git a/playbooks/openshift-management/add_many_container_providers.yml b/playbooks/openshift-management/add_many_container_providers.yml index 62fdb11c5..45231a495 100644 --- a/playbooks/openshift-management/add_many_container_providers.yml +++ b/playbooks/openshift-management/add_many_container_providers.yml @@ -27,7 +27,7 @@ register: 
   # Include openshift_management for access to filter_plugins.
-  - include_role:
+  - import_role:
       name: openshift_management
       tasks_from: noop
diff --git a/playbooks/openshift-management/private/add_container_provider.yml b/playbooks/openshift-management/private/add_container_provider.yml
index facb3a5b9..25d4058e5 100644
--- a/playbooks/openshift-management/private/add_container_provider.yml
+++ b/playbooks/openshift-management/private/add_container_provider.yml
@@ -3,6 +3,6 @@
   hosts: oo_first_master
   tasks:
   - name: Run the Management Integration Tasks
-    include_role:
+    import_role:
       name: openshift_management
       tasks_from: add_container_provider
diff --git a/playbooks/openshift-management/private/config.yml b/playbooks/openshift-management/private/config.yml
index 3f1cdf713..22f3ee8f3 100644
--- a/playbooks/openshift-management/private/config.yml
+++ b/playbooks/openshift-management/private/config.yml
@@ -21,7 +21,7 @@

   tasks:
     - name: Run the CFME Setup Role
-      include_role:
+      import_role:
         name: openshift_management
       vars:
         template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
diff --git a/playbooks/openshift-management/private/uninstall.yml b/playbooks/openshift-management/private/uninstall.yml
index 9f35cc276..6097ea45a 100644
--- a/playbooks/openshift-management/private/uninstall.yml
+++ b/playbooks/openshift-management/private/uninstall.yml
@@ -3,6 +3,6 @@
   hosts: masters[0]
   tasks:
   - name: Run the CFME Uninstall Role Tasks
-    include_role:
+    import_role:
       name: openshift_management
       tasks_from: uninstall
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index e53a6f093..4752ba78e 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -206,13 +206,13 @@
   - role: calico_master
     when: openshift_use_calico | default(false) | bool
   tasks:
-  - include_role:
+  - import_role:
       name: kuryr
       tasks_from: master
     when: openshift_use_kuryr | default(false) | bool

   - name: Setup the node group config maps
-    include_role:
+    import_role:
       name: openshift_node_group
     when: openshift_master_bootstrap_enabled | default(false) | bool
     run_once: True
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
index 4e1b3a3be..cf2c282e3 100644
--- a/playbooks/openshift-master/private/tasks/restart_services.yml
+++ b/playbooks/openshift-master/private/tasks/restart_services.yml
@@ -1,4 +1,4 @@
 ---
-- include_role:
+- import_role:
     name: openshift_master
     tasks_from: restart.yml
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 80cd93e5f..327f034d3 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -21,7 +21,7 @@
   serial: 1
   tasks:
   - name: Setup the non-first masters configs
-    include_role:
+    import_role:
       name: openshift_metrics
       tasks_from: update_master_config.yaml
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index b86cb3cc2..54ed1927d 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -57,7 +57,7 @@
 - name: Configure Kuryr node
   hosts: oo_nodes_use_kuryr
   tasks:
-  - include_role:
+  - import_role:
       name: kuryr
       tasks_from: node
     when: openshift_use_kuryr | default(false) | bool
diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml
index c0ddcd926..adcbb0fdb 100644
--- a/playbooks/openshift-node/private/image_prep.yml
+++ b/playbooks/openshift-node/private/image_prep.yml
@@ -15,7 +15,7 @@
   - name: node bootstrap config
     hosts: oo_nodes_to_config:!oo_containerized_master_nodes
     tasks:
-    - include_role:
+    - import_role:
         name: openshift_node
         tasks_from: bootstrap.yml
diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml
index 0356b37dd..8bb700501 100644
--- a/playbooks/openstack/openshift-cluster/prerequisites.yml
+++ b/playbooks/openstack/openshift-cluster/prerequisites.yml
@@ -2,11 +2,11 @@
 - hosts: localhost
   tasks:
   - name: Check dependencies and OpenStack prerequisites
-    include_role:
+    import_role:
       name: openshift_openstack
       tasks_from: check-prerequisites.yml

   - name: Check network configuration
-    include_role:
+    import_role:
       name: openshift_openstack
       tasks_from: net_vars_check.yaml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index fa5c91ace..a38d7bff7 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -3,7 +3,7 @@
   hosts: localhost
   tasks:
   - name: provision cluster
-    include_role:
+    import_role:
       name: openshift_openstack
       tasks_from: provision.yml

@@ -36,7 +36,7 @@
   hosts: localhost
   tasks:
   - name: Populate DNS entries
-    include_role:
+    import_role:
       name: openshift_openstack
       tasks_from: populate-dns.yml
     when:
@@ -49,7 +49,7 @@
   gather_facts: yes
   tasks:
   - name: Subscribe RHEL instances
-    include_role:
+    import_role:
       name: rhel_subscribe
     when:
     - ansible_distribution == "RedHat"
@@ -57,18 +57,18 @@
     - rhsub_pass is defined

   - name: Enable required YUM repositories
-    include_role:
+    import_role:
       name: openshift_repos
     when:
     - ansible_distribution == "RedHat"
    - rh_subscribed is defined

   - name: Install dependencies
-    include_role:
+    import_role:
       name: openshift_openstack
       tasks_from: node-packages.yml

   - name: Configure Node
-    include_role:
+    import_role:
       name: openshift_openstack
       tasks_from: node-configuration.yml
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index bbc6edd48..556953a71 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -7,7 +7,7 @@
     - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)

 - name: Calico Node | Generate OpenShift-etcd certs
-  include_role:
+  import_role:
     name: etcd
     tasks_from: client_certificates
   when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index 51f469aaf..665b1b012 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -5,7 +5,7 @@ Ensures docker package or system container is installed, and optionally raises t

 container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file

-This role is designed to be used with include_role and tasks_from.
+This role is designed to be used with import_role and tasks_from.

 Entry points
 ------------
@@ -30,7 +30,7 @@ Example Playbook

 - hosts: servers
   tasks:
-  - include_role: container_runtime
+  - import_role: container_runtime
     tasks_from: package_docker.yml

 License
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
index d790eb2c0..b90190ebf 100644
--- a/roles/container_runtime/tasks/common/post.yml
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -11,7 +11,7 @@
 - meta: flush_handlers

 # This needs to run after docker is restarted to account for proxy settings.
-# registry_auth is called directly with include_role in some places, so we
+# registry_auth is called directly with import_role in some places, so we
 # have to put it in the root of the tasks/ directory.
 - include_tasks: ../registry_auth.yml
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 96d8606c6..07da831c4 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,2 +1,2 @@
 ---
-# This role is meant to be used with include_role and tasks_from.
+# This role is meant to be used with import_role and tasks_from.
diff --git a/roles/lib_utils/callback_plugins/openshift_quick_installer.py b/roles/lib_utils/callback_plugins/openshift_quick_installer.py
index c0fdbc650..365e2443d 100644
--- a/roles/lib_utils/callback_plugins/openshift_quick_installer.py
+++ b/roles/lib_utils/callback_plugins/openshift_quick_installer.py
@@ -192,7 +192,7 @@ The only thing we change here is adding `log_only=True` to the
         """
         delegated_vars = result._result.get('_ansible_delegated_vars', None)
         self._clean_results(result._result, result._task.action)
-        if result._task.action in ('include', 'include_role'):
+        if result._task.action in ('include', 'import_role'):
             return
         elif result._result.get('changed', False):
             if delegated_vars:
@@ -220,7 +220,7 @@ The only thing we change here is adding `log_only=True` to the
     def v2_runner_item_on_ok(self, result):
         """Print out task results for items you're iterating over"""
         delegated_vars = result._result.get('_ansible_delegated_vars', None)
-        if result._task.action in ('include', 'include_role'):
+        if result._task.action in ('include', 'import_role'):
             return
         elif result._result.get('changed', False):
             msg = 'changed'
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 4aca5c7a8..de73ab01d 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions
 against an AWS account for the purposes of dynamically building an openshift
 cluster.

-This role is primarily intended to be used with "include_role" and "tasks_from".
+This role is primarily intended to be used with "import_role" and "tasks_from".

-include_role can be called from the tasks section in a play.  See example
+import_role can be called from the tasks section in a play.  See example
 playbook below for reference.

 These task-areas are:
@@ -40,7 +40,7 @@ Example Playbook
 ----------------

 ```yaml
-- include_role:
+- import_role:
     name: openshift_aws
     tasks_from: vpc.yml
   vars:
diff --git a/roles/openshift_cluster_autoscaler/README.md b/roles/openshift_cluster_autoscaler/README.md
index d775a8a71..137ae0cef 100644
--- a/roles/openshift_cluster_autoscaler/README.md
+++ b/roles/openshift_cluster_autoscaler/README.md
@@ -28,7 +28,7 @@ Example Playbook
   remote_user: root
   tasks:
   - name: include role autoscaler
-    include_role:
+    import_role:
       name: openshift_cluster_autoscaler
     vars:
       openshift_clusterid: opstest
diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml
index 7f8b667f0..18d07fc2f 100644
--- a/roles/openshift_etcd_client_certificates/tasks/main.yml
+++ b/roles/openshift_etcd_client_certificates/tasks/main.yml
@@ -1,4 +1,4 @@
 ---
-- include_role:
+- import_role:
     name: etcd
     tasks_from: client_certificates
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index d306adf42..57f59f872 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-# This role is intended to be used with include_role.
-# include_role:
+# This role is intended to be used with import_role.
+# import_role:
 #   name: openshift_hosted
 #   tasks_from: "{{ item }}"
 # with_items:
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 51d6d0efd..b1ceade88 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -126,7 +126,7 @@
   - __logging_ops_projects.stderr | length == 0

 ## EventRouter
-- include_role:
+- import_role:
     name: openshift_logging_eventrouter
   when: not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 913478027..6aae251c1 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -75,7 +75,7 @@
     elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"

 # We don't allow scaling down of ES nodes currently
-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -103,7 +103,7 @@
   - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0

 # Create any new DC that may be required
-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -137,7 +137,7 @@
   when:
   - openshift_logging_use_ops | bool

-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -180,7 +180,7 @@
   - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0

 # Create any new DC that may be required
-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -213,7 +213,7 @@

 ## Kibana
-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
"{{openshift.common.config_base}}/logging" @@ -226,7 +226,7 @@ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" -- include_role: +- import_role: name: openshift_logging_kibana vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -256,7 +256,7 @@ - include_tasks: annotate_ops_projects.yaml ## Curator -- include_role: +- import_role: name: openshift_logging_curator vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -266,7 +266,7 @@ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" -- include_role: +- import_role: name: openshift_logging_curator vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -284,7 +284,7 @@ - openshift_logging_use_ops | bool ## Mux -- include_role: +- import_role: name: openshift_logging_mux vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -297,7 +297,7 @@ ## Fluentd -- include_role: +- import_role: name: openshift_logging_fluentd vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -308,7 +308,7 @@ ## EventRouter -- include_role: +- import_role: name: openshift_logging_eventrouter when: openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index 53b464113..cc68998f5 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -56,7 +56,7 @@ dest: "{{ tempdir }}/curator.yml" changed_when: no -- include_role: +- import_role: name: openshift_logging tasks_from: patch_configmap_files.yaml vars: diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 9e7646379..9bd37f33c 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -181,7 +181,7 @@ changed_when: no # create diff between current configmap files and our current files -- include_role: +- import_role: name: openshift_logging tasks_from: patch_configmap_files.yaml vars: diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 486cfb8bc..529859983 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -117,7 +117,7 @@ src: secure-forward.conf dest: "{{ tempdir }}/secure-forward.conf" -- include_role: +- import_role: name: openshift_logging tasks_from: patch_configmap_files.yaml vars: diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index a281c6a53..34bdb891c 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -95,7 +95,7 @@ dest: "{{mktemp.stdout}}/secure-forward-mux.conf" changed_when: no -- include_role: +- import_role: name: openshift_logging tasks_from: patch_configmap_files.yaml vars: diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml index ca381b105..357e6a710 100644 --- a/roles/openshift_management/tasks/add_container_provider.yml +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -1,6 +1,6 @@ --- - name: Ensure OpenShift facts module is available - include_role: + import_role: role: openshift_facts - name: 
Ensure OpenShift facts are loaded diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index f212dba7c..c4b204b98 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -8,7 +8,7 @@ # This creates a service account allowing Container Provider # integration (managing OCP/Origin via MIQ/Management) - name: Enable Container Provider Integration - include_role: + import_role: role: openshift_manageiq - name: "Ensure the Management '{{ openshift_management_project }}' namespace exists" diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 94e11137c..9e3a4d43a 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -5,14 +5,14 @@ - name: Setting up NFS storage block: - name: Include the NFS Setup role tasks - include_role: + import_role: role: openshift_nfs tasks_from: setup vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - name: Create the App export - include_role: + import_role: role: openshift_nfs tasks_from: create_export vars: @@ -22,7 +22,7 @@ l_nfs_options: "*(rw,no_root_squash,no_wdelay)" - name: Create the DB export - include_role: + import_role: role: openshift_nfs tasks_from: create_export vars: diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml index 5fcdbf76e..331685289 100644 --- a/roles/openshift_nfs/tasks/create_export.yml +++ b/roles/openshift_nfs/tasks/create_export.yml @@ -3,7 +3,7 @@ # # Include signature # -# include_role: +# import_role: # role: openshift_nfs # tasks_from: create_export # vars: diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index be0b8291a..5ee11f7bd 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -32,7 +32,7 @@ Use iptables: --- - hosts: servers task: - - include_role: + - import_role: name: os_firewall vars: os_firewall_use_firewalld: false @@ -44,7 +44,7 @@ Use firewalld: - hosts: servers vars: tasks: - - include_role: + - import_role: name: os_firewall vars: os_firewall_use_firewalld: true -- cgit v1.2.3 From cb5e1a99a19b4fa498607f344fd6067493976f6a Mon Sep 17 00:00:00 2001 From: Noriko Hosoi Date: Tue, 9 Jan 2018 10:09:37 -0800 Subject: Bug 1527178 - installation of logging stack failed: Invalid version specified for Elasticsearch openshift_logging_{curator,elasticsearch,fluentd,kibana,mux}/vars/main.yml: - adding "3_9" to __allowed_.*_versions - bumping __latest_.*_version to "3_9" --- roles/openshift_logging_curator/vars/main.yml | 4 ++-- roles/openshift_logging_elasticsearch/vars/main.yml | 4 ++-- roles/openshift_logging_fluentd/vars/main.yml | 4 ++-- roles/openshift_logging_kibana/vars/main.yml | 4 ++-- roles/openshift_logging_mux/vars/main.yml | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'roles/openshift_logging_fluentd') diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 5bee58725..df5299a83 100--- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_8" -__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_curator_version: "3_9" +__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml
b/roles/openshift_logging_elasticsearch/vars/main.yml index 0e56a6eac..db28244e0 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_8" -__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_es_version: "3_9" +__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index 762e3d4d0..b60da814f 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_8" -__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_fluentd_version: "3_9" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index a2c54d8e4..fed926a3b 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_8" -__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_kibana_version: "3_9" +__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index 1da053b4a..e87205bad 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_8" -__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_mux_version: "3_9" +__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] -- cgit v1.2.3 From 3b07acdcd41e215dedc4d4c7c7303b807e59333d Mon Sep 17 00:00:00 2001 From: Michael Gugino Date: Tue, 9 Jan 2018 14:11:16 -0500 Subject: Remove become statements This commit removes become:no statements that break the installer in various ways. 
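For context, a minimal before/after sketch of the pattern this commit removes. The task is representative of the mktemp/local_action hunks in the diff below, not a literal hunk, and the register name is illustrative; the comment's explanation is one plausible reading of "break the installer in various ways":

```yaml
# Before: a hard-coded `become: no` pins privilege escalation off for this
# localhost task regardless of play- or inventory-level settings.
- name: Create local temp directory for syncing certs
  local_action: command mktemp -d /tmp/openshift-ansible-XXXXXX
  register: local_cert_mktemp
  changed_when: False
  become: no

# After: the directive is dropped and the task inherits the play's normal
# privilege-escalation behavior.
- name: Create local temp directory for syncing certs
  local_action: command mktemp -d /tmp/openshift-ansible-XXXXXX
  register: local_cert_mktemp
  changed_when: False
```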
--- .../openshift-cluster/upgrades/create_service_signer_cert.yml | 2 -- .../common/openshift-cluster/upgrades/upgrade_control_plane.yml | 2 -- playbooks/init/evaluate_groups.yml | 1 - playbooks/openshift-etcd/private/embedded2external.yml | 2 -- playbooks/openshift-etcd/private/migrate.yml | 2 -- playbooks/openshift-etcd/private/redeploy-ca.yml | 2 -- playbooks/openshift-etcd/private/upgrade_backup.yml | 1 - playbooks/openshift-master/private/redeploy-openshift-ca.yml | 2 -- playbooks/openshift-master/private/tasks/restart_hosts.yml | 1 - playbooks/openshift-master/private/validate_restart.yml | 2 -- playbooks/openshift-master/scaleup.yml | 1 - playbooks/openshift-node/private/setup.yml | 1 - playbooks/openshift-node/scaleup.yml | 1 - .../openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py | 1 - roles/openshift_certificate_expiry/tasks/main.yml | 2 -- roles/openshift_expand_partition/README.md | 2 -- roles/openshift_logging/tasks/generate_jks.yaml | 6 ------ roles/openshift_logging/tasks/main.yaml | 2 -- roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 1 - roles/openshift_master_certificates/tasks/main.yml | 2 -- roles/openshift_named_certificates/tasks/main.yml | 1 - roles/openshift_storage_nfs_lvm/README.md | 8 +++----- roles/openshift_web_console/tasks/install.yml | 2 -- roles/openshift_web_console/tasks/update_asset_config.yml | 2 -- roles/template_service_broker/tasks/install.yml | 2 -- roles/template_service_broker/tasks/remove.yml | 2 -- 26 files changed, 3 insertions(+), 50 deletions(-) (limited to 'roles/openshift_logging_fluentd') diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml index 372a39e74..00a731cb0 100644 --- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml +++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml @@ -2,7 +2,6 @@ - name: Create local temp directory for syncing certs hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -65,7 +64,6 @@ - name: Delete local temp directory hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Delete local temp directory diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 50be0dee0..412075d41 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -108,7 +108,6 @@ - name: Gate on master update hosts: localhost connection: local - become: no tasks: - set_fact: master_update_completed: "{{ hostvars @@ -242,7 +241,6 @@ - name: Gate on reconcile hosts: localhost connection: local - become: no tasks: - set_fact: reconcile_completed: "{{ hostvars diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml index 8087f6ffc..c4cd226c9 100644 --- a/playbooks/init/evaluate_groups.yml +++ b/playbooks/init/evaluate_groups.yml @@ -2,7 +2,6 @@ - name: Populate config host groups hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Load group name mapping variables diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml index b71eaacd0..917cfc800 100644 --- a/playbooks/openshift-etcd/private/embedded2external.yml +++ 
b/playbooks/openshift-etcd/private/embedded2external.yml @@ -89,7 +89,6 @@ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX register: g_etcd_client_mktemp changed_when: False - become: no - import_role: name: etcd @@ -116,7 +115,6 @@ - name: Delete temporary directory local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent changed_when: False - become: no # 7. force new cluster from the backup - name: Force new etcd cluster diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml index 0a2ac7f1a..3f8b44032 100644 --- a/playbooks/openshift-etcd/private/migrate.yml +++ b/playbooks/openshift-etcd/private/migrate.yml @@ -2,7 +2,6 @@ - name: Check if the master has embedded etcd hosts: localhost connection: local - become: no gather_facts: no tags: - always @@ -53,7 +52,6 @@ - name: Gate on etcd backup hosts: localhost connection: local - become: no tasks: - set_fact: etcd_backup_completed: "{{ hostvars diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml index 7b0d99255..e6dd87de1 100644 --- a/playbooks/openshift-etcd/private/redeploy-ca.yml +++ b/playbooks/openshift-etcd/private/redeploy-ca.yml @@ -26,7 +26,6 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -74,7 +73,6 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - file: diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml index 97b6edba5..081c024fc 100644 --- a/playbooks/openshift-etcd/private/upgrade_backup.yml +++ b/playbooks/openshift-etcd/private/upgrade_backup.yml @@ -14,7 +14,6 @@ - name: Gate on etcd backup hosts: localhost connection: local - become: no tasks: - set_fact: etcd_backup_completed: "{{ hostvars diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml index 9d3c12ba1..f649af976 100644 --- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml +++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml @@ -125,7 +125,6 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -264,7 +263,6 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - file: diff --git a/playbooks/openshift-master/private/tasks/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml index a5dbe0590..76e1ea5f3 100644 --- a/playbooks/openshift-master/private/tasks/restart_hosts.yml +++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml @@ -27,7 +27,6 @@ delay=10 timeout=600 port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}" - become: no # Now that ssh is back up we can wait for API on the remote system, # avoiding some potential connection issues from local system: diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml index 1077d0b9c..60b0e5bb6 100644 --- a/playbooks/openshift-master/private/validate_restart.yml +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -21,7 +21,6 @@ - name: Create temp file on 
localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - local_action: command mktemp @@ -38,7 +37,6 @@ - name: Cleanup temp file on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml index f717cd0e9..7d31340a2 100644 --- a/playbooks/openshift-master/scaleup.yml +++ b/playbooks/openshift-master/scaleup.yml @@ -4,7 +4,6 @@ - name: Ensure there are new_masters or new_nodes hosts: localhost connection: local - become: no gather_facts: no tasks: - fail: diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml index 802dce37e..41c323f2b 100644 --- a/playbooks/openshift-node/private/setup.yml +++ b/playbooks/openshift-node/private/setup.yml @@ -8,7 +8,6 @@ - name: Evaluate node groups hosts: localhost - become: no connection: local tasks: - name: Evaluate oo_containerized_master_nodes diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml index bdfd3d3e6..cf13692ae 100644 --- a/playbooks/openshift-node/scaleup.yml +++ b/playbooks/openshift-node/scaleup.yml @@ -4,7 +4,6 @@ - name: Ensure there are new_nodes hosts: localhost connection: local - become: no gather_facts: no tasks: - fail: diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py index a2bc9ecdb..58b228fee 100644 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py @@ -31,7 +31,6 @@ certificates Example playbook usage: - name: Generate expiration results JSON - become: no run_once: yes delegate_to: localhost when: openshift_certificate_expiry_save_json_results|bool diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index b5234bd1e..8dea2c07f 100644 --- a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -7,7 +7,6 @@ register: check_results - name: Generate expiration report HTML - become: no run_once: yes template: src: cert-expiry-table.html.j2 @@ -21,7 +20,6 @@ when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file - become: no run_once: yes template: src: save_json_results.j2 diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md index c9c7b378c..402c3dc3e 100644 --- a/roles/openshift_expand_partition/README.md +++ b/roles/openshift_expand_partition/README.md @@ -45,7 +45,6 @@ space on /dev/xvda, and the file system will be expanded to fill the new partition space. - hosts: mynodes - become: no remote_user: root gather_facts: no roles: @@ -68,7 +67,6 @@ partition space. 
* Create an ansible playbook, say `expandvar.yaml`: ``` - hosts: mynodes - become: no remote_user: root gather_facts: no roles: diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml index d6ac88dcc..6e3204589 100644 --- a/roles/openshift_logging/tasks/generate_jks.yaml +++ b/roles/openshift_logging/tasks/generate_jks.yaml @@ -24,25 +24,21 @@ local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r" when: elasticsearch_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r" when: logging_es_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r" when: system_admin_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r" when: truststore_jks.stat.exists changed_when: False - become: no - name: pulling down signing items from host fetch: @@ -61,12 +57,10 @@ vars: - top_dir: "{{local_tmp.stdout}}" when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - become: no - name: Run JKS generation script local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}} check_mode: no - become: no when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Pushing locally generated JKS certs to remote host... 
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 9949bb95d..1d8f2c53a 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -17,7 +17,6 @@ register: local_tmp changed_when: False check_mode: no - become: no - include_tasks: install_logging.yaml when: @@ -31,4 +30,3 @@ local_action: file path="{{local_tmp.stdout}}" state=absent tags: logging_cleanup changed_when: False - become: no diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml index 1cef6c25e..2721438f0 100644 --- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -8,4 +8,3 @@ # wait half a second between labels - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} - become: no diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 00cabe574..a80950cde 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -120,7 +120,6 @@ register: g_master_certs_mktemp changed_when: False when: master_certs_missing | bool - become: no - name: Create a tarball of the master certs command: > @@ -157,7 +156,6 @@ local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent changed_when: False when: master_certs_missing | bool - become: no - name: Lookup default group for ansible_ssh_user command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}" diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml index ad5472445..021fa8385 100644 --- a/roles/openshift_named_certificates/tasks/main.yml +++ b/roles/openshift_named_certificates/tasks/main.yml @@ -3,7 +3,6 @@ parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}" when: named_certificates | length > 0 delegate_to: localhost - become: no run_once: true - openshift_facts: diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md index cc674d3fd..a11219f6d 100644 --- a/roles/openshift_storage_nfs_lvm/README.md +++ b/roles/openshift_storage_nfs_lvm/README.md @@ -1,7 +1,7 @@ # openshift_storage_nfs_lvm This role is useful to create and export nfs disks for openshift persistent volumes. -It does so by creating lvm partitions on an already setup pv/vg, creating xfs +It does so by creating lvm partitions on an already setup pv/vg, creating xfs filesystem on each partition, mounting the partitions, exporting the mounts via NFS and creating a json file for each mount that an openshift master can use to create persistent volumes. @@ -20,7 +20,7 @@ create persistent volumes. osnl_nfs_export_options: "*(rw,sync,all_squash)" # Directory, where the created partitions should be mounted. They will be -# mounted as / +# mounted as / osnl_mount_dir: /exports/openshift # Volume Group to use. @@ -64,11 +64,10 @@ None ## Example Playbook With this playbook, 2 5Gig lvm partitions are created, named stg5g0003 and stg5g0004 -Both of them are mounted into `/exports/openshift` directory. Both directories are +Both of them are mounted into `/exports/openshift` directory. Both directories are exported via NFS. json files are created in /root. 
- hosts: nfsservers - become: no remote_user: root gather_facts: no roles: @@ -94,7 +93,6 @@ exported via NFS. json files are created in /root. * Create an ansible playbook, say `setupnfs.yaml`: ``` - hosts: nfsservers - become: no remote_user: root gather_facts: no roles: diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml index 8120c13e3..8ee95e36b 100644 --- a/roles/openshift_web_console/tasks/install.yml +++ b/roles/openshift_web_console/tasks/install.yml @@ -23,7 +23,6 @@ command: mktemp -d /tmp/console-ansible-XXXXXX register: mktemp changed_when: False - become: no - name: Copy asset config template to temp directory copy: @@ -76,4 +75,3 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False - become: no diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_asset_config.yml index 36e37e35d..0992b32e1 100644 --- a/roles/openshift_web_console/tasks/update_asset_config.yml +++ b/roles/openshift_web_console/tasks/update_asset_config.yml @@ -30,7 +30,6 @@ command: mktemp -d /tmp/console-ansible-XXXXXX register: mktemp changed_when: False - become: no - name: Copy asset config to temp file copy: @@ -55,7 +54,6 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False - become: no # There's currently no command to trigger a rollout for a k8s deployment # without changing the pod spec. Add an annotation to force a rollout after diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 765263db5..604e94602 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -21,7 +21,6 @@ - command: mktemp -d /tmp/tsb-ansible-XXXXXX register: mktemp changed_when: False - become: no - copy: src: "{{ __tsb_files_location }}/{{ item }}" @@ -86,4 +85,3 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False - become: no diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 8b4d798db..db1b558e4 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -2,7 +2,6 @@ - command: mktemp -d /tmp/tsb-ansible-XXXXXX register: mktemp changed_when: False - become: no - copy: src: "{{ __tsb_files_location }}/{{ item }}" @@ -32,4 +31,3 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False - become: no -- cgit v1.2.3 From d07d3d922cec9e88fe16d4392c738ec7e3e1f82e Mon Sep 17 00:00:00 2001 From: Vadim Rutkovsky Date: Wed, 10 Jan 2018 10:18:59 +0100 Subject: logging: fix jinja filters to support py3 Since py3 returns `dict_items` for dict.keys() call instead of a list, it should be converted into a list for compatibility Signed-off-by: Vadim Rutkovsky --- roles/openshift_logging/tasks/install_logging.yaml | 4 ++-- roles/openshift_logging_fluentd/tasks/main.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'roles/openshift_logging_fluentd') diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 67904a9d3..ebd2d747b 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -94,7 +94,7 @@ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ 
openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}" - "{{ openshift_logging_facts.elasticsearch.pvcs }}" - "{{ es_indices }}" loop_control: @@ -169,7 +169,7 @@ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}" - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" - "{{ es_ops_indices }}" loop_control: diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 529859983..79ebbca08 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -172,8 +172,8 @@ app_port: "{{ openshift_logging_fluentd_app_port }}" ops_host: "{{ openshift_logging_fluentd_ops_host }}" ops_port: "{{ openshift_logging_fluentd_ops_port }}" - fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" - fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" + fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}" + fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}" fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}" fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}" -- cgit v1.2.3 From d3fefc32a727fe3c13159c4e9fe4399f35b487a8 Mon Sep 17 00:00:00 2001 From: Michael Gugino Date: Thu, 4 Jan 2018 23:55:34 -0500 Subject: Move more plugins to lib_utils This commit continues moving plugins into lib_utils. This commit does not move any plugins for add-on roles such as logging and metrics. 
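As a hedged illustration of consuming one of the relocated plugins: this commit moves the `delegated_serial_command` module from `roles/etcd/library/` to `roles/lib_utils/library/`, and callers invoke it exactly as before once `lib_utils` is on the role search path. The task below is a sketch only; the host variable and file paths are invented, while the option names (`command`, `lockfile`, `timeout`) come from the module's own DOCUMENTATION, quoted later in this patch:

```yaml
# Hypothetical caller of the relocated delegated_serial_command module.
# The shared lock file serializes the signing command across hosts so that
# parallel runs do not race on the CA serial file.
- name: Sign and create the server crt (one host at a time)
  delegated_serial_command:
    command: >
      openssl ca -config /etc/etcd/ca/openssl.cnf
      -in /etc/etcd/generated_certs/server.csr
      -out /etc/etcd/generated_certs/server.crt
      -batch
    lockfile: /tmp/etcd_ca.lock
    timeout: 30
  delegate_to: "{{ etcd_ca_host }}"
```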
--- .../private/certificates-backup.yml | 1 + roles/etcd/library/delegated_serial_command.py | 274 ------- .../fetch_client_certificates_from_ca.yml | 1 + .../fetch_server_certificates_from_ca.yml | 2 + .../action_plugins/generate_pv_pvcs_list.py | 157 ++++ roles/lib_utils/filter_plugins/oo_cert_expiry.py | 66 ++ roles/lib_utils/filter_plugins/oo_filters.py | 9 + .../filter_plugins/openshift_aws_filters.py | 74 ++ .../filter_plugins/openshift_hosted_filters.py | 42 ++ roles/lib_utils/filter_plugins/openshift_master.py | 532 +++++++++++++ .../lib_utils/library/delegated_serial_command.py | 274 +++++++ roles/lib_utils/library/openshift_cert_expiry.py | 839 +++++++++++++++++++++ .../library/openshift_container_binary_sync.py | 205 +++++ .../openshift_master_facts_default_predicates.py | 143 ++++ .../openshift_master_facts_default_priorities.py | 117 +++ roles/lib_utils/test/conftest.py | 172 +++++ .../test/openshift_master_facts_bad_input_tests.py | 57 ++ .../test/openshift_master_facts_conftest.py | 54 ++ ...nshift_master_facts_default_predicates_tests.py | 193 +++++ ...nshift_master_facts_default_priorities_tests.py | 167 ++++ roles/lib_utils/test/test_fakeopensslclasses.py | 90 +++ roles/lib_utils/test/test_load_and_handle_cert.py | 67 ++ roles/openshift_aws/defaults/main.yml | 2 + .../filter_plugins/openshift_aws_filters.py | 74 -- roles/openshift_aws/tasks/build_node_group.yml | 1 + roles/openshift_aws/tasks/wait_for_groups.yml | 1 + .../filter_plugins/oo_cert_expiry.py | 66 -- .../library/openshift_cert_expiry.py | 839 --------------------- roles/openshift_certificate_expiry/tasks/main.yml | 4 +- .../openshift_certificate_expiry/test/conftest.py | 119 --- .../test/test_fakeopensslclasses.py | 90 --- .../test/test_load_and_handle_cert.py | 67 -- .../library/openshift_container_binary_sync.py | 205 ----- roles/openshift_cli/tasks/main.yml | 2 + .../openshift_checks/disk_availability.py | 2 +- .../filter_plugins/openshift_hosted_filters.py | 42 -- roles/openshift_hosted/tasks/router.yml | 1 + .../filter_plugins/openshift_logging.py | 9 - roles/openshift_logging_fluentd/defaults/main.yml | 1 + roles/openshift_logging_mux/defaults/main.yml | 1 + roles/openshift_master/tasks/main.yml | 1 + .../tasks/upgrade/upgrade_scheduler.yml | 2 + roles/openshift_master_certificates/tasks/main.yml | 1 + .../filter_plugins/openshift_master.py | 532 ------------- roles/openshift_master_facts/tasks/main.yml | 3 + roles/openshift_master_facts/test/conftest.py | 54 -- .../test/openshift_master_facts_bad_input_tests.py | 57 -- ...nshift_master_facts_default_predicates_tests.py | 193 ----- ...nshift_master_facts_default_priorities_tests.py | 167 ---- .../filter_plugins/openshift_named_certificates.py | 21 - .../action_plugins/generate_pv_pvcs_list.py | 157 ---- roles/openshift_persistent_volumes/tasks/main.yml | 3 +- .../filter_plugins/openshift_sanitize_inventory.py | 10 - .../filter_plugins/openshift_storage_glusterfs.py | 23 - .../tasks/glusterfs_config.yml | 1 + .../tasks/glusterfs_registry.yml | 1 + 56 files changed, 3286 insertions(+), 3002 deletions(-) delete mode 100755 roles/etcd/library/delegated_serial_command.py create mode 100644 roles/lib_utils/action_plugins/generate_pv_pvcs_list.py create mode 100644 roles/lib_utils/filter_plugins/oo_cert_expiry.py create mode 100644 roles/lib_utils/filter_plugins/openshift_aws_filters.py create mode 100644 roles/lib_utils/filter_plugins/openshift_hosted_filters.py create mode 100644 roles/lib_utils/filter_plugins/openshift_master.py create mode 100755 
roles/lib_utils/library/delegated_serial_command.py create mode 100644 roles/lib_utils/library/openshift_cert_expiry.py create mode 100644 roles/lib_utils/library/openshift_container_binary_sync.py create mode 100644 roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py create mode 100644 roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py create mode 100644 roles/lib_utils/test/conftest.py create mode 100644 roles/lib_utils/test/openshift_master_facts_bad_input_tests.py create mode 100644 roles/lib_utils/test/openshift_master_facts_conftest.py create mode 100644 roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py create mode 100644 roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py create mode 100644 roles/lib_utils/test/test_fakeopensslclasses.py create mode 100644 roles/lib_utils/test/test_load_and_handle_cert.py delete mode 100644 roles/openshift_aws/filter_plugins/openshift_aws_filters.py delete mode 100644 roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py delete mode 100644 roles/openshift_certificate_expiry/library/openshift_cert_expiry.py delete mode 100644 roles/openshift_certificate_expiry/test/conftest.py delete mode 100644 roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py delete mode 100644 roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py delete mode 100644 roles/openshift_cli/library/openshift_container_binary_sync.py delete mode 100644 roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py delete mode 100644 roles/openshift_master_facts/filter_plugins/openshift_master.py delete mode 100644 roles/openshift_master_facts/test/conftest.py delete mode 100644 roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py delete mode 100644 roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py delete mode 100644 roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py delete mode 100644 roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py delete mode 100644 roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py delete mode 100644 roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py (limited to 'roles/openshift_logging_fluentd') diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml index 4dbc041b0..56af18ca7 100644 --- a/playbooks/openshift-master/private/certificates-backup.yml +++ b/playbooks/openshift-master/private/certificates-backup.yml @@ -28,6 +28,7 @@ path: "{{ openshift.common.config_base }}/master/{{ item }}" state: absent with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}" - "etcd.server.crt" - "etcd.server.key" diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py deleted file mode 100755 index 0cab1ca88..000000000 --- a/roles/etcd/library/delegated_serial_command.py +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# (c) 2016, Andrew Butcher -# -# This module is derrived from the Ansible command module. 
-# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin - -''' delegated_serial_command ''' - -import datetime -import errno -import glob -import shlex -import os -import fcntl -import time - -DOCUMENTATION = ''' ---- -module: delegated_serial_command -short_description: Executes a command on a remote node -version_added: historical -description: - - The M(command) module takes the command name followed by a list - of space-delimited arguments. - - The given command will be executed on all selected nodes. It - will not be processed through the shell, so variables like - C($HOME) and operations like C("<"), C(">"), C("|"), and C("&") - will not work (use the M(shell) module if you need these - features). - - Creates and maintains a lockfile such that this module will - wait for other invocations to proceed. -options: - command: - description: - - the command to run - required: true - default: null - creates: - description: - - a filename or (since 2.0) glob pattern, when it already - exists, this step will B(not) be run. - required: no - default: null - removes: - description: - - a filename or (since 2.0) glob pattern, when it does not - exist, this step will B(not) be run. - version_added: "0.8" - required: no - default: null - chdir: - description: - - cd into this directory before running the command - version_added: "0.6" - required: false - default: null - executable: - description: - - change the shell used to execute the command. Should be an - absolute path to the executable. - required: false - default: null - version_added: "0.9" - warn: - version_added: "1.8" - default: yes - description: - - if command warnings are on in ansible.cfg, do not warn about - this particular line if set to no/false. - required: false - lockfile: - default: yes - description: - - the lockfile that will be created - timeout: - default: yes - description: - - time in milliseconds to wait to obtain the lock -notes: - - If you want to run a command through the shell (say you are using C(<), - C(>), C(|), etc), you actually want the M(shell) module instead. The - M(command) module is much more secure as it's not affected by the user's - environment. - - " C(creates), C(removes), and C(chdir) can be specified after - the command. For instance, if you only want to run a command if - a certain file does not exist, use this." -author: - - Ansible Core Team - - Michael DeHaan - - Andrew Butcher -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks. -- delegated_serial_command: - command: /sbin/shutdown -t now - -# Run the command if the specified file does not exist. 
-- delegated_serial_command: - command: /usr/bin/make_database.sh arg1 arg2 - creates: /path/to/database -''' - -# Dict of options and their defaults -OPTIONS = {'chdir': None, - 'creates': None, - 'command': None, - 'executable': None, - 'NO_LOG': None, - 'removes': None, - 'warn': True, - 'lockfile': None, - 'timeout': None} - - -def check_command(commandline): - ''' Check provided command ''' - arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group', - 'ln': 'state=link', 'mkdir': 'state=directory', - 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'} - commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri', - 'svn': 'subversion', 'service': 'service', - 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt', - 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile', - 'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'} - become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas'] - warnings = list() - command = os.path.basename(commandline.split()[0]) - # pylint: disable=line-too-long - if command in arguments: - warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command)) - if command in commands: - warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command)) - if command in become: - warnings.append( - "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,)) - return warnings - - -# pylint: disable=too-many-statements,too-many-branches,too-many-locals -def main(): - ''' Main module function ''' - module = AnsibleModule( # noqa: F405 - argument_spec=dict( - _uses_shell=dict(type='bool', default=False), - command=dict(required=True), - chdir=dict(), - executable=dict(), - creates=dict(), - removes=dict(), - warn=dict(type='bool', default=True), - lockfile=dict(default='/tmp/delegated_serial_command.lock'), - timeout=dict(type='int', default=30) - ) - ) - - shell = module.params['_uses_shell'] - chdir = module.params['chdir'] - executable = module.params['executable'] - command = module.params['command'] - creates = module.params['creates'] - removes = module.params['removes'] - warn = module.params['warn'] - lockfile = module.params['lockfile'] - timeout = module.params['timeout'] - - if command.strip() == '': - module.fail_json(rc=256, msg="no command given") - - iterated = 0 - lockfd = open(lockfile, 'w+') - while iterated < timeout: - try: - fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) - break - # pylint: disable=invalid-name - except IOError as e: - if e.errno != errno.EAGAIN: - module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror)) - else: - iterated += 1 - time.sleep(0.1) - - if chdir: - chdir = os.path.abspath(os.path.expanduser(chdir)) - os.chdir(chdir) - - if creates: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of command executions. - path = os.path.expanduser(creates) - if glob.glob(path): - module.exit_json( - cmd=command, - stdout="skipped, since %s exists" % path, - changed=False, - stderr=False, - rc=0 - ) - - if removes: - # do not run the command if the line contains removes=filename - # and the filename does not exist. This allows idempotence - # of command executions. 
- path = os.path.expanduser(removes) - if not glob.glob(path): - module.exit_json( - cmd=command, - stdout="skipped, since %s does not exist" % path, - changed=False, - stderr=False, - rc=0 - ) - - warnings = list() - if warn: - warnings = check_command(command) - - if not shell: - command = shlex.split(command) - startd = datetime.datetime.now() - - # pylint: disable=invalid-name - rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell) - - fcntl.flock(lockfd, fcntl.LOCK_UN) - lockfd.close() - - endd = datetime.datetime.now() - delta = endd - startd - - if out is None: - out = '' - if err is None: - err = '' - - module.exit_json( - cmd=command, - stdout=out.rstrip("\r\n"), - stderr=err.rstrip("\r\n"), - rc=rc, - start=str(startd), - end=str(endd), - delta=str(delta), - changed=True, - warnings=warnings, - iterated=iterated - ) - - -# import module snippets -# pylint: disable=wrong-import-position -from ansible.module_utils.basic import * # noqa: F402,F403 - -main() diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 78578a055..ce295d2f5 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -57,6 +57,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the client crt delegated_serial_command: command: > diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 987380d0c..7c8b87d99 100644 --- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -50,6 +50,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the server crt delegated_serial_command: command: > @@ -83,6 +84,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. 
+# delegated_serial_command is a custom module in lib_utils - name: Sign and create the peer crt delegated_serial_command: command: > diff --git a/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py new file mode 100644 index 000000000..eb13a58ba --- /dev/null +++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py @@ -0,0 +1,157 @@ +""" +Ansible action plugin to generate pv and pvc dictionary lists +""" + +from ansible.plugins.action import ActionBase +from ansible import errors + + +class ActionModule(ActionBase): + """Action plugin to generate pv and pvc dictionary lists.""" + + def get_templated(self, var_to_template): + """Return a properly templated ansible variable""" + return self._templar.template(self.task_vars.get(var_to_template)) + + def build_common(self, varname=None): + """Retrieve common variables for each pv and pvc type""" + volume = self.get_templated(str(varname) + '_volume_name') + size = self.get_templated(str(varname) + '_volume_size') + labels = self.task_vars.get(str(varname) + '_labels') + if labels: + labels = self._templar.template(labels) + else: + labels = dict() + access_modes = self.get_templated(str(varname) + '_access_modes') + return (volume, size, labels, access_modes) + + def build_pv_nfs(self, varname=None): + """Build pv dictionary for nfs storage type""" + host = self.task_vars.get(str(varname) + '_host') + if host: + self._templar.template(host) + elif host is None: + groups = self.task_vars.get('groups') + default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group') + if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleModuleError("|failed no storage host detected") + volume, size, labels, access_modes = self.build_common(varname=varname) + directory = self.get_templated(str(varname) + '_nfs_directory') + path = directory + '/' + volume + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + + def build_pv_openstack(self, varname=None): + """Build pv dictionary for openstack storage type""" + volume, size, labels, access_modes = self.build_common(varname=varname) + filesystem = self.get_templated(str(varname) + '_openstack_filesystem') + volume_id = self.get_templated(str(varname) + '_openstack_volumeID') + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + + def build_pv_glusterfs(self, varname=None): + """Build pv dictionary for glusterfs storage type""" + volume, size, labels, access_modes = self.build_common(varname=varname) + endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints') + path = self.get_templated(str(varname) + '_glusterfs_path') + read_only = self.get_templated(str(varname) + '_glusterfs_readOnly') + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + + def build_pv_dict(self, varname=None): + """Check for the existence of PV variables""" + kind = self.task_vars.get(str(varname) + '_kind') + if kind: + kind = self._templar.template(kind) + create_pv = self.task_vars.get(str(varname) + '_create_pv') + if
create_pv and self._templar.template(create_pv): + if kind == 'nfs': + return self.build_pv_nfs(varname=varname) + + elif kind == 'openstack': + return self.build_pv_openstack(varname=varname) + + elif kind == 'glusterfs': + return self.build_pv_glusterfs(varname=varname) + + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + varname) + raise errors.AnsibleModuleError(msg) + return None + + def build_pvc_dict(self, varname=None): + """Check for the existence of PVC variables""" + kind = self.task_vars.get(str(varname) + '_kind') + if kind: + kind = self._templar.template(kind) + create_pv = self.task_vars.get(str(varname) + '_create_pv') + if create_pv: + create_pv = self._templar.template(create_pv) + create_pvc = self.task_vars.get(str(varname) + '_create_pvc') + if create_pvc: + create_pvc = self._templar.template(create_pvc) + if kind != 'object' and create_pv and create_pvc: + volume, size, _, access_modes = self.build_common(varname=varname) + return dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + return None + + def run(self, tmp=None, task_vars=None): + """Run generate_pv_pvcs_list action plugin""" + result = super(ActionModule, self).run(tmp, task_vars) + # Ignore setting self.task_vars outside of init. + # pylint: disable=W0201 + self.task_vars = task_vars or {} + + result["changed"] = False + result["failed"] = False + result["msg"] = "persistent_volumes list and persistent_volume_claims list created" + vars_to_check = ['openshift_hosted_registry_storage', + 'openshift_hosted_router_storage', + 'openshift_hosted_etcd_storage', + 'openshift_logging_storage', + 'openshift_loggingops_storage', + 'openshift_metrics_storage', + 'openshift_prometheus_storage', + 'openshift_prometheus_alertmanager_storage', + 'openshift_prometheus_alertbuffer_storage'] + persistent_volumes = [] + persistent_volume_claims = [] + for varname in vars_to_check: + pv_dict = self.build_pv_dict(varname) + if pv_dict: + persistent_volumes.append(pv_dict) + pvc_dict = self.build_pvc_dict(varname) + if pvc_dict: + persistent_volume_claims.append(pvc_dict) + result["persistent_volumes"] = persistent_volumes + result["persistent_volume_claims"] = persistent_volume_claims + return result diff --git a/roles/lib_utils/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py new file mode 100644 index 000000000..58b228fee --- /dev/null +++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +""" +Custom filters for use in openshift-ansible +""" + + +# Disabling too-many-public-methods, since filter methods are necessarily +# public +# pylint: disable=too-many-public-methods +class FilterModule(object): + """ Custom ansible filters """ + + @staticmethod + def oo_cert_expiry_results_to_json(hostvars, play_hosts): + """Takes results (`hostvars`) from the openshift_cert_expiry role +check and serializes them into proper machine-readable JSON +output. This filter parameter **MUST** be the playbook `hostvars` +variable. The `play_hosts` parameter is so we know what to loop over +when we're extracting the values.
+ +Returns: + +Results are collected into two top-level keys under the `json_results` +dict: + +* `json_results.data` [dict] - Each individual host check result, keys are hostnames +* `json_results.summary` [dict] - Summary of number of `warning` and `expired` +certificates + +Example playbook usage: + + - name: Generate expiration results JSON + run_once: yes + delegate_to: localhost + when: openshift_certificate_expiry_save_json_results|bool + copy: + content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" + dest: "{{ openshift_certificate_expiry_json_results_path }}" + + """ + json_result = { + 'data': {}, + 'summary': {}, + } + + for host in play_hosts: + json_result['data'][host] = hostvars[host]['check_results']['check_results'] + + total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts]) + total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts]) + total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts]) + total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts]) + + json_result['summary']['warning'] = total_warnings + json_result['summary']['expired'] = total_expired + json_result['summary']['ok'] = total_ok + json_result['summary']['total'] = total_total + + return json_result + + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json, + } diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py index a2ea287cf..fc14b5633 100644 --- a/roles/lib_utils/filter_plugins/oo_filters.py +++ b/roles/lib_utils/filter_plugins/oo_filters.py @@ -589,6 +589,14 @@ that result to this filter plugin. return secret_name +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + class FilterModule(object): """ Custom ansible filter mapping """ @@ -618,4 +626,5 @@ class FilterModule(object): "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule, "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list, "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets, + "map_from_pairs": map_from_pairs } diff --git a/roles/lib_utils/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py new file mode 100644 index 000000000..dfcb11da3 --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_aws +''' + +from ansible import errors + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_aws role''' + + @staticmethod + def scale_groups_serial(scale_group_info, upgrade=False): + ''' This function will determine what the deployment serial should be and return it + + Search through the tags and find the deployment_serial tag. Once found, + determine if an increment is needed during an upgrade. 
+ if upgrade is true then increment the serial and return it + else return the serial + ''' + if scale_group_info == []: + return 1 + + scale_group_info = scale_group_info[0] + + if not isinstance(scale_group_info, dict): + raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict") + + serial = None + + for tag in scale_group_info['tags']: + if tag['key'] == 'deployment_serial': + serial = int(tag['value']) + if upgrade: + serial += 1 + break + else: + raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found") + + return serial + + @staticmethod + def scale_groups_match_capacity(scale_group_info): + ''' This function will verify that the scale group instance count matches + the scale group desired capacity + + ''' + for scale_group in scale_group_info: + if scale_group['desired_capacity'] != len(scale_group['instances']): + return False + + return True + + @staticmethod + def build_instance_tags(clusterid): + ''' This function will return a dictionary of the instance tags. + + The main desire to have this inside of a filter_plugin is that we + need to build the following key. + + {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} + + ''' + tags = {'clusterid': clusterid, + 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} + + return tags + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'build_instance_tags': self.build_instance_tags, + 'scale_groups_match_capacity': self.scale_groups_match_capacity, + 'scale_groups_serial': self.scale_groups_serial} diff --git a/roles/lib_utils/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py new file mode 100644 index 000000000..003ce5f9e --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py @@ -0,0 +1,42 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_hosted +''' + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_hosted role''' + + @staticmethod + def get_router_replicas(replicas=None, router_nodes=None): + ''' This function will return the number of replicas + based on the results from the defined + openshift_hosted_router_replicas OR + the query from oc_obj on openshift nodes with a selector OR + default to 1 + + ''' + # We always use what they've specified if they've specified a value + if replicas is not None: + return replicas + + replicas = 1 + + # Ignore boolean expression limit of 5. 
+ # pylint: disable=too-many-boolean-expressions + if (isinstance(router_nodes, dict) and + 'results' in router_nodes and + 'results' in router_nodes['results'] and + isinstance(router_nodes['results']['results'], list) and + len(router_nodes['results']['results']) > 0 and + 'items' in router_nodes['results']['results'][0]): + + if len(router_nodes['results']['results'][0]['items']) > 0: + replicas = len(router_nodes['results']['results'][0]['items']) + + return replicas + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'get_router_replicas': self.get_router_replicas} diff --git a/roles/lib_utils/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py new file mode 100644 index 000000000..ff15f693b --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_master.py @@ -0,0 +1,532 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift-master +''' +import copy +import sys + +from ansible import errors +from ansible.parsing.yaml.dumper import AnsibleDumper +from ansible.plugins.filter.core import to_bool as ansible_bool + +# ansible.compat.six goes away with Ansible 2.4 +try: + from ansible.compat.six import string_types, u +except ImportError: + from ansible.module_utils.six import string_types, u + +import yaml + + +class IdentityProviderBase(object): + """ IdentityProviderBase + + Attributes: + name (str): Identity provider Name + login (bool): Is this identity provider a login provider? + challenge (bool): Is this identity provider a challenge provider? + provider (dict): Provider specific config + _idp (dict): internal copy of the IDP dict passed in + _required (list): List of lists of strings for required attributes + _optional (list): List of lists of strings for optional attributes + _allow_additional (bool): Does this provider support attributes + not in _required and _optional + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + # disabling this check since the number of instance attributes are + # necessary for this class + # pylint: disable=too-many-instance-attributes + def __init__(self, api_version, idp): + if api_version not in ['v1']: + raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version)) + + self._idp = copy.deepcopy(idp) + + if 'name' not in self._idp: + raise errors.AnsibleFilterError("|failed identity provider missing a name") + + if 'kind' not in self._idp: + raise errors.AnsibleFilterError("|failed identity provider missing a kind") + + self.name = self._idp.pop('name') + self.login = ansible_bool(self._idp.pop('login', False)) + self.challenge = ansible_bool(self._idp.pop('challenge', False)) + self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind')) + + mm_keys = ('mappingMethod', 'mapping_method') + mapping_method = None + for key in mm_keys: + if key in self._idp: + mapping_method = self._idp.pop(key) + if mapping_method is None: + mapping_method = self.get_default('mappingMethod') + self.mapping_method = mapping_method + + valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] + if self.mapping_method not in valid_mapping_methods: + raise errors.AnsibleFilterError("|failed unknown mapping method " + "for provider {0}".format(self.__class__.__name__)) + self._required = [] + self._optional = [] + self._allow_additional = True + + @staticmethod + def validate_idp_list(idp_list): + ''' validates a list of idps ''' + names = [x.name for x 
in idp_list] + if len(set(names)) != len(names): + raise errors.AnsibleFilterError("|failed more than one provider configured with the same name") + + for idp in idp_list: + idp.validate() + + def validate(self): + ''' validate an instance of this idp class ''' + pass + + @staticmethod + def get_default(key): + ''' get a default value for a given key ''' + if key == 'mappingMethod': + return 'claim' + else: + return None + + def set_provider_item(self, items, required=False): + ''' set a provider item based on the list of item names provided. ''' + for item in items: + provider_key = items[0] + if item in self._idp: + self.provider[provider_key] = self._idp.pop(item) + break + else: + default = self.get_default(provider_key) + if default is not None: + self.provider[provider_key] = default + elif required: + raise errors.AnsibleFilterError("|failed provider {0} missing " + "required key {1}".format(self.__class__.__name__, provider_key)) + + def set_provider_items(self): + ''' set the provider items for this idp ''' + for items in self._required: + self.set_provider_item(items, True) + for items in self._optional: + self.set_provider_item(items) + if self._allow_additional: + for key in self._idp.keys(): + self.set_provider_item([key]) + else: + if len(self._idp) > 0: + raise errors.AnsibleFilterError("|failed provider {0} " + "contains unknown keys " + "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys()))) + + def to_dict(self): + ''' translate this idp to a dictionary ''' + return dict(name=self.name, challenge=self.challenge, + login=self.login, mappingMethod=self.mapping_method, + provider=self.provider) + + +class LDAPPasswordIdentityProvider(IdentityProviderBase): + """ LDAPPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['attributes'], ['url'], ['insecure']] + self._optional += [['ca'], + ['bindDN', 'bind_dn'], + ['bindPassword', 'bind_password']] + + self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False)) + + if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']: + pref_user = self._idp['attributes'].pop('preferred_username') + self._idp['attributes']['preferredUsername'] = pref_user + + def validate(self): + ''' validate this idp instance ''' + if not isinstance(self.provider['attributes'], dict): + raise errors.AnsibleFilterError("|failed attributes for provider " + "{0} must be a dictionary".format(self.__class__.__name__)) + + attrs = ['id', 'email', 'name', 'preferredUsername'] + for attr in attrs: + if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list): + raise errors.AnsibleFilterError("|failed {0} attribute for " + "provider {1} must be a list".format(attr, self.__class__.__name__)) + + unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs) + if len(unknown_attrs) > 0: + raise errors.AnsibleFilterError("|failed provider {0} has unknown " + "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs))) + + +class KeystonePasswordIdentityProvider(IdentityProviderBase): + """ KeystoneIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, 
idp): + super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['url'], ['domainName', 'domain_name']] + self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] + + +class RequestHeaderIdentityProvider(IdentityProviderBase): + """ RequestHeaderIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(RequestHeaderIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['headers']] + self._optional += [['challengeURL', 'challenge_url'], + ['loginURL', 'login_url'], + ['clientCA', 'client_ca'], + ['clientCommonNames', 'client_common_names'], + ['emailHeaders', 'email_headers'], + ['nameHeaders', 'name_headers'], + ['preferredUsernameHeaders', 'preferred_username_headers']] + + def validate(self): + ''' validate this idp instance ''' + if not isinstance(self.provider['headers'], list): + raise errors.AnsibleFilterError("|failed headers for provider {0} " + "must be a list".format(self.__class__.__name__)) + + +class AllowAllPasswordIdentityProvider(IdentityProviderBase): + """ AllowAllPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + + +class DenyAllPasswordIdentityProvider(IdentityProviderBase): + """ DenyAllPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + + +class HTPasswdPasswordIdentityProvider(IdentityProviderBase): + """ HTPasswdPasswordIdentity + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['file', 'filename', 'fileName', 'file_name']] + + @staticmethod + def get_default(key): + if key == 'file': + return '/etc/origin/htpasswd' + else: + return IdentityProviderBase.get_default(key) + + +class BasicAuthPasswordIdentityProvider(IdentityProviderBase): + """ BasicAuthPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['url']] + self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] + + +class IdentityProviderOauthBase(IdentityProviderBase): + """ IdentityProviderOauthBase + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(IdentityProviderOauthBase, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['clientID', 'client_id'], ['clientSecret', 
'client_secret']]
+
+    def validate(self):
+        ''' validate an instance of this idp class '''
+        pass
+
+
+class OpenIDIdentityProvider(IdentityProviderOauthBase):
+    """ OpenIDIdentityProvider
+
+        Attributes:
+
+        Args:
+            api_version(str): OpenShift config version
+            idp (dict): idp config dict
+
+        Raises:
+            AnsibleFilterError:
+    """
+    def __init__(self, api_version, idp):
+        IdentityProviderOauthBase.__init__(self, api_version, idp)
+        self._required += [['claims'], ['urls']]
+        self._optional += [['ca'],
+                           ['extraScopes'],
+                           ['extraAuthorizeParameters']]
+        if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
+            pref_user = self._idp['claims'].pop('preferred_username')
+            self._idp['claims']['preferredUsername'] = pref_user
+        if 'urls' in self._idp and 'user_info' in self._idp['urls']:
+            user_info = self._idp['urls'].pop('user_info')
+            self._idp['urls']['userInfo'] = user_info
+        if 'extra_scopes' in self._idp:
+            self._idp['extraScopes'] = self._idp.pop('extra_scopes')
+        if 'extra_authorize_parameters' in self._idp:
+            self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
+
+    def validate(self):
+        ''' validate this idp instance '''
+        if not isinstance(self.provider['claims'], dict):
+            raise errors.AnsibleFilterError("|failed claims for provider {0} "
+                                            "must be a dictionary".format(self.__class__.__name__))
+
+        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
+            if var in self.provider and not isinstance(self.provider[var], var_type):
+                raise errors.AnsibleFilterError("|failed {1} for provider "
+                                                "{0} must be a {2}".format(self.__class__.__name__,
+                                                                           var,
+                                                                           var_type.__name__))
+
+        required_claims = ['id']
+        optional_claims = ['email', 'name', 'preferredUsername']
+        all_claims = required_claims + optional_claims
+
+        for claim in required_claims:
+            if claim not in self.provider['claims']:
+                raise errors.AnsibleFilterError("|failed {0} claim missing "
+                                                "for provider {1}".format(claim, self.__class__.__name__))
+
+        for claim in all_claims:
+            if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
+                raise errors.AnsibleFilterError("|failed {0} claims for "
+                                                "provider {1} must be a list".format(claim, self.__class__.__name__))
+
+        unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
+        if len(unknown_claims) > 0:
+            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+                                            "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
+
+        if not isinstance(self.provider['urls'], dict):
+            raise errors.AnsibleFilterError("|failed urls for provider {0} "
+                                            "must be a dictionary".format(self.__class__.__name__))
+
+        required_urls = ['authorize', 'token']
+        optional_urls = ['userInfo']
+        all_urls = required_urls + optional_urls
+
+        for url in required_urls:
+            if url not in self.provider['urls']:
+                raise errors.AnsibleFilterError("|failed {0} url missing for "
+                                                "provider {1}".format(url, self.__class__.__name__))
+
+        unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
+        if len(unknown_urls) > 0:
+            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+                                            "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
+
+
+class GoogleIdentityProvider(IdentityProviderOauthBase):
+    """ GoogleIdentityProvider
+
+        Attributes:
+
+        Args:
+            api_version(str): OpenShift config version
+            idp (dict): idp config dict
+
+        Raises:
+            AnsibleFilterError:
+    """
+    def __init__(self, api_version, idp):
+ IdentityProviderOauthBase.__init__(self, api_version, idp) + self._optional += [['hostedDomain', 'hosted_domain']] + + def validate(self): + ''' validate this idp instance ''' + if self.challenge: + raise errors.AnsibleFilterError("|failed provider {0} does not " + "allow challenge authentication".format(self.__class__.__name__)) + + +class GitHubIdentityProvider(IdentityProviderOauthBase): + """ GitHubIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + IdentityProviderOauthBase.__init__(self, api_version, idp) + self._optional += [['organizations'], + ['teams']] + + def validate(self): + ''' validate this idp instance ''' + if self.challenge: + raise errors.AnsibleFilterError("|failed provider {0} does not " + "allow challenge authentication".format(self.__class__.__name__)) + + +class FilterModule(object): + ''' Custom ansible filters for use by the openshift_master role''' + + @staticmethod + def translate_idps(idps, api_version): + ''' Translates a list of dictionaries into a valid identityProviders config ''' + idp_list = [] + + if not isinstance(idps, list): + raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers") + for idp in idps: + if not isinstance(idp, dict): + raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries") + + cur_module = sys.modules[__name__] + idp_class = getattr(cur_module, idp['kind'], None) + idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp) + idp_inst.set_provider_items() + idp_list.append(idp_inst) + + IdentityProviderBase.validate_idp_list(idp_list) + return u(yaml.dump([idp.to_dict() for idp in idp_list], + allow_unicode=True, + default_flow_style=False, + width=float("inf"), + Dumper=AnsibleDumper)) + + @staticmethod + def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True): + ''' Return certificates to synchronize based on facts. 
'''
+        if not issubclass(type(hostvars), dict):
+            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+        certs = ['admin.crt',
+                 'admin.key',
+                 'admin.kubeconfig',
+                 'master.kubelet-client.crt',
+                 'master.kubelet-client.key',
+                 'master.proxy-client.crt',
+                 'master.proxy-client.key',
+                 'service-signer.crt',
+                 'service-signer.key']
+        if bool(include_ca):
+            certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
+        if bool(include_keys):
+            certs += ['serviceaccounts.private.key',
+                      'serviceaccounts.public.key']
+        return certs
+
+    @staticmethod
+    def oo_htpasswd_users_from_file(file_contents):
+        ''' return a dictionary of htpasswd users from htpasswd file contents '''
+        htpasswd_entries = {}
+        if not isinstance(file_contents, string_types):
+            raise errors.AnsibleFilterError("failed, expects to filter on a string")
+        for line in file_contents.splitlines():
+            user = None
+            passwd = None
+            if len(line) == 0:
+                continue
+            if ':' in line:
+                user, passwd = line.split(':', 1)
+
+            if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
+                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
+                raise errors.AnsibleFilterError(error_msg)
+            htpasswd_entries[user] = passwd
+        return htpasswd_entries
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {"translate_idps": self.translate_idps,
+                "certificates_to_synchronize": self.certificates_to_synchronize,
+                "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/lib_utils/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
new file mode 100755
index 000000000..0cab1ca88
--- /dev/null
+++ b/roles/lib_utils/library/delegated_serial_command.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan, and others
+# (c) 2016, Andrew Butcher
+#
+# This module is derived from the Ansible command module.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
+
+''' delegated_serial_command '''
+
+import datetime
+import errno
+import glob
+import shlex
+import os
+import fcntl
+import time
+
+DOCUMENTATION = '''
+---
+module: delegated_serial_command
+short_description: Executes a command on a remote node
+version_added: historical
+description:
+     - The M(command) module takes the command name followed by a list
+       of space-delimited arguments.
+     - The given command will be executed on all selected nodes. It
+       will not be processed through the shell, so variables like
+       C($HOME) and operations like C("<"), C(">"), C("|"), and C("&")
+       will not work (use the M(shell) module if you need these
+       features).
+     - Creates and maintains a lockfile such that this module will
+       wait for other invocations to proceed.
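+     - The lock is acquired with fcntl.flock() on the configured C(lockfile);
+       acquisition is retried every 0.1 seconds, up to C(timeout) attempts,
+       before the command itself runs.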
+options:
+  command:
+    description:
+      - the command to run
+    required: true
+  creates:
+    description:
+      - a filename or (since 2.0) glob pattern, when it already
+        exists, this step will B(not) be run.
+    required: false
+    default: null
+  removes:
+    description:
+      - a filename or (since 2.0) glob pattern, when it does not
+        exist, this step will B(not) be run.
+    version_added: "0.8"
+    required: false
+    default: null
+  chdir:
+    description:
+      - cd into this directory before running the command
+    version_added: "0.6"
+    required: false
+    default: null
+  executable:
+    description:
+      - change the shell used to execute the command. Should be an
+        absolute path to the executable.
+    required: false
+    default: null
+    version_added: "0.9"
+  warn:
+    version_added: "1.8"
+    default: yes
+    description:
+      - if command warnings are on in ansible.cfg, do not warn about
+        this particular line if set to no/false.
+    required: false
+  lockfile:
+    default: /tmp/delegated_serial_command.lock
+    description:
+      - the lockfile that will be created
+  timeout:
+    default: 30
+    description:
+      - the maximum number of lock acquisition attempts; one attempt
+        is made every 0.1 seconds
+notes:
+    - If you want to run a command through the shell (say you are using C(<),
+      C(>), C(|), etc), you actually want the M(shell) module instead. The
+      M(command) module is much more secure as it's not affected by the user's
+      environment.
+    - " C(creates), C(removes), and C(chdir) can be specified after
+      the command. For instance, if you only want to run a command if
+      a certain file does not exist, use this."
+author:
+    - Ansible Core Team
+    - Michael DeHaan
+    - Andrew Butcher
+'''
+
+EXAMPLES = '''
+# Example from Ansible Playbooks.
+- delegated_serial_command:
+    command: /sbin/shutdown -t now
+
+# Run the command if the specified file does not exist.
+- delegated_serial_command:
+    command: /usr/bin/make_database.sh arg1 arg2
+    creates: /path/to/database
+'''
+
+# Dict of options and their defaults
+OPTIONS = {'chdir': None,
+           'creates': None,
+           'command': None,
+           'executable': None,
+           'NO_LOG': None,
+           'removes': None,
+           'warn': True,
+           'lockfile': None,
+           'timeout': None}
+
+
+def check_command(commandline):
+    ''' Check provided command '''
+    arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
+                 'ln': 'state=link', 'mkdir': 'state=directory',
+                 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
+    commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri',
+                'svn': 'subversion', 'service': 'service',
+                'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
+                'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
+                'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'}
+    become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']
+    warnings = list()
+    command = os.path.basename(commandline.split()[0])
+    # pylint: disable=line-too-long
+    if command in arguments:
+        warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command))
+    if command in commands:
+        warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command))
+    if command in become:
+        warnings.append(
+            "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,))
+    return warnings
+
+
+# pylint: disable=too-many-statements,too-many-branches,too-many-locals
+def main():
+    ''' Main module function '''
+    module = AnsibleModule(  # noqa: F405
+        argument_spec=dict(
+            _uses_shell=dict(type='bool',
default=False), + command=dict(required=True), + chdir=dict(), + executable=dict(), + creates=dict(), + removes=dict(), + warn=dict(type='bool', default=True), + lockfile=dict(default='/tmp/delegated_serial_command.lock'), + timeout=dict(type='int', default=30) + ) + ) + + shell = module.params['_uses_shell'] + chdir = module.params['chdir'] + executable = module.params['executable'] + command = module.params['command'] + creates = module.params['creates'] + removes = module.params['removes'] + warn = module.params['warn'] + lockfile = module.params['lockfile'] + timeout = module.params['timeout'] + + if command.strip() == '': + module.fail_json(rc=256, msg="no command given") + + iterated = 0 + lockfd = open(lockfile, 'w+') + while iterated < timeout: + try: + fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + break + # pylint: disable=invalid-name + except IOError as e: + if e.errno != errno.EAGAIN: + module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror)) + else: + iterated += 1 + time.sleep(0.1) + + if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) + os.chdir(chdir) + + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. + path = os.path.expanduser(creates) + if glob.glob(path): + module.exit_json( + cmd=command, + stdout="skipped, since %s exists" % path, + changed=False, + stderr=False, + rc=0 + ) + + if removes: + # do not run the command if the line contains removes=filename + # and the filename does not exist. This allows idempotence + # of command executions. + path = os.path.expanduser(removes) + if not glob.glob(path): + module.exit_json( + cmd=command, + stdout="skipped, since %s does not exist" % path, + changed=False, + stderr=False, + rc=0 + ) + + warnings = list() + if warn: + warnings = check_command(command) + + if not shell: + command = shlex.split(command) + startd = datetime.datetime.now() + + # pylint: disable=invalid-name + rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell) + + fcntl.flock(lockfd, fcntl.LOCK_UN) + lockfd.close() + + endd = datetime.datetime.now() + delta = endd - startd + + if out is None: + out = '' + if err is None: + err = '' + + module.exit_json( + cmd=command, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + rc=rc, + start=str(startd), + end=str(endd), + delta=str(delta), + changed=True, + warnings=warnings, + iterated=iterated + ) + + +# import module snippets +# pylint: disable=wrong-import-position +from ansible.module_utils.basic import * # noqa: F402,F403 + +main() diff --git a/roles/lib_utils/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py new file mode 100644 index 000000000..e355266b0 --- /dev/null +++ b/roles/lib_utils/library/openshift_cert_expiry.py @@ -0,0 +1,839 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=line-too-long,invalid-name + +"""For details on this module see DOCUMENTATION (below)""" + +import base64 +import datetime +import io +import os +import subprocess +import yaml + +# pylint import-error disabled because pylint cannot find the package +# when installed in a virtualenv +from ansible.module_utils.six.moves import configparser # pylint: disable=import-error +from ansible.module_utils.basic import AnsibleModule + +try: + # You can comment this import out and include a 'pass' in this + # block if you're manually testing this module on a NON-ATOMIC + # HOST (or any host that 
just doesn't have PyOpenSSL
+    # available). That will force the `load_and_handle_cert` function
+    # to use the Fake OpenSSL classes.
+    import OpenSSL.crypto
+    HAS_OPENSSL = True
+except ImportError:
+    # Some platforms (such as RHEL Atomic) may not have the Python
+    # OpenSSL library installed. In this case we will use a manual
+    # work-around to parse each certificate.
+    #
+    # Check for 'OpenSSL.crypto' in `sys.modules` later.
+    HAS_OPENSSL = False

+DOCUMENTATION = '''
+---
+module: openshift_cert_expiry
+short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
+description:
+  - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
+  - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
+  - C(ok) - not expired, and outside of the expiration C(warning_days) window.
+  - C(warning) - not expired, but will expire between now and the C(warning_days) window.
+  - C(expired) - an expired certificate.
+  - Certificate flagging follows this logic:
+  - If the expiration date is before now then the certificate is classified as C(expired).
+  - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days), the certificate is classified as C(warning).
+  - All other conditions are classified as C(ok).
+  - The following keys are ALSO present in the certificate summary:
+  - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
+  - C(days_remaining) - The number of days until the certificate expires.
+  - C(expiry) - The date the certificate expires on.
+  - C(path) - The full path to the certificate on the examined host.
+version_added: "1.0"
+options:
+  config_base:
+    description:
+      - Base path to OCP system settings.
+    required: false
+    default: /etc/origin
+  warning_days:
+    description:
+      - Flag certificates which will expire in C(warning_days) days from now.
+    required: false
+    default: 30
+  show_all:
+    description:
+      - Enable this option to show analysis of ALL certificates examined by this module.
+      - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported.
+    required: false
+    default: false
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
+- openshift_cert_expiry:
+
+# Expand the warning window to show certificates expiring within a year from now
+- openshift_cert_expiry: warning_days=365
+
+# Show expired, soon to expire (now + 30 days), and all other certificates examined
+- openshift_cert_expiry: show_all=true
+'''
+
+
+class FakeOpenSSLCertificate(object):
+    """This provides a rough mock of what you get from
+`OpenSSL.crypto.load_certificate()`. This is a work-around for
+platforms missing the Python OpenSSL library.
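+Only the accessors this module consumes (serial number, subject,
+SAN extensions and notAfter) are implemented.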
+ """ + def __init__(self, cert_string): + """`cert_string` is a certificate in the form you get from running a +.crt through 'openssl x509 -in CERT.cert -text'""" + self.cert_string = cert_string + self.serial = None + self.subject = None + self.extensions = [] + self.not_after = None + self._parse_cert() + + def _parse_cert(self): + """Manually parse the certificate line by line""" + self.extensions = [] + + PARSING_ALT_NAMES = False + PARSING_HEX_SERIAL = False + for line in self.cert_string.split('\n'): + l = line.strip() + if PARSING_ALT_NAMES: + # We're parsing a 'Subject Alternative Name' line + self.extensions.append( + FakeOpenSSLCertificateSANExtension(l)) + + PARSING_ALT_NAMES = False + continue + + if PARSING_HEX_SERIAL: + # Hex serials arrive colon-delimited + serial_raw = l.replace(':', '') + # Convert to decimal + self.serial = int('0x' + serial_raw, base=16) + PARSING_HEX_SERIAL = False + continue + + # parse out the bits that we can + if l.startswith('Serial Number:'): + # Decimal format: + # Serial Number: 11 (0xb) + # => 11 + # Hex Format (large serials): + # Serial Number: + # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf + # => 14449739080294792594019643629255165375 + if l.endswith(':'): + PARSING_HEX_SERIAL = True + continue + self.serial = int(l.split()[-2]) + + elif l.startswith('Not After :'): + # Not After : Feb 7 18:19:35 2019 GMT + # => strptime(str, '%b %d %H:%M:%S %Y %Z') + # => strftime('%Y%m%d%H%M%SZ') + # => 20190207181935Z + not_after_raw = l.partition(' : ')[-1] + # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT') + not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z') + self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ') + + elif l.startswith('X509v3 Subject Alternative Name:'): + PARSING_ALT_NAMES = True + continue + + elif l.startswith('Subject:'): + # O = system:nodes, CN = system:node:m01.example.com + self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1]) + + def get_serial_number(self): + """Return the serial number of the cert""" + return self.serial + + def get_subject(self): + """Subjects must implement get_components() and return dicts or +tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject': + + Subject: Subject: O=system:nodes, CN=system:node:m01.example.com + +might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')] + """ + return self.subject + + def get_extension(self, i): + """Extensions must implement get_short_name() and return the string +'subjectAltName'""" + return self.extensions[i] + + def get_extension_count(self): + """ get_extension_count """ + return len(self.extensions) + + def get_notAfter(self): + """Returns a date stamp as a string in the form +'20180922170439Z'. strptime the result with format param: +'%Y%m%d%H%M%SZ'.""" + return self.not_after + + +class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods + """Mocks what happens when `get_extension` is called on a certificate +object""" + + def __init__(self, san_string): + """With `san_string` as you get from: + + $ openssl x509 -in certificate.crt -text + """ + self.san_string = san_string + self.short_name = 'subjectAltName' + + def get_short_name(self): + """Return the 'type' of this extension. 
It's always the same though +because we only care about subjectAltName's""" + return self.short_name + + def __str__(self): + """Return this extension and the value as a simple string""" + return self.san_string + + +# pylint: disable=too-few-public-methods +class FakeOpenSSLCertificateSubjects(object): + """Mocks what happens when `get_subject` is called on a certificate +object""" + + def __init__(self, subject_string): + """With `subject_string` as you get from: + + $ openssl x509 -in certificate.crt -text + """ + self.subjects = [] + for s in subject_string.split(', '): + name, _, value = s.partition(' = ') + self.subjects.append((name, value)) + + def get_components(self): + """Returns a list of tuples""" + return self.subjects + + +###################################################################### +def filter_paths(path_list): + """`path_list` - A list of file paths to check. Only files which exist +will be returned + """ + return [p for p in path_list if os.path.exists(os.path.realpath(p))] + + +# pylint: disable=too-many-locals,too-many-branches +# +# TODO: Break this function down into smaller chunks +def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None): + """Load a certificate, split off the good parts, and return some +useful data + +Params: + +- `cert_string` (string) - a certificate loaded into a string object +- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `base64decode` (bool) - run base64.b64decode() on the input +- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors) + +Returns: +A tuple of the form: + (cert_subject, cert_expiry_date, time_remaining, cert_serial_number) + """ + if base64decode: + _cert_string = base64.b64decode(cert_string).decode('utf-8') + else: + _cert_string = cert_string + + # Disable this. We 'redefine' the type because we are working + # around a missing library on the target host. + # + # pylint: disable=redefined-variable-type + if HAS_OPENSSL: + # No work-around required + cert_loaded = OpenSSL.crypto.load_certificate( + OpenSSL.crypto.FILETYPE_PEM, _cert_string) + else: + # Missing library, work-around required. Run the 'openssl' + # command on it to decode it + cmd = 'openssl x509 -text' + try: + openssl_proc = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stdin=subprocess.PIPE) + except OSError: + ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.") + else: + openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8') + cert_loaded = FakeOpenSSLCertificate(openssl_decoded) + + ###################################################################### + # Read all possible names from the cert + cert_subjects = [] + for name, value in cert_loaded.get_subject().get_components(): + if isinstance(name, bytes) or isinstance(value, bytes): + name = name.decode('utf-8') + value = value.decode('utf-8') + cert_subjects.append('{}:{}'.format(name, value)) + + # To read SANs from a cert we must read the subjectAltName + # extension from the X509 Object. 
What makes this more difficult + # is that pyOpenSSL does not give extensions as an iterable + san = None + for i in range(cert_loaded.get_extension_count()): + ext = cert_loaded.get_extension(i) + if ext.get_short_name() == 'subjectAltName': + san = ext + + if san is not None: + # The X509Extension object for subjectAltName prints as a + # string with the alt names separated by a comma and a + # space. Split the string by ', ' and then add our new names + # to the list of existing names + cert_subjects.extend(str(san).split(', ')) + + cert_subject = ', '.join(cert_subjects) + ###################################################################### + + # Grab the expiration date + not_after = cert_loaded.get_notAfter() + # example get_notAfter() => 20180922170439Z + if isinstance(not_after, bytes): + not_after = not_after.decode('utf-8') + + cert_expiry_date = datetime.datetime.strptime( + not_after, + '%Y%m%d%H%M%SZ') + + time_remaining = cert_expiry_date - now + + return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number()) + + +def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list): + """Given metadata about a certificate under examination, classify it + into one of three categories, 'ok', 'warning', and 'expired'. + +Params: + +- `cert_meta` dict - A dict with certificate metadata. Required fields + include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'. +- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires +- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is +- `cert_list` list - A list to shove the classified cert into + +Return: +- `cert_list` - The updated list of classified certificates + """ + expiry_str = str(cert_meta['expiry']) + # Categorization + if cert_meta['expiry'] < now: + # This already expired, must NOTIFY + cert_meta['health'] = 'expired' + elif time_remaining < expire_window: + # WARN about this upcoming expirations + cert_meta['health'] = 'warning' + else: + # Not expired or about to expire + cert_meta['health'] = 'ok' + + cert_meta['expiry'] = expiry_str + cert_meta['serial_hex'] = hex(int(cert_meta['serial'])) + cert_list.append(cert_meta) + return cert_list + + +def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs): + """Calculate the summary text for when the module finishes +running. This includes counts of each classification and what have +you. + +Params: + +- `certificates` (list of dicts) - Processed `expire_check_result` + dicts with filled in `health` keys for system certificates. +- `kubeconfigs` - as above for kubeconfigs +- `etcd_certs` - as above for etcd certs + +Return: + +- `summary_results` (dict) - Counts of each cert type classification + and total items examined. 
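+
+A hypothetical return value (counts invented for illustration):
+
+    {'system_certificates': 2, 'kubeconfig_certificates': 5,
+     'etcd_certificates': 2, 'router_certs': 1, 'registry_certs': 1,
+     'total': 11, 'ok': 9, 'warning': 1, 'expired': 1}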
+ """ + items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs + + summary_results = { + 'system_certificates': len(certificates), + 'kubeconfig_certificates': len(kubeconfigs), + 'etcd_certificates': len(etcd_certs), + 'router_certs': len(router_certs), + 'registry_certs': len(registry_certs), + 'total': len(items), + 'ok': 0, + 'warning': 0, + 'expired': 0 + } + + summary_results['expired'] = len([c for c in items if c['health'] == 'expired']) + summary_results['warning'] = len([c for c in items if c['health'] == 'warning']) + summary_results['ok'] = len([c for c in items if c['health'] == 'ok']) + + return summary_results + + +###################################################################### +# This is our module MAIN function after all, so there's bound to be a +# lot of code bundled up into one block +# +# Reason: These checks are disabled because the issue was introduced +# during a period where the pylint checks weren't enabled for this file +# Status: temporarily disabled pending future refactoring +# pylint: disable=too-many-locals,too-many-statements,too-many-branches +def main(): + """This module examines certificates (in various forms) which compose +an OpenShift Container Platform cluster + """ + + module = AnsibleModule( + argument_spec=dict( + config_base=dict( + required=False, + default="/etc/origin", + type='str'), + warning_days=dict( + required=False, + default=30, + type='int'), + show_all=dict( + required=False, + default=False, + type='bool') + ), + supports_check_mode=True, + ) + + # Basic scaffolding for OpenShift specific certs + openshift_base_config_path = os.path.realpath(module.params['config_base']) + openshift_master_config_path = os.path.join(openshift_base_config_path, + "master", "master-config.yaml") + openshift_node_config_path = os.path.join(openshift_base_config_path, + "node", "node-config.yaml") + openshift_cert_check_paths = [ + openshift_master_config_path, + openshift_node_config_path, + ] + + # Paths for Kubeconfigs. Additional kubeconfigs are conditionally + # checked later in the code + master_kube_configs = ['admin', 'openshift-master', + 'openshift-node', 'openshift-router', + 'openshift-registry'] + + kubeconfig_paths = [] + for m_kube_config in master_kube_configs: + kubeconfig_paths.append( + os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig") + ) + + # Validate some paths we have the ability to do ahead of time + openshift_cert_check_paths = filter_paths(openshift_cert_check_paths) + kubeconfig_paths = filter_paths(kubeconfig_paths) + + # etcd, where do you hide your certs? Used when parsing etcd.conf + etcd_cert_params = [ + "ETCD_CA_FILE", + "ETCD_CERT_FILE", + "ETCD_PEER_CA_FILE", + "ETCD_PEER_CERT_FILE", + ] + + # Expiry checking stuff + now = datetime.datetime.now() + # todo, catch exception for invalid input and return a fail_json + warning_days = int(module.params['warning_days']) + expire_window = datetime.timedelta(days=warning_days) + + # Module stuff + # + # The results of our cert checking to return from the task call + check_results = {} + check_results['meta'] = {} + check_results['meta']['warning_days'] = warning_days + check_results['meta']['checked_at_time'] = str(now) + check_results['meta']['warn_before_date'] = str(now + expire_window) + check_results['meta']['show_all'] = str(module.params['show_all']) + # All the analyzed certs accumulate here + ocp_certs = [] + + ###################################################################### + # Sure, why not? 
Let's enable check mode. + if module.check_mode: + check_results['ocp_certs'] = [] + module.exit_json( + check_results=check_results, + msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'], + rc=0, + changed=False + ) + + ###################################################################### + # Check for OpenShift Container Platform specific certs + ###################################################################### + for os_cert in filter_paths(openshift_cert_check_paths): + # Open up that config file and locate the cert and CA + with io.open(os_cert, 'r', encoding='utf-8') as fp: + cert_meta = {} + cfg = yaml.load(fp) + # cert files are specified in parsed `fp` as relative to the path + # of the original config file. 'master-config.yaml' with certFile + # = 'foo.crt' implies that 'foo.crt' is in the same + # directory. certFile = '../foo.crt' is in the parent directory. + cfg_path = os.path.dirname(fp.name) + cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile']) + cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA']) + + ###################################################################### + # Load the certificate and the CA, parse their expiration dates into + # datetime objects so we can manipulate them later + for v in cert_meta.values(): + with io.open(v, 'r', encoding='utf-8') as fp: + cert = fp.read() + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(cert, now, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs) + + ###################################################################### + # /Check for OpenShift Container Platform specific certs + ###################################################################### + + ###################################################################### + # Check service Kubeconfigs + ###################################################################### + kubeconfigs = [] + + # There may be additional kubeconfigs to check, but their naming + # is less predictable than the ones we've already assembled. + + try: + # Try to read the standard 'node-config.yaml' file to check if + # this host is a node. + with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp: + cfg = yaml.load(fp) + + # OK, the config file exists, therefore this is a + # node. Nodes have their own kubeconfig files to + # communicate with the master API. Let's read the relative + # path to that file from the node config. 
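+            # For example (paths invented for illustration): a
+            # masterKubeConfig of 'system:node:n01.kubeconfig' next to
+            # /etc/origin/node/node-config.yaml resolves to
+            # /etc/origin/node/system:node:n01.kubeconfig.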
+ node_masterKubeConfig = cfg['masterKubeConfig'] + # As before, the path to the 'masterKubeConfig' file is + # relative to `fp` + cfg_path = os.path.dirname(fp.name) + node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig) + + with io.open(node_kubeconfig, 'r', encoding='utf8') as fp: + # Read in the nodes kubeconfig file and grab the good stuff + cfg = yaml.load(fp) + + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + except IOError: + # This is not a node + pass + + for kube in filter_paths(kubeconfig_paths): + with io.open(kube, 'r', encoding='utf-8') as fp: + # TODO: Maybe consider catching exceptions here? + cfg = yaml.load(fp) + + # Per conversation, "the kubeconfigs you care about: + # admin, router, registry should all be single + # value". Following that advice we only grab the data for + # the user at index 0 in the 'users' list. There should + # not be more than one user. + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + + ###################################################################### + # /Check service Kubeconfigs + ###################################################################### + + ###################################################################### + # Check etcd certs + # + # Two things to check: 'external' etcd, and embedded etcd. + ###################################################################### + # FIRST: The 'external' etcd + # + # Some values may be duplicated, make this a set for now so we + # unique them all + etcd_certs_to_check = set([]) + etcd_certs = [] + etcd_cert_params.append('dne') + try: + with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp: + # Add dummy header section. + config = io.StringIO() + config.write(u'[ETCD]\n') + config.write(fp.read().replace('%', '%%')) + config.seek(0, os.SEEK_SET) + + etcd_config = configparser.ConfigParser() + etcd_config.readfp(config) + + for param in etcd_cert_params: + try: + etcd_certs_to_check.add(etcd_config.get('ETCD', param)) + except configparser.NoOptionError: + # That parameter does not exist, oh well... 
+ pass + except IOError: + # No etcd to see here, move along + pass + + for etcd_cert in filter_paths(etcd_certs_to_check): + with io.open(etcd_cert, 'r', encoding='utf-8') as fp: + c = fp.read() + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(c, now, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) + + ###################################################################### + # Now the embedded etcd + ###################################################################### + try: + with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp: + cfg = yaml.load(fp) + except IOError: + # Not present + pass + else: + if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None: + # This is embedded + etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile'] + else: + # Not embedded + etcd_crt_name = None + + if etcd_crt_name is not None: + # etcd_crt_name is relative to the location of the + # master-config.yaml file + cfg_path = os.path.dirname(fp.name) + etcd_cert = os.path.join(cfg_path, etcd_crt_name) + with open(etcd_cert, 'r') as etcd_fp: + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': etcd_fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) + + ###################################################################### + # /Check etcd certs + ###################################################################### + + ###################################################################### + # Check router/registry certs + # + # These are saved as secrets in etcd. That means that we can not + # simply read a file to grab the data. Instead we're going to + # subprocess out to the 'oc get' command. On non-masters this + # command will fail, that is expected so we catch that exception. + ###################################################################### + router_certs = [] + registry_certs = [] + + ###################################################################### + # First the router certs + try: + router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(), + stdout=subprocess.PIPE) + router_ds = yaml.load(router_secrets_raw.communicate()[0]) + router_c = router_ds['data']['tls.crt'] + router_path = router_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. 
+ pass + else: + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': router_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs) + + ###################################################################### + # Now for registry + try: + registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(), + stdout=subprocess.PIPE) + registry_ds = yaml.load(registry_secrets_raw.communicate()[0]) + registry_c = registry_ds['data']['registry.crt'] + registry_path = registry_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. + pass + else: + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': registry_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs) + + ###################################################################### + # /Check router/registry certs + ###################################################################### + + res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs) + + msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format( + count=res['total'], + exp=res['expired'], + warn=res['warning'], + ok=res['ok'], + window=int(module.params['warning_days']), + ) + + # By default we only return detailed information about expired or + # warning certificates. If show_all is true then we will print all + # the certificates examined. + if not module.params['show_all']: + check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']] + check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']] + check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']] + check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']] + check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']] + else: + check_results['ocp_certs'] = ocp_certs + check_results['kubeconfigs'] = kubeconfigs + check_results['etcd'] = etcd_certs + check_results['registry'] = registry_certs + check_results['router'] = router_certs + + # Sort the final results to report in order of ascending safety + # time. That is to say, the certificates which will expire sooner + # will be at the front of the list and certificates which will + # expire later are at the end. Router and registry certs should be + # limited to just 1 result, so don't bother sorting those. 
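+    # For illustration: a certificate with days_remaining == 3 sorts ahead
+    # of one with days_remaining == 300, putting the riskiest entries first.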
+ def cert_key(item): + ''' return the days_remaining key ''' + return item['days_remaining'] + + check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key) + check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key) + check_results['etcd'] = sorted(check_results['etcd'], key=cert_key) + + # This module will never change anything, but we might want to + # change the return code parameter if there is some catastrophic + # error we noticed earlier + module.exit_json( + check_results=check_results, + summary=res, + msg=msg, + rc=0, + changed=False + ) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py new file mode 100644 index 000000000..440b8ec28 --- /dev/null +++ b/roles/lib_utils/library/openshift_container_binary_sync.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=missing-docstring,invalid-name + +import random +import tempfile +import shutil +import os.path + +# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import +from ansible.module_utils.basic import * # noqa: F403 + + +DOCUMENTATION = ''' +--- +module: openshift_container_binary_sync +short_description: Copies OpenShift binaries out of the given image tag to host system. +''' + + +class BinarySyncError(Exception): + def __init__(self, msg): + super(BinarySyncError, self).__init__(msg) + self.msg = msg + + +# pylint: disable=too-few-public-methods,too-many-instance-attributes +class BinarySyncer(object): + """ + Syncs the openshift, oc, and kubectl binaries/symlinks out of + a container onto the host system. + """ + + def __init__(self, module, image, tag, backend): + self.module = module + self.changed = False + self.output = [] + self.bin_dir = '/usr/local/bin' + self._image = image + self.tag = tag + self.backend = backend + self.temp_dir = None # TBD + + def sync(self): + if self.backend == 'atomic': + return self._sync_atomic() + + return self._sync_docker() + + def _sync_atomic(self): + self.temp_dir = tempfile.mkdtemp() + temp_dir_mount = tempfile.mkdtemp() + try: + image_spec = '%s:%s' % (self.image, self.tag) + rc, stdout, stderr = self.module.run_command(['atomic', 'mount', + '--storage', "ostree", + image_spec, temp_dir_mount]) + if rc: + raise BinarySyncError("Error mounting image. stdout=%s, stderr=%s" % + (stdout, stderr)) + for i in ["openshift", "oc"]: + src_file = os.path.join(temp_dir_mount, "usr/bin", i) + shutil.copy(src_file, self.temp_dir) + + self._sync_binaries() + finally: + self.module.run_command(['atomic', 'umount', temp_dir_mount]) + shutil.rmtree(temp_dir_mount) + shutil.rmtree(self.temp_dir) + + def _sync_docker(self): + container_name = "openshift-cli-%s" % random.randint(1, 100000) + rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name', + container_name, '%s:%s' % (self.image, self.tag)]) + if rc: + raise BinarySyncError("Error creating temporary docker container. 
stdout=%s, stderr=%s" % + (stdout, stderr)) + self.output.append(stdout) + try: + self.temp_dir = tempfile.mkdtemp() + self.output.append("Using temp dir: %s" % self.temp_dir) + + rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name, + self.temp_dir]) + if rc: + raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % + (stdout, stderr)) + + rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name, + self.temp_dir]) + if rc: + raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % + (stdout, stderr)) + + self._sync_binaries() + finally: + shutil.rmtree(self.temp_dir) + self.module.run_command(['docker', 'rm', container_name]) + + def _sync_binaries(self): + self._sync_binary('openshift') + + # In older versions, oc was a symlink to openshift: + if os.path.islink(os.path.join(self.temp_dir, 'oc')): + self._sync_symlink('oc', 'openshift') + else: + self._sync_binary('oc') + + # Ensure correct symlinks created: + self._sync_symlink('kubectl', 'openshift') + + # Remove old oadm binary + if os.path.exists(os.path.join(self.bin_dir, 'oadm')): + os.remove(os.path.join(self.bin_dir, 'oadm')) + + def _sync_symlink(self, binary_name, link_to): + """ Ensure the given binary name exists and links to the expected binary. """ + + # The symlink we are creating: + link_path = os.path.join(self.bin_dir, binary_name) + + # The expected file we should be linking to: + link_dest = os.path.join(self.bin_dir, link_to) + + if not os.path.exists(link_path) or \ + not os.path.islink(link_path) or \ + os.path.realpath(link_path) != os.path.realpath(link_dest): + if os.path.exists(link_path): + os.remove(link_path) + os.symlink(link_to, os.path.join(self.bin_dir, binary_name)) + self.output.append("Symlinked %s to %s." % (link_path, link_dest)) + self.changed = True + + def _sync_binary(self, binary_name): + src_path = os.path.join(self.temp_dir, binary_name) + dest_path = os.path.join(self.bin_dir, binary_name) + incoming_checksum = self.module.run_command(['sha256sum', src_path])[1] + if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum: + + # See: https://github.com/openshift/openshift-ansible/issues/4965 + if os.path.islink(dest_path): + os.unlink(dest_path) + self.output.append('Removed old symlink {} before copying binary.'.format(dest_path)) + shutil.move(src_path, dest_path) + self.output.append("Moved %s to %s." % (src_path, dest_path)) + self.changed = True + + @property + def raw_image(self): + """ + Returns the image as it was originally passed in to the instance. + + .. note:: + This image string will only work directly with the atomic command. + + :returns: The original image passed in. + :rtype: str + """ + return self._image + + @property + def image(self): + """ + Returns the image without atomic prefixes used to map to skopeo args. 
+ + :returns: The image string without prefixes + :rtype: str + """ + image = self._image + for remove in ('oci:', 'http:', 'https:'): + if image.startswith(remove): + image = image.replace(remove, '') + return image + + +def main(): + module = AnsibleModule( # noqa: F405 + argument_spec=dict( + image=dict(required=True), + tag=dict(required=True), + backend=dict(required=True), + ), + supports_check_mode=True + ) + + image = module.params['image'] + tag = module.params['tag'] + backend = module.params['backend'] + + if backend not in ["docker", "atomic"]: + module.fail_json(msg="unknown backend") + + binary_syncer = BinarySyncer(module, image, tag, backend) + + try: + binary_syncer.sync() + except BinarySyncError as ex: + module.fail_json(msg=ex.msg) + + return module.exit_json(changed=binary_syncer.changed, + output=binary_syncer.output) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py new file mode 100644 index 000000000..4858c5ec6 --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py @@ -0,0 +1,143 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, regions_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + predicates = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is None: + if 'short_version' in variables['openshift']['common']: + short_version = variables['openshift']['common']['short_version'] + elif 'openshift_release' in variables: + release = variables['openshift_release'] + if release.startswith('v'): + short_version = release[1:] + else: + short_version = release + short_version = '.'.join(short_version.split('.')[0:2]) + elif 'openshift_version' in variables: + version = variables['openshift_version'] + short_version = '.'.join(version.split('.')[0:2]) + else: + # pylint: disable=line-too-long + raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") + if deployment_type == 'origin': + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + elif deployment_type == 'openshift-enterprise': + if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + else: + raise AnsibleError("Unknown deployment_type %s" % deployment_type) + + if deployment_type == 'origin': + # convert short_version to enterprise short_version + short_version = re.sub('^1.', '3.', short_version) + + if short_version == 'latest': + short_version = '3.9' + + # Predicates ordered according to OpenShift Origin source: + # 
origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go + + if short_version == '3.1': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'MatchNodeSelector'}, + ]) + + if short_version == '3.2': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MatchNodeSelector'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'} + ]) + + if short_version == '3.3': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'} + ]) + + if short_version == '3.4': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'MatchInterPodAffinity'} + ]) + + if short_version in ['3.5', '3.6']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + ]) + + if short_version in ['3.7', '3.8', '3.9']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, + ]) + + if regions_enabled: + region_predicate = { + 'name': 'Region', + 'argument': { + 'serviceAffinity': { + 'labels': ['region'] + } + } + } + predicates.append(region_predicate) + + return predicates diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py new file mode 100644 index 000000000..18e1b2e0c --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py @@ -0,0 +1,117 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, zones_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + priorities = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is None: + if 'short_version' in 
variables['openshift']['common']: + short_version = variables['openshift']['common']['short_version'] + elif 'openshift_release' in variables: + release = variables['openshift_release'] + if release.startswith('v'): + short_version = release[1:] + else: + short_version = release + short_version = '.'.join(short_version.split('.')[0:2]) + elif 'openshift_version' in variables: + version = variables['openshift_version'] + short_version = '.'.join(version.split('.')[0:2]) + else: + # pylint: disable=line-too-long + raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") + + if deployment_type == 'origin': + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + elif deployment_type == 'openshift-enterprise': + if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + else: + raise AnsibleError("Unknown deployment_type %s" % deployment_type) + + if deployment_type == 'origin': + # convert short_version to origin short_version + short_version = re.sub('^1.', '3.', short_version) + + if short_version == 'latest': + short_version = '3.9' + + if short_version == '3.1': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1} + ]) + + if short_version == '3.2': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1} + ]) + + if short_version == '3.3': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} + ]) + + if short_version == '3.4': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1} + ]) + + if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']: + priorities.extend([ + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1}, + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} + ]) + + if zones_enabled: + zone_priority = { + 'name': 'Zone', + 'argument': { + 'serviceAntiAffinity': { + 'label': 'zone' + } + }, + 'weight': 2 + } + priorities.append(zone_priority) + + return priorities diff --git a/roles/lib_utils/test/conftest.py b/roles/lib_utils/test/conftest.py new file mode 100644 index 000000000..aabdd4fa1 --- /dev/null +++ b/roles/lib_utils/test/conftest.py @@ -0,0 +1,172 @@ +# pylint: disable=missing-docstring,invalid-name,redefined-outer-name +import os +import pytest +import sys + +from OpenSSL import crypto + 
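+# The lookup plugins under test are not part of an installed package,
+# so prepend the role's lookup_plugins directory to sys.path to make
+# them importable.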
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402 +from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402 + +# Parameter list for valid_cert fixture +VALID_CERTIFICATE_PARAMS = [ + { + 'short_name': 'client', + 'cn': 'client.example.com', + 'serial': 4, + 'uses': b'clientAuth', + 'dns': [], + 'ip': [], + }, + { + 'short_name': 'server', + 'cn': 'server.example.com', + 'serial': 5, + 'uses': b'serverAuth', + 'dns': ['kubernetes', 'openshift'], + 'ip': ['10.0.0.1', '192.168.0.1'] + }, + { + 'short_name': 'combined', + 'cn': 'combined.example.com', + # Verify that HUGE serials parse correctly. + # Frobs PARSING_HEX_SERIAL in _parse_cert + # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240 + 'serial': 14449739080294792594019643629255165375, + 'uses': b'clientAuth, serverAuth', + 'dns': ['etcd'], + 'ip': ['10.0.0.2', '192.168.0.2'] + } +] + +# Extract the short_name from VALID_CERTIFICATE_PARAMS to provide +# friendly naming for the valid_cert fixture +VALID_CERTIFICATE_IDS = [param['short_name'] for param in VALID_CERTIFICATE_PARAMS] + + +@pytest.fixture(scope='session') +def ca(tmpdir_factory): + ca_dir = tmpdir_factory.mktemp('ca') + + key = crypto.PKey() + key.generate_key(crypto.TYPE_RSA, 2048) + + cert = crypto.X509() + cert.set_version(3) + cert.set_serial_number(1) + cert.get_subject().commonName = 'test-signer' + cert.gmtime_adj_notBefore(0) + cert.gmtime_adj_notAfter(24 * 60 * 60) + cert.set_issuer(cert.get_subject()) + cert.set_pubkey(key) + cert.add_extensions([ + crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE, pathlen:0'), + crypto.X509Extension(b'keyUsage', True, + b'digitalSignature, keyEncipherment, keyCertSign, cRLSign'), + crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert) + ]) + cert.add_extensions([ + crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=cert) + ]) + cert.sign(key, 'sha256') + + return { + 'dir': ca_dir, + 'key': key, + 'cert': cert, + } + + +@pytest.fixture(scope='session', + ids=VALID_CERTIFICATE_IDS, + params=VALID_CERTIFICATE_PARAMS) +def valid_cert(request, ca): + common_name = request.param['cn'] + + key = crypto.PKey() + key.generate_key(crypto.TYPE_RSA, 2048) + + cert = crypto.X509() + cert.set_serial_number(request.param['serial']) + cert.gmtime_adj_notBefore(0) + cert.gmtime_adj_notAfter(24 * 60 * 60) + cert.set_issuer(ca['cert'].get_subject()) + cert.set_pubkey(key) + cert.set_version(3) + cert.get_subject().commonName = common_name + cert.add_extensions([ + crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'), + crypto.X509Extension(b'keyUsage', True, b'digitalSignature, keyEncipherment'), + crypto.X509Extension(b'extendedKeyUsage', False, request.param['uses']), + ]) + + if request.param['dns'] or request.param['ip']: + san_list = ['DNS:{}'.format(common_name)] + san_list.extend(['DNS:{}'.format(x) for x in request.param['dns']]) + san_list.extend(['IP:{}'.format(x) for x in request.param['ip']]) + + cert.add_extensions([ + crypto.X509Extension(b'subjectAltName', False, ', '.join(san_list).encode('utf8')) + ]) + cert.sign(ca['key'], 'sha256') + + cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) + cert_file = ca['dir'].join('{}.crt'.format(common_name)) + cert_file.write_binary(cert_contents) + + return { + 'common_name': common_name, + 'serial': 
request.param['serial'], + 'dns': request.param['dns'], + 'ip': request.param['ip'], + 'uses': request.param['uses'], + 'cert_file': cert_file, + 'cert': cert + } + + +@pytest.fixture() +def predicates_lookup(): + return PredicatesLookupModule() + + +@pytest.fixture() +def priorities_lookup(): + return PrioritiesLookupModule() + + +@pytest.fixture() +def facts(): + return { + 'openshift': { + 'common': {} + } + } + + +@pytest.fixture(params=[True, False]) +def regions_enabled(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def zones_enabled(request): + return request.param + + +def v_prefix(release): + """Prefix a release number with 'v'.""" + return "v" + release + + +def minor(release): + """Add a suffix to release, making 'X.Y' become 'X.Y.Z'.""" + return release + ".1" + + +@pytest.fixture(params=[str, v_prefix, minor]) +def release_mod(request): + """Modifies a release string to alternative valid values.""" + return request.param diff --git a/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py new file mode 100644 index 000000000..e8da1e04a --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py @@ -0,0 +1,57 @@ +import copy +import os +import sys + +from ansible.errors import AnsibleError +import pytest + +sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from openshift_master_facts_default_predicates import LookupModule # noqa: E402 + + +class TestOpenShiftMasterFactsBadInput(object): + lookup = LookupModule() + default_facts = { + 'openshift': { + 'common': {} + } + } + + def test_missing_openshift_facts(self): + with pytest.raises(AnsibleError): + facts = {} + self.lookup.run(None, variables=facts) + + def test_missing_deployment_type(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '10.10' + self.lookup.run(None, variables=facts) + + def test_missing_short_version_and_missing_openshift_release(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['deployment_type'] = 'origin' + self.lookup.run(None, variables=facts) + + def test_unknown_deployment_types(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '1.1' + facts['openshift']['common']['deployment_type'] = 'bogus' + self.lookup.run(None, variables=facts) + + def test_unknown_origin_version(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '0.1' + facts['openshift']['common']['deployment_type'] = 'origin' + self.lookup.run(None, variables=facts) + + def test_unknown_ocp_version(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '0.1' + facts['openshift']['common']['deployment_type'] = 'openshift-enterprise' + self.lookup.run(None, variables=facts) diff --git a/roles/lib_utils/test/openshift_master_facts_conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py new file mode 100644 index 000000000..140cced73 --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_conftest.py @@ -0,0 +1,54 @@ +import os +import sys + +import pytest + +sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from 
openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402 +from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402 + + +@pytest.fixture() +def predicates_lookup(): + return PredicatesLookupModule() + + +@pytest.fixture() +def priorities_lookup(): + return PrioritiesLookupModule() + + +@pytest.fixture() +def facts(): + return { + 'openshift': { + 'common': {} + } + } + + +@pytest.fixture(params=[True, False]) +def regions_enabled(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def zones_enabled(request): + return request.param + + +def v_prefix(release): + """Prefix a release number with 'v'.""" + return "v" + release + + +def minor(release): + """Add a suffix to release, making 'X.Y' become 'X.Y.Z'.""" + return release + ".1" + + +@pytest.fixture(params=[str, v_prefix, minor]) +def release_mod(request): + """Modifies a release string to alternative valid values.""" + return request.param diff --git a/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py new file mode 100644 index 000000000..11aad9f03 --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py @@ -0,0 +1,193 @@ +import pytest + + +# Predicates ordered according to OpenShift Origin source: +# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go + +DEFAULT_PREDICATES_1_1 = [ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'MatchNodeSelector'}, +] + +DEFAULT_PREDICATES_1_2 = [ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MatchNodeSelector'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'} +] + +DEFAULT_PREDICATES_1_3 = [ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'} +] + +DEFAULT_PREDICATES_1_4 = [ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'MatchInterPodAffinity'} +] + +DEFAULT_PREDICATES_1_5 = [ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, +] + +DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5 + +DEFAULT_PREDICATES_3_7 = [ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, +] + +DEFAULT_PREDICATES_3_9 = DEFAULT_PREDICATES_3_8 = DEFAULT_PREDICATES_3_7 + +REGION_PREDICATE = { + 'name': 'Region', + 'argument': { + 'serviceAffinity': { + 
'labels': ['region'] + } + } +} + +TEST_VARS = [ + ('1.1', 'origin', DEFAULT_PREDICATES_1_1), + ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1), + ('1.2', 'origin', DEFAULT_PREDICATES_1_2), + ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2), + ('1.3', 'origin', DEFAULT_PREDICATES_1_3), + ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3), + ('1.4', 'origin', DEFAULT_PREDICATES_1_4), + ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4), + ('1.5', 'origin', DEFAULT_PREDICATES_1_5), + ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), + ('3.6', 'origin', DEFAULT_PREDICATES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6), + ('3.7', 'origin', DEFAULT_PREDICATES_3_7), + ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7), + ('3.8', 'origin', DEFAULT_PREDICATES_3_8), + ('3.8', 'openshift-enterprise', DEFAULT_PREDICATES_3_8), + ('3.9', 'origin', DEFAULT_PREDICATES_3_9), + ('3.9', 'openshift-enterprise', DEFAULT_PREDICATES_3_9), +] + + +def assert_ok(predicates_lookup, default_predicates, regions_enabled, **kwargs): + results = predicates_lookup.run(None, regions_enabled=regions_enabled, **kwargs) + if regions_enabled: + assert results == default_predicates + [REGION_PREDICATE] + else: + assert results == default_predicates + + +def test_openshift_version(predicates_lookup, openshift_version_fixture, regions_enabled): + facts, default_predicates = openshift_version_fixture + assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_version_fixture(request, facts): + version, deployment_type, default_predicates = request.param + version += '.1' + facts['openshift_version'] = version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_predicates + + +def test_openshift_release(predicates_lookup, openshift_release_fixture, regions_enabled): + facts, default_predicates = openshift_release_fixture + assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_release_fixture(request, facts, release_mod): + release, deployment_type, default_predicates = request.param + facts['openshift_release'] = release_mod(release) + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_predicates + + +def test_short_version(predicates_lookup, short_version_fixture, regions_enabled): + facts, default_predicates = short_version_fixture + assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled) + + +@pytest.fixture(params=TEST_VARS) +def short_version_fixture(request, facts): + short_version, deployment_type, default_predicates = request.param + facts['openshift']['common']['short_version'] = short_version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_predicates + + +def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled): + facts, short_version, default_predicates = short_version_kwarg_fixture + assert_ok( + predicates_lookup, default_predicates, variables=facts, + regions_enabled=regions_enabled, short_version=short_version) + + +@pytest.fixture(params=TEST_VARS) +def short_version_kwarg_fixture(request, facts): + short_version, deployment_type, default_predicates = request.param + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, short_version, 
default_predicates + + +def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled): + facts, deployment_type, default_predicates = deployment_type_kwarg_fixture + assert_ok( + predicates_lookup, default_predicates, variables=facts, + regions_enabled=regions_enabled, deployment_type=deployment_type) + + +@pytest.fixture(params=TEST_VARS) +def deployment_type_kwarg_fixture(request, facts): + short_version, deployment_type, default_predicates = request.param + facts['openshift']['common']['short_version'] = short_version + return facts, deployment_type, default_predicates + + +def test_short_version_deployment_type_kwargs( + predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled): + short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture + assert_ok( + predicates_lookup, default_predicates, regions_enabled=regions_enabled, + short_version=short_version, deployment_type=deployment_type) + + +@pytest.fixture(params=TEST_VARS) +def short_version_deployment_type_kwargs_fixture(request): + return request.param diff --git a/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py new file mode 100644 index 000000000..527fc9ff4 --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py @@ -0,0 +1,167 @@ +import pytest + + +DEFAULT_PRIORITIES_1_1 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_2 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_3 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_4 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_5 = [ + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1}, + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5 + +DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8 = DEFAULT_PRIORITIES_3_7 = DEFAULT_PRIORITIES_3_6 + +ZONE_PRIORITY = { + 'name': 'Zone', + 'argument': { + 'serviceAntiAffinity': { + 'label': 'zone' + } + }, + 'weight': 2 +} + +TEST_VARS = [ + ('1.1', 'origin', DEFAULT_PRIORITIES_1_1), + ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1), + ('1.2', 'origin', DEFAULT_PRIORITIES_1_2), + ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2), + ('1.3', 'origin', DEFAULT_PRIORITIES_1_3), + ('3.3', 
'openshift-enterprise', DEFAULT_PRIORITIES_1_3), + ('1.4', 'origin', DEFAULT_PRIORITIES_1_4), + ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4), + ('1.5', 'origin', DEFAULT_PRIORITIES_1_5), + ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5), + ('3.6', 'origin', DEFAULT_PRIORITIES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6), + ('3.7', 'origin', DEFAULT_PRIORITIES_3_7), + ('3.7', 'openshift-enterprise', DEFAULT_PRIORITIES_3_7), + ('3.8', 'origin', DEFAULT_PRIORITIES_3_8), + ('3.8', 'openshift-enterprise', DEFAULT_PRIORITIES_3_8), + ('3.9', 'origin', DEFAULT_PRIORITIES_3_9), + ('3.9', 'openshift-enterprise', DEFAULT_PRIORITIES_3_9), +] + + +def assert_ok(priorities_lookup, default_priorities, zones_enabled, **kwargs): + results = priorities_lookup.run(None, zones_enabled=zones_enabled, **kwargs) + if zones_enabled: + assert results == default_priorities + [ZONE_PRIORITY] + else: + assert results == default_priorities + + +def test_openshift_version(priorities_lookup, openshift_version_fixture, zones_enabled): + facts, default_priorities = openshift_version_fixture + assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_version_fixture(request, facts): + version, deployment_type, default_priorities = request.param + version += '.1' + facts['openshift_version'] = version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_priorities + + +def test_openshift_release(priorities_lookup, openshift_release_fixture, zones_enabled): + facts, default_priorities = openshift_release_fixture + assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_release_fixture(request, facts, release_mod): + release, deployment_type, default_priorities = request.param + facts['openshift_release'] = release_mod(release) + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_priorities + + +def test_short_version(priorities_lookup, short_version_fixture, zones_enabled): + facts, default_priorities = short_version_fixture + assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled) + + +@pytest.fixture(params=TEST_VARS) +def short_version_fixture(request, facts): + short_version, deployment_type, default_priorities = request.param + facts['openshift']['common']['short_version'] = short_version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_priorities + + +def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled): + facts, short_version, default_priorities = short_version_kwarg_fixture + assert_ok( + priorities_lookup, default_priorities, variables=facts, + zones_enabled=zones_enabled, short_version=short_version) + + +@pytest.fixture(params=TEST_VARS) +def short_version_kwarg_fixture(request, facts): + short_version, deployment_type, default_priorities = request.param + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, short_version, default_priorities + + +def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled): + facts, deployment_type, default_priorities = deployment_type_kwarg_fixture + assert_ok( + priorities_lookup, default_priorities, variables=facts, + zones_enabled=zones_enabled, deployment_type=deployment_type) + + 
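+# Each test/fixture pair is parametrized over TEST_VARS, so every
+# (short_version, deployment_type) combination is exercised once for
+# each way the version can be supplied (facts, release string, kwargs).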
+@pytest.fixture(params=TEST_VARS)
+def deployment_type_kwarg_fixture(request, facts):
+    short_version, deployment_type, default_priorities = request.param
+    facts['openshift']['common']['short_version'] = short_version
+    return facts, deployment_type, default_priorities
+
+
+def test_short_version_deployment_type_kwargs(
+        priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
+    short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
+    assert_ok(
+        priorities_lookup, default_priorities, zones_enabled=zones_enabled,
+        short_version=short_version, deployment_type=deployment_type)
+
+
+@pytest.fixture(params=TEST_VARS)
+def short_version_deployment_type_kwargs_fixture(request):
+    return request.param
diff --git a/roles/lib_utils/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py
new file mode 100644
index 000000000..8a521a765
--- /dev/null
+++ b/roles/lib_utils/test/test_fakeopensslclasses.py
@@ -0,0 +1,90 @@
+'''
+ Unit tests for the FakeOpenSSL classes
+'''
+import os
+import subprocess
+import sys
+
+import pytest
+
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
+sys.path.insert(1, MODULE_PATH)
+
+# pylint: disable=import-error,wrong-import-position,missing-docstring
+# pylint: disable=invalid-name,redefined-outer-name
+from openshift_cert_expiry import FakeOpenSSLCertificate  # noqa: E402
+
+
+@pytest.fixture(scope='module')
+def fake_valid_cert(valid_cert):
+    cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+           '-nameopt', 'oneline']
+    cert = subprocess.check_output(cmd)
+    return FakeOpenSSLCertificate(cert.decode('utf8'))
+
+
+def test_not_after(valid_cert, fake_valid_cert):
+    ''' Validate value returned back from get_notAfter() '''
+    real_cert = valid_cert['cert']
+
+    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
+    # is text, so decode the result from pyOpenSSL prior to comparing
+    assert real_cert.get_notAfter().decode('utf8') == fake_valid_cert.get_notAfter()
+
+
+def test_serial(valid_cert, fake_valid_cert):
+    ''' Validate value returned back from get_serial_number() '''
+    real_cert = valid_cert['cert']
+    assert real_cert.get_serial_number() == fake_valid_cert.get_serial_number()
+
+
+def test_get_subject(valid_cert, fake_valid_cert):
+    ''' Validate the certificate subject '''
+
+    # Gather the subject components and create a list of colon separated strings.
+    # Since the internal representation of pyOpenSSL uses bytes, we need to decode
+    # the results before comparing.
+    c_subjects = valid_cert['cert'].get_subject().get_components()
+    c_subj = ', '.join(['{}:{}'.format(x.decode('utf8'), y.decode('utf8')) for x, y in c_subjects])
+    f_subjects = fake_valid_cert.get_subject().get_components()
+    f_subj = ', '.join(['{}:{}'.format(x, y) for x, y in f_subjects])
+    assert c_subj == f_subj
+
+
+def get_san_extension(cert):
+    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
+    # is text, so we need to set the value to search for accordingly.
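+    # For example (hypothetical certificate), the string form compared
+    # by the tests below looks like:
+    #   'DNS:server.example.com, IP Address:10.0.0.1'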
+ if isinstance(cert, FakeOpenSSLCertificate): + san_short_name = 'subjectAltName' + else: + san_short_name = b'subjectAltName' + + for i in range(cert.get_extension_count()): + ext = cert.get_extension(i) + if ext.get_short_name() == san_short_name: + # return the string representation to compare the actual SAN + # values instead of the data types + return str(ext) + + return None + + +def test_subject_alt_names(valid_cert, fake_valid_cert): + real_cert = valid_cert['cert'] + + san = get_san_extension(real_cert) + f_san = get_san_extension(fake_valid_cert) + + assert san == f_san + + # If there are either dns or ip sans defined, verify common_name present + if valid_cert['ip'] or valid_cert['dns']: + assert 'DNS:' + valid_cert['common_name'] in f_san + + # Verify all ip sans are present + for ip in valid_cert['ip']: + assert 'IP Address:' + ip in f_san + + # Verify all dns sans are present + for name in valid_cert['dns']: + assert 'DNS:' + name in f_san diff --git a/roles/lib_utils/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py new file mode 100644 index 000000000..98792e2ee --- /dev/null +++ b/roles/lib_utils/test/test_load_and_handle_cert.py @@ -0,0 +1,67 @@ +''' + Unit tests for the load_and_handle_cert method +''' +import datetime +import os +import sys + +import pytest + +MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library')) +sys.path.insert(1, MODULE_PATH) + +# pylint: disable=import-error,wrong-import-position,missing-docstring +# pylint: disable=invalid-name,redefined-outer-name +import openshift_cert_expiry # noqa: E402 + +# TODO: More testing on the results of the load_and_handle_cert function +# could be implemented here as well, such as verifying subjects +# match up. + + +@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate']) +def loaded_cert(request, valid_cert): + """ parameterized fixture to provide load_and_handle_cert results + for both OpenSSL and FakeOpenSSL parsed certificates + """ + now = datetime.datetime.now() + + openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate' + + # valid_cert['cert_file'] is a `py.path.LocalPath` object and + # provides a read_text() method for reading the file contents. 
+ cert_string = valid_cert['cert_file'].read_text('utf8') + + (subject, + expiry_date, + time_remaining, + serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now) + + return { + 'now': now, + 'subject': subject, + 'expiry_date': expiry_date, + 'time_remaining': time_remaining, + 'serial': serial, + } + + +def test_serial(loaded_cert, valid_cert): + """Params: + + * `loaded_cert` comes from the `loaded_cert` fixture in this file + * `valid_cert` comes from the 'valid_cert' fixture in conftest.py + """ + valid_cert_serial = valid_cert['cert'].get_serial_number() + assert loaded_cert['serial'] == valid_cert_serial + + +def test_expiry(loaded_cert): + """Params: + + * `loaded_cert` comes from the `loaded_cert` fixture in this file + """ + expiry_date = loaded_cert['expiry_date'] + time_remaining = loaded_cert['time_remaining'] + now = loaded_cert['now'] + assert expiry_date == now + time_remaining diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 71de24339..8c8227b5e 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -109,6 +109,7 @@ openshift_aws_node_group_config_node_volumes: device_type: gp2 delete_on_termination: True +# build_instance_tags is a custom filter in role lib_utils openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_group_termination_policy: Default openshift_aws_node_group_replace_instances: [] @@ -201,6 +202,7 @@ openshift_aws_node_group_config: openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" openshift_aws_elb_az_load_balancing: False +# build_instance_tags is a custom filter in role lib_utils openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}" diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py deleted file mode 100644 index dfcb11da3..000000000 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift_aws -''' - -from ansible import errors - - -class FilterModule(object): - ''' Custom ansible filters for use by openshift_aws role''' - - @staticmethod - def scale_groups_serial(scale_group_info, upgrade=False): - ''' This function will determine what the deployment serial should be and return it - - Search through the tags and find the deployment_serial tag. Once found, - determine if an increment is needed during an upgrade. 
- if upgrade is true then increment the serial and return it - else return the serial - ''' - if scale_group_info == []: - return 1 - - scale_group_info = scale_group_info[0] - - if not isinstance(scale_group_info, dict): - raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict") - - serial = None - - for tag in scale_group_info['tags']: - if tag['key'] == 'deployment_serial': - serial = int(tag['value']) - if upgrade: - serial += 1 - break - else: - raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found") - - return serial - - @staticmethod - def scale_groups_match_capacity(scale_group_info): - ''' This function will verify that the scale group instance count matches - the scale group desired capacity - - ''' - for scale_group in scale_group_info: - if scale_group['desired_capacity'] != len(scale_group['instances']): - return False - - return True - - @staticmethod - def build_instance_tags(clusterid): - ''' This function will return a dictionary of the instance tags. - - The main desire to have this inside of a filter_plugin is that we - need to build the following key. - - {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} - - ''' - tags = {'clusterid': clusterid, - 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} - - return tags - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {'build_instance_tags': self.build_instance_tags, - 'scale_groups_match_capacity': self.scale_groups_match_capacity, - 'scale_groups_serial': self.scale_groups_serial} diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 9485cc3ac..a9f9cc3c4 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -43,6 +43,7 @@ - name: set the value for the deployment_serial and the current asgs set_fact: + # scale_groups_serial is a custom filter in role lib_utils l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}" openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}" diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml index 1f4ef3e1c..3ad876e37 100644 --- a/roles/openshift_aws/tasks/wait_for_groups.yml +++ b/roles/openshift_aws/tasks/wait_for_groups.yml @@ -8,6 +8,7 @@ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}" register: qasg + # scale_groups_match_capacity is a custom filter in role lib_utils until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool delay: 10 retries: 60 diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py deleted file mode 100644 index 58b228fee..000000000 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -""" -Custom filters for use in openshift-ansible -""" - - -# Disabling too-many-public-methods, since filter methods are necessarily -# public -# pylint: disable=too-many-public-methods -class FilterModule(object): - """ Custom ansible filters """ - - @staticmethod - def 
oo_cert_expiry_results_to_json(hostvars, play_hosts): - """Takes results (`hostvars`) from the openshift_cert_expiry role -check and serializes them into proper machine-readable JSON -output. This filter parameter **MUST** be the playbook `hostvars` -variable. The `play_hosts` parameter is so we know what to loop over -when we're extrating the values. - -Returns: - -Results are collected into two top-level keys under the `json_results` -dict: - -* `json_results.data` [dict] - Each individual host check result, keys are hostnames -* `json_results.summary` [dict] - Summary of number of `warning` and `expired` -certificates - -Example playbook usage: - - - name: Generate expiration results JSON - run_once: yes - delegate_to: localhost - when: openshift_certificate_expiry_save_json_results|bool - copy: - content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" - dest: "{{ openshift_certificate_expiry_json_results_path }}" - - """ - json_result = { - 'data': {}, - 'summary': {}, - } - - for host in play_hosts: - json_result['data'][host] = hostvars[host]['check_results']['check_results'] - - total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts]) - total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts]) - total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts]) - total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts]) - - json_result['summary']['warning'] = total_warnings - json_result['summary']['expired'] = total_expired - json_result['summary']['ok'] = total_ok - json_result['summary']['total'] = total_total - - return json_result - - def filters(self): - """ returns a mapping of filters to methods """ - return { - "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json, - } diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py deleted file mode 100644 index e355266b0..000000000 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ /dev/null @@ -1,839 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# pylint: disable=line-too-long,invalid-name - -"""For details on this module see DOCUMENTATION (below)""" - -import base64 -import datetime -import io -import os -import subprocess -import yaml - -# pylint import-error disabled because pylint cannot find the package -# when installed in a virtualenv -from ansible.module_utils.six.moves import configparser # pylint: disable=import-error -from ansible.module_utils.basic import AnsibleModule - -try: - # You can comment this import out and include a 'pass' in this - # block if you're manually testing this module on a NON-ATOMIC - # HOST (or any host that just doesn't have PyOpenSSL - # available). That will force the `load_and_handle_cert` function - # to use the Fake OpenSSL classes. - import OpenSSL.crypto - HAS_OPENSSL = True -except ImportError: - # Some platforms (such as RHEL Atomic) may not have the Python - # OpenSSL library installed. In this case we will use a manual - # work-around to parse each certificate. - # - # Check for 'OpenSSL.crypto' in `sys.modules` later. 
- HAS_OPENSSL = False - -DOCUMENTATION = ''' ---- -module: openshift_cert_expiry -short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster -description: - - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired. - - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following: - - C(ok) - not expired, and outside of the expiration C(warning_days) window. - - C(warning) - not expired, but will expire between now and the C(warning_days) window. - - C(expired) - an expired certificate. - - Certificate flagging follow this logic: - - If the expiration date is before now then the certificate is classified as C(expired). - - The certificates time to live (expiration date - now) is calculated, if that time window is less than C(warning_days) the certificate is classified as C(warning). - - All other conditions are classified as C(ok). - - The following keys are ALSO present in the certificate summary: - - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted) - - C(days_remaining) - The number of days until the certificate expires. - - C(expiry) - The date the certificate expires on. - - C(path) - The full path to the certificate on the examined host. -version_added: "1.0" -options: - config_base: - description: - - Base path to OCP system settings. - required: false - default: /etc/origin - warning_days: - description: - - Flag certificates which will expire in C(warning_days) days from now. - required: false - default: 30 - show_all: - description: - - Enable this option to show analysis of ALL certificates examined by this module. - - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported. - required: false - default: false - -author: "Tim Bielawa (@tbielawa) " -''' - -EXAMPLES = ''' -# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now -- openshift_cert_expiry: - -# Expand the warning window to show certificates expiring within a year from now -- openshift_cert_expiry: warning_days=365 - -# Show expired, soon to expire (now + 30 days), and all other certificates examined -- openshift_cert_expiry: show_all=true -''' - - -class FakeOpenSSLCertificate(object): - """This provides a rough mock of what you get from -`OpenSSL.crypto.load_certificate()`. This is a work-around for -platforms missing the Python OpenSSL library. 
- """ - def __init__(self, cert_string): - """`cert_string` is a certificate in the form you get from running a -.crt through 'openssl x509 -in CERT.cert -text'""" - self.cert_string = cert_string - self.serial = None - self.subject = None - self.extensions = [] - self.not_after = None - self._parse_cert() - - def _parse_cert(self): - """Manually parse the certificate line by line""" - self.extensions = [] - - PARSING_ALT_NAMES = False - PARSING_HEX_SERIAL = False - for line in self.cert_string.split('\n'): - l = line.strip() - if PARSING_ALT_NAMES: - # We're parsing a 'Subject Alternative Name' line - self.extensions.append( - FakeOpenSSLCertificateSANExtension(l)) - - PARSING_ALT_NAMES = False - continue - - if PARSING_HEX_SERIAL: - # Hex serials arrive colon-delimited - serial_raw = l.replace(':', '') - # Convert to decimal - self.serial = int('0x' + serial_raw, base=16) - PARSING_HEX_SERIAL = False - continue - - # parse out the bits that we can - if l.startswith('Serial Number:'): - # Decimal format: - # Serial Number: 11 (0xb) - # => 11 - # Hex Format (large serials): - # Serial Number: - # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf - # => 14449739080294792594019643629255165375 - if l.endswith(':'): - PARSING_HEX_SERIAL = True - continue - self.serial = int(l.split()[-2]) - - elif l.startswith('Not After :'): - # Not After : Feb 7 18:19:35 2019 GMT - # => strptime(str, '%b %d %H:%M:%S %Y %Z') - # => strftime('%Y%m%d%H%M%SZ') - # => 20190207181935Z - not_after_raw = l.partition(' : ')[-1] - # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT') - not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z') - self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ') - - elif l.startswith('X509v3 Subject Alternative Name:'): - PARSING_ALT_NAMES = True - continue - - elif l.startswith('Subject:'): - # O = system:nodes, CN = system:node:m01.example.com - self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1]) - - def get_serial_number(self): - """Return the serial number of the cert""" - return self.serial - - def get_subject(self): - """Subjects must implement get_components() and return dicts or -tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject': - - Subject: Subject: O=system:nodes, CN=system:node:m01.example.com - -might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')] - """ - return self.subject - - def get_extension(self, i): - """Extensions must implement get_short_name() and return the string -'subjectAltName'""" - return self.extensions[i] - - def get_extension_count(self): - """ get_extension_count """ - return len(self.extensions) - - def get_notAfter(self): - """Returns a date stamp as a string in the form -'20180922170439Z'. strptime the result with format param: -'%Y%m%d%H%M%SZ'.""" - return self.not_after - - -class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods - """Mocks what happens when `get_extension` is called on a certificate -object""" - - def __init__(self, san_string): - """With `san_string` as you get from: - - $ openssl x509 -in certificate.crt -text - """ - self.san_string = san_string - self.short_name = 'subjectAltName' - - def get_short_name(self): - """Return the 'type' of this extension. 
It's always the same though -because we only care about subjectAltName's""" - return self.short_name - - def __str__(self): - """Return this extension and the value as a simple string""" - return self.san_string - - -# pylint: disable=too-few-public-methods -class FakeOpenSSLCertificateSubjects(object): - """Mocks what happens when `get_subject` is called on a certificate -object""" - - def __init__(self, subject_string): - """With `subject_string` as you get from: - - $ openssl x509 -in certificate.crt -text - """ - self.subjects = [] - for s in subject_string.split(', '): - name, _, value = s.partition(' = ') - self.subjects.append((name, value)) - - def get_components(self): - """Returns a list of tuples""" - return self.subjects - - -###################################################################### -def filter_paths(path_list): - """`path_list` - A list of file paths to check. Only files which exist -will be returned - """ - return [p for p in path_list if os.path.exists(os.path.realpath(p))] - - -# pylint: disable=too-many-locals,too-many-branches -# -# TODO: Break this function down into smaller chunks -def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None): - """Load a certificate, split off the good parts, and return some -useful data - -Params: - -- `cert_string` (string) - a certificate loaded into a string object -- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against -- `base64decode` (bool) - run base64.b64decode() on the input -- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors) - -Returns: -A tuple of the form: - (cert_subject, cert_expiry_date, time_remaining, cert_serial_number) - """ - if base64decode: - _cert_string = base64.b64decode(cert_string).decode('utf-8') - else: - _cert_string = cert_string - - # Disable this. We 'redefine' the type because we are working - # around a missing library on the target host. - # - # pylint: disable=redefined-variable-type - if HAS_OPENSSL: - # No work-around required - cert_loaded = OpenSSL.crypto.load_certificate( - OpenSSL.crypto.FILETYPE_PEM, _cert_string) - else: - # Missing library, work-around required. Run the 'openssl' - # command on it to decode it - cmd = 'openssl x509 -text' - try: - openssl_proc = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, - stdin=subprocess.PIPE) - except OSError: - ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.") - else: - openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8') - cert_loaded = FakeOpenSSLCertificate(openssl_decoded) - - ###################################################################### - # Read all possible names from the cert - cert_subjects = [] - for name, value in cert_loaded.get_subject().get_components(): - if isinstance(name, bytes) or isinstance(value, bytes): - name = name.decode('utf-8') - value = value.decode('utf-8') - cert_subjects.append('{}:{}'.format(name, value)) - - # To read SANs from a cert we must read the subjectAltName - # extension from the X509 Object. 
What makes this more difficult - # is that pyOpenSSL does not give extensions as an iterable - san = None - for i in range(cert_loaded.get_extension_count()): - ext = cert_loaded.get_extension(i) - if ext.get_short_name() == 'subjectAltName': - san = ext - - if san is not None: - # The X509Extension object for subjectAltName prints as a - # string with the alt names separated by a comma and a - # space. Split the string by ', ' and then add our new names - # to the list of existing names - cert_subjects.extend(str(san).split(', ')) - - cert_subject = ', '.join(cert_subjects) - ###################################################################### - - # Grab the expiration date - not_after = cert_loaded.get_notAfter() - # example get_notAfter() => 20180922170439Z - if isinstance(not_after, bytes): - not_after = not_after.decode('utf-8') - - cert_expiry_date = datetime.datetime.strptime( - not_after, - '%Y%m%d%H%M%SZ') - - time_remaining = cert_expiry_date - now - - return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number()) - - -def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list): - """Given metadata about a certificate under examination, classify it - into one of three categories, 'ok', 'warning', and 'expired'. - -Params: - -- `cert_meta` dict - A dict with certificate metadata. Required fields - include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'. -- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against -- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires -- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is -- `cert_list` list - A list to shove the classified cert into - -Return: -- `cert_list` - The updated list of classified certificates - """ - expiry_str = str(cert_meta['expiry']) - # Categorization - if cert_meta['expiry'] < now: - # This already expired, must NOTIFY - cert_meta['health'] = 'expired' - elif time_remaining < expire_window: - # WARN about this upcoming expirations - cert_meta['health'] = 'warning' - else: - # Not expired or about to expire - cert_meta['health'] = 'ok' - - cert_meta['expiry'] = expiry_str - cert_meta['serial_hex'] = hex(int(cert_meta['serial'])) - cert_list.append(cert_meta) - return cert_list - - -def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs): - """Calculate the summary text for when the module finishes -running. This includes counts of each classification and what have -you. - -Params: - -- `certificates` (list of dicts) - Processed `expire_check_result` - dicts with filled in `health` keys for system certificates. -- `kubeconfigs` - as above for kubeconfigs -- `etcd_certs` - as above for etcd certs - -Return: - -- `summary_results` (dict) - Counts of each cert type classification - and total items examined. 
- """ - items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs - - summary_results = { - 'system_certificates': len(certificates), - 'kubeconfig_certificates': len(kubeconfigs), - 'etcd_certificates': len(etcd_certs), - 'router_certs': len(router_certs), - 'registry_certs': len(registry_certs), - 'total': len(items), - 'ok': 0, - 'warning': 0, - 'expired': 0 - } - - summary_results['expired'] = len([c for c in items if c['health'] == 'expired']) - summary_results['warning'] = len([c for c in items if c['health'] == 'warning']) - summary_results['ok'] = len([c for c in items if c['health'] == 'ok']) - - return summary_results - - -###################################################################### -# This is our module MAIN function after all, so there's bound to be a -# lot of code bundled up into one block -# -# Reason: These checks are disabled because the issue was introduced -# during a period where the pylint checks weren't enabled for this file -# Status: temporarily disabled pending future refactoring -# pylint: disable=too-many-locals,too-many-statements,too-many-branches -def main(): - """This module examines certificates (in various forms) which compose -an OpenShift Container Platform cluster - """ - - module = AnsibleModule( - argument_spec=dict( - config_base=dict( - required=False, - default="/etc/origin", - type='str'), - warning_days=dict( - required=False, - default=30, - type='int'), - show_all=dict( - required=False, - default=False, - type='bool') - ), - supports_check_mode=True, - ) - - # Basic scaffolding for OpenShift specific certs - openshift_base_config_path = os.path.realpath(module.params['config_base']) - openshift_master_config_path = os.path.join(openshift_base_config_path, - "master", "master-config.yaml") - openshift_node_config_path = os.path.join(openshift_base_config_path, - "node", "node-config.yaml") - openshift_cert_check_paths = [ - openshift_master_config_path, - openshift_node_config_path, - ] - - # Paths for Kubeconfigs. Additional kubeconfigs are conditionally - # checked later in the code - master_kube_configs = ['admin', 'openshift-master', - 'openshift-node', 'openshift-router', - 'openshift-registry'] - - kubeconfig_paths = [] - for m_kube_config in master_kube_configs: - kubeconfig_paths.append( - os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig") - ) - - # Validate some paths we have the ability to do ahead of time - openshift_cert_check_paths = filter_paths(openshift_cert_check_paths) - kubeconfig_paths = filter_paths(kubeconfig_paths) - - # etcd, where do you hide your certs? Used when parsing etcd.conf - etcd_cert_params = [ - "ETCD_CA_FILE", - "ETCD_CERT_FILE", - "ETCD_PEER_CA_FILE", - "ETCD_PEER_CERT_FILE", - ] - - # Expiry checking stuff - now = datetime.datetime.now() - # todo, catch exception for invalid input and return a fail_json - warning_days = int(module.params['warning_days']) - expire_window = datetime.timedelta(days=warning_days) - - # Module stuff - # - # The results of our cert checking to return from the task call - check_results = {} - check_results['meta'] = {} - check_results['meta']['warning_days'] = warning_days - check_results['meta']['checked_at_time'] = str(now) - check_results['meta']['warn_before_date'] = str(now + expire_window) - check_results['meta']['show_all'] = str(module.params['show_all']) - # All the analyzed certs accumulate here - ocp_certs = [] - - ###################################################################### - # Sure, why not? 
Let's enable check mode. - if module.check_mode: - check_results['ocp_certs'] = [] - module.exit_json( - check_results=check_results, - msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'], - rc=0, - changed=False - ) - - ###################################################################### - # Check for OpenShift Container Platform specific certs - ###################################################################### - for os_cert in filter_paths(openshift_cert_check_paths): - # Open up that config file and locate the cert and CA - with io.open(os_cert, 'r', encoding='utf-8') as fp: - cert_meta = {} - cfg = yaml.load(fp) - # cert files are specified in parsed `fp` as relative to the path - # of the original config file. 'master-config.yaml' with certFile - # = 'foo.crt' implies that 'foo.crt' is in the same - # directory. certFile = '../foo.crt' is in the parent directory. - cfg_path = os.path.dirname(fp.name) - cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile']) - cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA']) - - ###################################################################### - # Load the certificate and the CA, parse their expiration dates into - # datetime objects so we can manipulate them later - for v in cert_meta.values(): - with io.open(v, 'r', encoding='utf-8') as fp: - cert = fp.read() - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(cert, now, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs) - - ###################################################################### - # /Check for OpenShift Container Platform specific certs - ###################################################################### - - ###################################################################### - # Check service Kubeconfigs - ###################################################################### - kubeconfigs = [] - - # There may be additional kubeconfigs to check, but their naming - # is less predictable than the ones we've already assembled. - - try: - # Try to read the standard 'node-config.yaml' file to check if - # this host is a node. - with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp: - cfg = yaml.load(fp) - - # OK, the config file exists, therefore this is a - # node. Nodes have their own kubeconfig files to - # communicate with the master API. Let's read the relative - # path to that file from the node config. 
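For reference, the lookup that follows reduces to a small amount of standalone code: resolve masterKubeConfig against the directory of node-config.yaml, then pull the base64-encoded client certificate out of the kubeconfig. A minimal sketch, assuming PyYAML is available (the helper name and safe_load are illustrative, not part of the module):

    import base64
    import os

    import yaml

    def node_client_cert(node_config_path):
        """Return the node's decoded client certificate PEM."""
        with open(node_config_path) as fp:
            node_cfg = yaml.safe_load(fp)
        # masterKubeConfig is relative to the node config's own directory
        kubeconfig = os.path.join(os.path.dirname(node_config_path),
                                  node_cfg['masterKubeConfig'])
        with open(kubeconfig) as fp:
            kube_cfg = yaml.safe_load(fp)
        data = kube_cfg['users'][0]['user']['client-certificate-data']
        return base64.b64decode(data).decode('utf-8')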
- node_masterKubeConfig = cfg['masterKubeConfig'] - # As before, the path to the 'masterKubeConfig' file is - # relative to `fp` - cfg_path = os.path.dirname(fp.name) - node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig) - - with io.open(node_kubeconfig, 'r', encoding='utf8') as fp: - # Read in the nodes kubeconfig file and grab the good stuff - cfg = yaml.load(fp) - - c = cfg['users'][0]['user']['client-certificate-data'] - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) - except IOError: - # This is not a node - pass - - for kube in filter_paths(kubeconfig_paths): - with io.open(kube, 'r', encoding='utf-8') as fp: - # TODO: Maybe consider catching exceptions here? - cfg = yaml.load(fp) - - # Per conversation, "the kubeconfigs you care about: - # admin, router, registry should all be single - # value". Following that advice we only grab the data for - # the user at index 0 in the 'users' list. There should - # not be more than one user. - c = cfg['users'][0]['user']['client-certificate-data'] - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) - - ###################################################################### - # /Check service Kubeconfigs - ###################################################################### - - ###################################################################### - # Check etcd certs - # - # Two things to check: 'external' etcd, and embedded etcd. - ###################################################################### - # FIRST: The 'external' etcd - # - # Some values may be duplicated, make this a set for now so we - # unique them all - etcd_certs_to_check = set([]) - etcd_certs = [] - etcd_cert_params.append('dne') - try: - with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp: - # Add dummy header section. - config = io.StringIO() - config.write(u'[ETCD]\n') - config.write(fp.read().replace('%', '%%')) - config.seek(0, os.SEEK_SET) - - etcd_config = configparser.ConfigParser() - etcd_config.readfp(config) - - for param in etcd_cert_params: - try: - etcd_certs_to_check.add(etcd_config.get('ETCD', param)) - except configparser.NoOptionError: - # That parameter does not exist, oh well... 
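etcd.conf is a sectionless key=value file, which configparser cannot read directly; the block above therefore prepends a dummy [ETCD] header and doubles every % so interpolation does not trip over it. The same trick in isolation, as a sketch assuming Python 3's configparser (read_file is the non-deprecated spelling of the readfp call used above):

    import configparser
    import io

    def read_etcd_option(conf_text, option):
        """Fetch one option from sectionless etcd.conf-style text, or None."""
        buf = io.StringIO(u'[ETCD]\n' + conf_text.replace('%', '%%'))
        parser = configparser.ConfigParser()
        parser.read_file(buf)
        try:
            return parser.get('ETCD', option)
        except configparser.NoOptionError:
            return None

    assert read_etcd_option('ETCD_CA_FILE=/etc/etcd/ca.crt\n',
                            'ETCD_CA_FILE') == '/etc/etcd/ca.crt'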
- pass - except IOError: - # No etcd to see here, move along - pass - - for etcd_cert in filter_paths(etcd_certs_to_check): - with io.open(etcd_cert, 'r', encoding='utf-8') as fp: - c = fp.read() - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(c, now, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) - - ###################################################################### - # Now the embedded etcd - ###################################################################### - try: - with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp: - cfg = yaml.load(fp) - except IOError: - # Not present - pass - else: - if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None: - # This is embedded - etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile'] - else: - # Not embedded - etcd_crt_name = None - - if etcd_crt_name is not None: - # etcd_crt_name is relative to the location of the - # master-config.yaml file - cfg_path = os.path.dirname(fp.name) - etcd_cert = os.path.join(cfg_path, etcd_crt_name) - with open(etcd_cert, 'r') as etcd_fp: - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': etcd_fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) - - ###################################################################### - # /Check etcd certs - ###################################################################### - - ###################################################################### - # Check router/registry certs - # - # These are saved as secrets in etcd. That means that we can not - # simply read a file to grab the data. Instead we're going to - # subprocess out to the 'oc get' command. On non-masters this - # command will fail, that is expected so we catch that exception. - ###################################################################### - router_certs = [] - registry_certs = [] - - ###################################################################### - # First the router certs - try: - router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(), - stdout=subprocess.PIPE) - router_ds = yaml.load(router_secrets_raw.communicate()[0]) - router_c = router_ds['data']['tls.crt'] - router_path = router_ds['metadata']['selfLink'] - except TypeError: - # YAML couldn't load the result, this is not a master - pass - except OSError: - # The OC command doesn't exist here. Move along. 
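Both the router branch here and the registry branch below follow one pattern: shell out to `oc get ... -o yaml`, parse what it prints, and base64-decode the certificate field. A hedged sketch of that pattern (the function name and safe_load are illustrative):

    import base64
    import subprocess

    import yaml

    def secret_cert(secret, key, namespace='default'):
        """Return decoded certificate data from a secret, or None off-master."""
        cmd = ['oc', 'get', '-n', namespace, 'secret', secret, '-o', 'yaml']
        try:
            raw = subprocess.check_output(cmd)
        except OSError:
            return None  # no oc binary on this host
        data = yaml.safe_load(raw)
        if not isinstance(data, dict):
            return None  # oc printed nothing useful; not a master
        return base64.b64decode(data['data'][key]).decode('utf-8')

With the secret and key names used in this module, that corresponds to secret_cert('router-certs', 'tls.crt') and secret_cert('registry-certificates', 'registry.crt').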
- pass - else: - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': router_path, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs) - - ###################################################################### - # Now for registry - try: - registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(), - stdout=subprocess.PIPE) - registry_ds = yaml.load(registry_secrets_raw.communicate()[0]) - registry_c = registry_ds['data']['registry.crt'] - registry_path = registry_ds['metadata']['selfLink'] - except TypeError: - # YAML couldn't load the result, this is not a master - pass - except OSError: - # The OC command doesn't exist here. Move along. - pass - else: - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': registry_path, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs) - - ###################################################################### - # /Check router/registry certs - ###################################################################### - - res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs) - - msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format( - count=res['total'], - exp=res['expired'], - warn=res['warning'], - ok=res['ok'], - window=int(module.params['warning_days']), - ) - - # By default we only return detailed information about expired or - # warning certificates. If show_all is true then we will print all - # the certificates examined. - if not module.params['show_all']: - check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']] - check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']] - check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']] - check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']] - check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']] - else: - check_results['ocp_certs'] = ocp_certs - check_results['kubeconfigs'] = kubeconfigs - check_results['etcd'] = etcd_certs - check_results['registry'] = registry_certs - check_results['router'] = router_certs - - # Sort the final results to report in order of ascending safety - # time. That is to say, the certificates which will expire sooner - # will be at the front of the list and certificates which will - # expire later are at the end. Router and registry certs should be - # limited to just 1 result, so don't bother sorting those. 
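Before this final sort runs, every certificate has already been bucketed by classify_cert, whose rules depend only on `now` and the warning window. A worked example of those rules (the helper name is illustrative):

    import datetime

    def health(expiry, now, warning_days=30):
        """Mirror the expired/warning/ok buckets used by classify_cert."""
        if expiry < now:
            return 'expired'
        if expiry - now < datetime.timedelta(days=warning_days):
            return 'warning'
        return 'ok'

    now = datetime.datetime(2018, 1, 1)
    assert health(datetime.datetime(2017, 12, 1), now) == 'expired'
    assert health(datetime.datetime(2018, 1, 15), now) == 'warning'
    assert health(datetime.datetime(2018, 6, 1), now) == 'ok'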
- def cert_key(item): - ''' return the days_remaining key ''' - return item['days_remaining'] - - check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key) - check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key) - check_results['etcd'] = sorted(check_results['etcd'], key=cert_key) - - # This module will never change anything, but we might want to - # change the return code parameter if there is some catastrophic - # error we noticed earlier - module.exit_json( - check_results=check_results, - summary=res, - msg=msg, - rc=0, - changed=False - ) - - -if __name__ == '__main__': - main() diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index 8dea2c07f..7062b5060 100644 --- a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -16,7 +16,9 @@ - name: Generate the result JSON string run_once: yes - set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" + set_fact: + # oo_cert_expiry_results_to_json is a custom filter in role lib_utils + json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py deleted file mode 100644 index df948fff0..000000000 --- a/roles/openshift_certificate_expiry/test/conftest.py +++ /dev/null @@ -1,119 +0,0 @@ -# pylint: disable=missing-docstring,invalid-name,redefined-outer-name -import pytest -from OpenSSL import crypto - -# Parameter list for valid_cert fixture -VALID_CERTIFICATE_PARAMS = [ - { - 'short_name': 'client', - 'cn': 'client.example.com', - 'serial': 4, - 'uses': b'clientAuth', - 'dns': [], - 'ip': [], - }, - { - 'short_name': 'server', - 'cn': 'server.example.com', - 'serial': 5, - 'uses': b'serverAuth', - 'dns': ['kubernetes', 'openshift'], - 'ip': ['10.0.0.1', '192.168.0.1'] - }, - { - 'short_name': 'combined', - 'cn': 'combined.example.com', - # Verify that HUGE serials parse correctly. 
- # Frobs PARSING_HEX_SERIAL in _parse_cert - # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240 - 'serial': 14449739080294792594019643629255165375, - 'uses': b'clientAuth, serverAuth', - 'dns': ['etcd'], - 'ip': ['10.0.0.2', '192.168.0.2'] - } -] - -# Extract the short_name from VALID_CERTIFICATE_PARAMS to provide -# friendly naming for the valid_cert fixture -VALID_CERTIFICATE_IDS = [param['short_name'] for param in VALID_CERTIFICATE_PARAMS] - - -@pytest.fixture(scope='session') -def ca(tmpdir_factory): - ca_dir = tmpdir_factory.mktemp('ca') - - key = crypto.PKey() - key.generate_key(crypto.TYPE_RSA, 2048) - - cert = crypto.X509() - cert.set_version(3) - cert.set_serial_number(1) - cert.get_subject().commonName = 'test-signer' - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(24 * 60 * 60) - cert.set_issuer(cert.get_subject()) - cert.set_pubkey(key) - cert.add_extensions([ - crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE, pathlen:0'), - crypto.X509Extension(b'keyUsage', True, - b'digitalSignature, keyEncipherment, keyCertSign, cRLSign'), - crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert) - ]) - cert.add_extensions([ - crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=cert) - ]) - cert.sign(key, 'sha256') - - return { - 'dir': ca_dir, - 'key': key, - 'cert': cert, - } - - -@pytest.fixture(scope='session', - ids=VALID_CERTIFICATE_IDS, - params=VALID_CERTIFICATE_PARAMS) -def valid_cert(request, ca): - common_name = request.param['cn'] - - key = crypto.PKey() - key.generate_key(crypto.TYPE_RSA, 2048) - - cert = crypto.X509() - cert.set_serial_number(request.param['serial']) - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(24 * 60 * 60) - cert.set_issuer(ca['cert'].get_subject()) - cert.set_pubkey(key) - cert.set_version(3) - cert.get_subject().commonName = common_name - cert.add_extensions([ - crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'), - crypto.X509Extension(b'keyUsage', True, b'digitalSignature, keyEncipherment'), - crypto.X509Extension(b'extendedKeyUsage', False, request.param['uses']), - ]) - - if request.param['dns'] or request.param['ip']: - san_list = ['DNS:{}'.format(common_name)] - san_list.extend(['DNS:{}'.format(x) for x in request.param['dns']]) - san_list.extend(['IP:{}'.format(x) for x in request.param['ip']]) - - cert.add_extensions([ - crypto.X509Extension(b'subjectAltName', False, ', '.join(san_list).encode('utf8')) - ]) - cert.sign(ca['key'], 'sha256') - - cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) - cert_file = ca['dir'].join('{}.crt'.format(common_name)) - cert_file.write_binary(cert_contents) - - return { - 'common_name': common_name, - 'serial': request.param['serial'], - 'dns': request.param['dns'], - 'ip': request.param['ip'], - 'uses': request.param['uses'], - 'cert_file': cert_file, - 'cert': cert - } diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py deleted file mode 100644 index 8a521a765..000000000 --- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py +++ /dev/null @@ -1,90 +0,0 @@ -''' - Unit tests for the FakeOpenSSL classes -''' -import os -import subprocess -import sys - -import pytest - -MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library')) -sys.path.insert(1, MODULE_PATH) - -# pylint: disable=import-error,wrong-import-position,missing-docstring -# pylint: 
disable=invalid-name,redefined-outer-name -from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402 - - -@pytest.fixture(scope='module') -def fake_valid_cert(valid_cert): - cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text', - '-nameopt', 'oneline'] - cert = subprocess.check_output(cmd) - return FakeOpenSSLCertificate(cert.decode('utf8')) - - -def test_not_after(valid_cert, fake_valid_cert): - ''' Validate value returned back from get_notAfter() ''' - real_cert = valid_cert['cert'] - - # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate - # is text, so decode the result from pyOpenSSL prior to comparing - assert real_cert.get_notAfter().decode('utf8') == fake_valid_cert.get_notAfter() - - -def test_serial(valid_cert, fake_valid_cert): - ''' Validate value returned back form get_serialnumber() ''' - real_cert = valid_cert['cert'] - assert real_cert.get_serial_number() == fake_valid_cert.get_serial_number() - - -def test_get_subject(valid_cert, fake_valid_cert): - ''' Validate the certificate subject ''' - - # Gather the subject components and create a list of colon separated strings. - # Since the internal representation of pyOpenSSL uses bytes, we need to decode - # the results before comparing. - c_subjects = valid_cert['cert'].get_subject().get_components() - c_subj = ', '.join(['{}:{}'.format(x.decode('utf8'), y.decode('utf8')) for x, y in c_subjects]) - f_subjects = fake_valid_cert.get_subject().get_components() - f_subj = ', '.join(['{}:{}'.format(x, y) for x, y in f_subjects]) - assert c_subj == f_subj - - -def get_san_extension(cert): - # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate - # is text, so we need to set the value to search for accordingly. - if isinstance(cert, FakeOpenSSLCertificate): - san_short_name = 'subjectAltName' - else: - san_short_name = b'subjectAltName' - - for i in range(cert.get_extension_count()): - ext = cert.get_extension(i) - if ext.get_short_name() == san_short_name: - # return the string representation to compare the actual SAN - # values instead of the data types - return str(ext) - - return None - - -def test_subject_alt_names(valid_cert, fake_valid_cert): - real_cert = valid_cert['cert'] - - san = get_san_extension(real_cert) - f_san = get_san_extension(fake_valid_cert) - - assert san == f_san - - # If there are either dns or ip sans defined, verify common_name present - if valid_cert['ip'] or valid_cert['dns']: - assert 'DNS:' + valid_cert['common_name'] in f_san - - # Verify all ip sans are present - for ip in valid_cert['ip']: - assert 'IP Address:' + ip in f_san - - # Verify all dns sans are present - for name in valid_cert['dns']: - assert 'DNS:' + name in f_san diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py deleted file mode 100644 index 98792e2ee..000000000 --- a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py +++ /dev/null @@ -1,67 +0,0 @@ -''' - Unit tests for the load_and_handle_cert method -''' -import datetime -import os -import sys - -import pytest - -MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library')) -sys.path.insert(1, MODULE_PATH) - -# pylint: disable=import-error,wrong-import-position,missing-docstring -# pylint: disable=invalid-name,redefined-outer-name -import openshift_cert_expiry # noqa: E402 - -# TODO: More testing on the results of the load_and_handle_cert function -# could be 
implemented here as well, such as verifying subjects -# match up. - - -@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate']) -def loaded_cert(request, valid_cert): - """ parameterized fixture to provide load_and_handle_cert results - for both OpenSSL and FakeOpenSSL parsed certificates - """ - now = datetime.datetime.now() - - openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate' - - # valid_cert['cert_file'] is a `py.path.LocalPath` object and - # provides a read_text() method for reading the file contents. - cert_string = valid_cert['cert_file'].read_text('utf8') - - (subject, - expiry_date, - time_remaining, - serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now) - - return { - 'now': now, - 'subject': subject, - 'expiry_date': expiry_date, - 'time_remaining': time_remaining, - 'serial': serial, - } - - -def test_serial(loaded_cert, valid_cert): - """Params: - - * `loaded_cert` comes from the `loaded_cert` fixture in this file - * `valid_cert` comes from the 'valid_cert' fixture in conftest.py - """ - valid_cert_serial = valid_cert['cert'].get_serial_number() - assert loaded_cert['serial'] == valid_cert_serial - - -def test_expiry(loaded_cert): - """Params: - - * `loaded_cert` comes from the `loaded_cert` fixture in this file - """ - expiry_date = loaded_cert['expiry_date'] - time_remaining = loaded_cert['time_remaining'] - now = loaded_cert['now'] - assert expiry_date == now + time_remaining diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py deleted file mode 100644 index 440b8ec28..000000000 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# pylint: disable=missing-docstring,invalid-name - -import random -import tempfile -import shutil -import os.path - -# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import -from ansible.module_utils.basic import * # noqa: F403 - - -DOCUMENTATION = ''' ---- -module: openshift_container_binary_sync -short_description: Copies OpenShift binaries out of the given image tag to host system. -''' - - -class BinarySyncError(Exception): - def __init__(self, msg): - super(BinarySyncError, self).__init__(msg) - self.msg = msg - - -# pylint: disable=too-few-public-methods,too-many-instance-attributes -class BinarySyncer(object): - """ - Syncs the openshift, oc, and kubectl binaries/symlinks out of - a container onto the host system. - """ - - def __init__(self, module, image, tag, backend): - self.module = module - self.changed = False - self.output = [] - self.bin_dir = '/usr/local/bin' - self._image = image - self.tag = tag - self.backend = backend - self.temp_dir = None # TBD - - def sync(self): - if self.backend == 'atomic': - return self._sync_atomic() - - return self._sync_docker() - - def _sync_atomic(self): - self.temp_dir = tempfile.mkdtemp() - temp_dir_mount = tempfile.mkdtemp() - try: - image_spec = '%s:%s' % (self.image, self.tag) - rc, stdout, stderr = self.module.run_command(['atomic', 'mount', - '--storage', "ostree", - image_spec, temp_dir_mount]) - if rc: - raise BinarySyncError("Error mounting image. 
stdout=%s, stderr=%s" % - (stdout, stderr)) - for i in ["openshift", "oc"]: - src_file = os.path.join(temp_dir_mount, "usr/bin", i) - shutil.copy(src_file, self.temp_dir) - - self._sync_binaries() - finally: - self.module.run_command(['atomic', 'umount', temp_dir_mount]) - shutil.rmtree(temp_dir_mount) - shutil.rmtree(self.temp_dir) - - def _sync_docker(self): - container_name = "openshift-cli-%s" % random.randint(1, 100000) - rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name', - container_name, '%s:%s' % (self.image, self.tag)]) - if rc: - raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" % - (stdout, stderr)) - self.output.append(stdout) - try: - self.temp_dir = tempfile.mkdtemp() - self.output.append("Using temp dir: %s" % self.temp_dir) - - rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name, - self.temp_dir]) - if rc: - raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % - (stdout, stderr)) - - rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name, - self.temp_dir]) - if rc: - raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % - (stdout, stderr)) - - self._sync_binaries() - finally: - shutil.rmtree(self.temp_dir) - self.module.run_command(['docker', 'rm', container_name]) - - def _sync_binaries(self): - self._sync_binary('openshift') - - # In older versions, oc was a symlink to openshift: - if os.path.islink(os.path.join(self.temp_dir, 'oc')): - self._sync_symlink('oc', 'openshift') - else: - self._sync_binary('oc') - - # Ensure correct symlinks created: - self._sync_symlink('kubectl', 'openshift') - - # Remove old oadm binary - if os.path.exists(os.path.join(self.bin_dir, 'oadm')): - os.remove(os.path.join(self.bin_dir, 'oadm')) - - def _sync_symlink(self, binary_name, link_to): - """ Ensure the given binary name exists and links to the expected binary. """ - - # The symlink we are creating: - link_path = os.path.join(self.bin_dir, binary_name) - - # The expected file we should be linking to: - link_dest = os.path.join(self.bin_dir, link_to) - - if not os.path.exists(link_path) or \ - not os.path.islink(link_path) or \ - os.path.realpath(link_path) != os.path.realpath(link_dest): - if os.path.exists(link_path): - os.remove(link_path) - os.symlink(link_to, os.path.join(self.bin_dir, binary_name)) - self.output.append("Symlinked %s to %s." % (link_path, link_dest)) - self.changed = True - - def _sync_binary(self, binary_name): - src_path = os.path.join(self.temp_dir, binary_name) - dest_path = os.path.join(self.bin_dir, binary_name) - incoming_checksum = self.module.run_command(['sha256sum', src_path])[1] - if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum: - - # See: https://github.com/openshift/openshift-ansible/issues/4965 - if os.path.islink(dest_path): - os.unlink(dest_path) - self.output.append('Removed old symlink {} before copying binary.'.format(dest_path)) - shutil.move(src_path, dest_path) - self.output.append("Moved %s to %s." % (src_path, dest_path)) - self.changed = True - - @property - def raw_image(self): - """ - Returns the image as it was originally passed in to the instance. - - .. note:: - This image string will only work directly with the atomic command. - - :returns: The original image passed in. 
- :rtype: str - """ - return self._image - - @property - def image(self): - """ - Returns the image without atomic prefixes used to map to skopeo args. - - :returns: The image string without prefixes - :rtype: str - """ - image = self._image - for remove in ('oci:', 'http:', 'https:'): - if image.startswith(remove): - image = image.replace(remove, '') - return image - - -def main(): - module = AnsibleModule( # noqa: F405 - argument_spec=dict( - image=dict(required=True), - tag=dict(required=True), - backend=dict(required=True), - ), - supports_check_mode=True - ) - - image = module.params['image'] - tag = module.params['tag'] - backend = module.params['backend'] - - if backend not in ["docker", "atomic"]: - module.fail_json(msg="unknown backend") - - binary_syncer = BinarySyncer(module, image, tag, backend) - - try: - binary_syncer.sync() - except BinarySyncError as ex: - module.fail_json(msg=ex.msg) - - return module.exit_json(changed=binary_syncer.changed, - output=binary_syncer.output) - - -if __name__ == '__main__': - main() diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 37bed9dbe..ae8d1ace0 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -12,6 +12,7 @@ register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ openshift_cli_image }}" @@ -28,6 +29,7 @@ register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}" diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 87e6146d4..6e30a8610 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck): 'oo_etcd_to_config': 20 * 10**9, }, # Used to copy client binaries into, - # see roles/openshift_cli/library/openshift_container_binary_sync.py. + # see roles/lib_utils/library/openshift_container_binary_sync.py. 
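The disk hint that follows covers /usr/local/bin because openshift_container_binary_sync copies the client binaries there. In that module, a symlink counts as already correct only when it exists, is a symlink, and resolves to the same real path as its target; condensed, the idempotency test is:

    import os

    def symlink_needs_update(link_path, link_dest):
        """True when link_path must be (re)created to point at link_dest."""
        return (not os.path.exists(link_path) or
                not os.path.islink(link_path) or
                os.path.realpath(link_path) != os.path.realpath(link_dest))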
'/usr/local/bin': { 'oo_masters_to_config': 1 * 10**9, 'oo_nodes_to_config': 1 * 10**9, diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py deleted file mode 100644 index 003ce5f9e..000000000 --- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift_hosted -''' - - -class FilterModule(object): - ''' Custom ansible filters for use by openshift_hosted role''' - - @staticmethod - def get_router_replicas(replicas=None, router_nodes=None): - ''' This function will return the number of replicas - based on the results from the defined - openshift_hosted_router_replicas OR - the query from oc_obj on openshift nodes with a selector OR - default to 1 - - ''' - # We always use what they've specified if they've specified a value - if replicas is not None: - return replicas - - replicas = 1 - - # Ignore boolean expression limit of 5. - # pylint: disable=too-many-boolean-expressions - if (isinstance(router_nodes, dict) and - 'results' in router_nodes and - 'results' in router_nodes['results'] and - isinstance(router_nodes['results']['results'], list) and - len(router_nodes['results']['results']) > 0 and - 'items' in router_nodes['results']['results'][0]): - - if len(router_nodes['results']['results'][0]['items']) > 0: - replicas = len(router_nodes['results']['results'][0]['items']) - - return replicas - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {'get_router_replicas': self.get_router_replicas} diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 2dc9c98f6..c2be00d19 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -18,6 +18,7 @@ - name: set_fact replicas set_fact: + # get_router_replicas is a custom filter in role lib_utils replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index ba412b5a6..247c7e4df 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key): raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key)) -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - def serviceaccount_name(qualified_sa): ''' Returns the simple name from a fully qualified name ''' return qualified_sa.split(":")[-1] @@ -134,7 +126,6 @@ class FilterModule(object): return { 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, - 'map_from_pairs': map_from_pairs, 'min_cpu': min_cpu, 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 9b58e4456..87b4204b5 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: 
"https://kubernetes.default.svc.{{ openshi openshift_logging_fluentd_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" openshift_logging_fluentd_cpu_limit: null openshift_logging_fluentd_cpu_request: 100m diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index db6f23126..369ba86b3 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub openshift_logging_mux_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" openshift_logging_mux_cpu_limit: null openshift_logging_mux_cpu_request: 100m diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index eea1401b8..b12a6b346 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -181,6 +181,7 @@ - restart master api - set_fact: + # translate_idps is a custom filter in role lib_utils translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}" # TODO: add the validate parameter when there is a validation command to run diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml index 8558bf3e9..995a5ab70 100644 --- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml +++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml @@ -1,6 +1,8 @@ --- # Upgrade predicates - vars: + # openshift_master_facts_default_predicates is a custom lookup plugin in + # role lib_utils prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 649a4bc5d..ce27e238f 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -101,6 +101,7 @@ state: hard force: true with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}" when: master_certs_missing | bool and inventory_hostname != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py deleted file mode 100644 index ff15f693b..000000000 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ /dev/null @@ -1,532 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift-master -''' -import copy -import sys - -from ansible import errors -from ansible.parsing.yaml.dumper import AnsibleDumper 
-from ansible.plugins.filter.core import to_bool as ansible_bool - -# ansible.compat.six goes away with Ansible 2.4 -try: - from ansible.compat.six import string_types, u -except ImportError: - from ansible.module_utils.six import string_types, u - -import yaml - - -class IdentityProviderBase(object): - """ IdentityProviderBase - - Attributes: - name (str): Identity provider Name - login (bool): Is this identity provider a login provider? - challenge (bool): Is this identity provider a challenge provider? - provider (dict): Provider specific config - _idp (dict): internal copy of the IDP dict passed in - _required (list): List of lists of strings for required attributes - _optional (list): List of lists of strings for optional attributes - _allow_additional (bool): Does this provider support attributes - not in _required and _optional - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - # disabling this check since the number of instance attributes are - # necessary for this class - # pylint: disable=too-many-instance-attributes - def __init__(self, api_version, idp): - if api_version not in ['v1']: - raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version)) - - self._idp = copy.deepcopy(idp) - - if 'name' not in self._idp: - raise errors.AnsibleFilterError("|failed identity provider missing a name") - - if 'kind' not in self._idp: - raise errors.AnsibleFilterError("|failed identity provider missing a kind") - - self.name = self._idp.pop('name') - self.login = ansible_bool(self._idp.pop('login', False)) - self.challenge = ansible_bool(self._idp.pop('challenge', False)) - self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind')) - - mm_keys = ('mappingMethod', 'mapping_method') - mapping_method = None - for key in mm_keys: - if key in self._idp: - mapping_method = self._idp.pop(key) - if mapping_method is None: - mapping_method = self.get_default('mappingMethod') - self.mapping_method = mapping_method - - valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] - if self.mapping_method not in valid_mapping_methods: - raise errors.AnsibleFilterError("|failed unknown mapping method " - "for provider {0}".format(self.__class__.__name__)) - self._required = [] - self._optional = [] - self._allow_additional = True - - @staticmethod - def validate_idp_list(idp_list): - ''' validates a list of idps ''' - names = [x.name for x in idp_list] - if len(set(names)) != len(names): - raise errors.AnsibleFilterError("|failed more than one provider configured with the same name") - - for idp in idp_list: - idp.validate() - - def validate(self): - ''' validate an instance of this idp class ''' - pass - - @staticmethod - def get_default(key): - ''' get a default value for a given key ''' - if key == 'mappingMethod': - return 'claim' - else: - return None - - def set_provider_item(self, items, required=False): - ''' set a provider item based on the list of item names provided. 
''' - for item in items: - provider_key = items[0] - if item in self._idp: - self.provider[provider_key] = self._idp.pop(item) - break - else: - default = self.get_default(provider_key) - if default is not None: - self.provider[provider_key] = default - elif required: - raise errors.AnsibleFilterError("|failed provider {0} missing " - "required key {1}".format(self.__class__.__name__, provider_key)) - - def set_provider_items(self): - ''' set the provider items for this idp ''' - for items in self._required: - self.set_provider_item(items, True) - for items in self._optional: - self.set_provider_item(items) - if self._allow_additional: - for key in self._idp.keys(): - self.set_provider_item([key]) - else: - if len(self._idp) > 0: - raise errors.AnsibleFilterError("|failed provider {0} " - "contains unknown keys " - "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys()))) - - def to_dict(self): - ''' translate this idp to a dictionary ''' - return dict(name=self.name, challenge=self.challenge, - login=self.login, mappingMethod=self.mapping_method, - provider=self.provider) - - -class LDAPPasswordIdentityProvider(IdentityProviderBase): - """ LDAPPasswordIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['attributes'], ['url'], ['insecure']] - self._optional += [['ca'], - ['bindDN', 'bind_dn'], - ['bindPassword', 'bind_password']] - - self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False)) - - if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']: - pref_user = self._idp['attributes'].pop('preferred_username') - self._idp['attributes']['preferredUsername'] = pref_user - - def validate(self): - ''' validate this idp instance ''' - if not isinstance(self.provider['attributes'], dict): - raise errors.AnsibleFilterError("|failed attributes for provider " - "{0} must be a dictionary".format(self.__class__.__name__)) - - attrs = ['id', 'email', 'name', 'preferredUsername'] - for attr in attrs: - if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list): - raise errors.AnsibleFilterError("|failed {0} attribute for " - "provider {1} must be a list".format(attr, self.__class__.__name__)) - - unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs) - if len(unknown_attrs) > 0: - raise errors.AnsibleFilterError("|failed provider {0} has unknown " - "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs))) - - -class KeystonePasswordIdentityProvider(IdentityProviderBase): - """ KeystoneIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['url'], ['domainName', 'domain_name']] - self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] - - -class RequestHeaderIdentityProvider(IdentityProviderBase): - """ RequestHeaderIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - 
-        super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['headers']]
-        self._optional += [['challengeURL', 'challenge_url'],
-                           ['loginURL', 'login_url'],
-                           ['clientCA', 'client_ca'],
-                           ['clientCommonNames', 'client_common_names'],
-                           ['emailHeaders', 'email_headers'],
-                           ['nameHeaders', 'name_headers'],
-                           ['preferredUsernameHeaders', 'preferred_username_headers']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if not isinstance(self.provider['headers'], list):
-            raise errors.AnsibleFilterError("|failed headers for provider {0} "
-                                            "must be a list".format(self.__class__.__name__))
-
-
-class AllowAllPasswordIdentityProvider(IdentityProviderBase):
-    """ AllowAllPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-
-
-class DenyAllPasswordIdentityProvider(IdentityProviderBase):
-    """ DenyAllPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-
-
-class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
-    """ HTPasswdPasswordIdentity
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['file', 'filename', 'fileName', 'file_name']]
-
-    @staticmethod
-    def get_default(key):
-        if key == 'file':
-            return '/etc/origin/htpasswd'
-        else:
-            return IdentityProviderBase.get_default(key)
-
-
-class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
-    """ BasicAuthPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['url']]
-        self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
-
-
-class IdentityProviderOauthBase(IdentityProviderBase):
-    """ IdentityProviderOauthBase
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(IdentityProviderOauthBase, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
-
-    def validate(self):
-        ''' validate an instance of this idp class '''
-        pass
-
-
-class OpenIDIdentityProvider(IdentityProviderOauthBase):
-    """ OpenIDIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._required += [['claims'], ['urls']]
-        self._optional += [['ca'],
-                           ['extraScopes'],
-                           ['extraAuthorizeParameters']]
-        if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
-            pref_user = self._idp['claims'].pop('preferred_username')
-            self._idp['claims']['preferredUsername'] = pref_user
-        if 'urls' in self._idp and 'user_info' in self._idp['urls']:
-            user_info = self._idp['urls'].pop('user_info')
-            self._idp['urls']['userInfo'] = user_info
-        if 'extra_scopes' in self._idp:
-            self._idp['extraScopes'] = self._idp.pop('extra_scopes')
-        if 'extra_authorize_parameters' in self._idp:
-            self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if not isinstance(self.provider['claims'], dict):
-            raise errors.AnsibleFilterError("|failed claims for provider {0} "
-                                            "must be a dictionary".format(self.__class__.__name__))
-
-        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
-            if var in self.provider and not isinstance(self.provider[var], var_type):
-                raise errors.AnsibleFilterError("|failed {1} for provider "
-                                                "{0} must be a {2}".format(self.__class__.__name__,
-                                                                           var,
-                                                                           var_type.__class__.__name__))
-
-        required_claims = ['id']
-        optional_claims = ['email', 'name', 'preferredUsername']
-        all_claims = required_claims + optional_claims
-
-        for claim in required_claims:
-            if claim in required_claims and claim not in self.provider['claims']:
-                raise errors.AnsibleFilterError("|failed {0} claim missing "
-                                                "for provider {1}".format(claim, self.__class__.__name__))
-
-        for claim in all_claims:
-            if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
-                raise errors.AnsibleFilterError("|failed {0} claims for "
-                                                "provider {1} must be a list".format(claim, self.__class__.__name__))
-
-        unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
-        if len(unknown_claims) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
-
-        if not isinstance(self.provider['urls'], dict):
-            raise errors.AnsibleFilterError("|failed urls for provider {0} "
-                                            "must be a dictionary".format(self.__class__.__name__))
-
-        required_urls = ['authorize', 'token']
-        optional_urls = ['userInfo']
-        all_urls = required_urls + optional_urls
-
-        for url in required_urls:
-            if url not in self.provider['urls']:
-                raise errors.AnsibleFilterError("|failed {0} url missing for "
-                                                "provider {1}".format(url, self.__class__.__name__))
-
-        unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
-        if len(unknown_urls) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
-
-
-class GoogleIdentityProvider(IdentityProviderOauthBase):
-    """ GoogleIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._optional += [['hostedDomain', 'hosted_domain']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
-
-
-class GitHubIdentityProvider(IdentityProviderOauthBase):
-    """ GitHubIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._optional += [['organizations'],
-                           ['teams']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use by the openshift_master role'''
-
-    @staticmethod
-    def translate_idps(idps, api_version):
-        ''' Translates a list of dictionaries into a valid identityProviders config '''
-        idp_list = []
-
-        if not isinstance(idps, list):
-            raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
-        for idp in idps:
-            if not isinstance(idp, dict):
-                raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
-
-            cur_module = sys.modules[__name__]
-            idp_class = getattr(cur_module, idp['kind'], None)
-            idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
-            idp_inst.set_provider_items()
-            idp_list.append(idp_inst)
-
-        IdentityProviderBase.validate_idp_list(idp_list)
-        return u(yaml.dump([idp.to_dict() for idp in idp_list],
-                           allow_unicode=True,
-                           default_flow_style=False,
-                           width=float("inf"),
-                           Dumper=AnsibleDumper))
-
-    @staticmethod
-    def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
-        ''' Return certificates to synchronize based on facts. '''
-        if not issubclass(type(hostvars), dict):
-            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-        certs = ['admin.crt',
-                 'admin.key',
-                 'admin.kubeconfig',
-                 'master.kubelet-client.crt',
-                 'master.kubelet-client.key',
-                 'master.proxy-client.crt',
-                 'master.proxy-client.key',
-                 'service-signer.crt',
-                 'service-signer.key']
-        if bool(include_ca):
-            certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
-        if bool(include_keys):
-            certs += ['serviceaccounts.private.key',
-                      'serviceaccounts.public.key']
-        return certs
-
-    @staticmethod
-    def oo_htpasswd_users_from_file(file_contents):
-        ''' return a dictionary of htpasswd users from htpasswd file contents '''
-        htpasswd_entries = {}
-        if not isinstance(file_contents, string_types):
-            raise errors.AnsibleFilterError("failed, expects to filter on a string")
-        for line in file_contents.splitlines():
-            user = None
-            passwd = None
-            if len(line) == 0:
-                continue
-            if ':' in line:
-                user, passwd = line.split(':', 1)
-
-            if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
-                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
-                raise errors.AnsibleFilterError(error_msg)
-            htpasswd_entries[user] = passwd
-        return htpasswd_entries
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {"translate_idps": self.translate_idps,
-                "certificates_to_synchronize": self.certificates_to_synchronize,
-                "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 85d0ac25c..f450c916a 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -57,6 +57,7 @@
       access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
       auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
       identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+      # oo_htpasswd_users_from_file is a custom filter in role lib_utils
      htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
       manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}"
       ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
@@ -90,6 +91,8 @@
 
 - name: Set Default scheduler predicates and priorities
   set_fact:
+    # openshift_master_facts_default_predicates is a custom lookup plugin in
+    # role lib_utils
     openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
     openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
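The htpasswd_users expression above leans on the relocated oo_htpasswd_users_from_file filter, whose parsing rule is easy to lose inside the Jinja one-liner. A minimal standalone sketch of that rule in plain Python, illustrative only (the authoritative copy now lives in role lib_utils):

    def htpasswd_users_from_file(file_contents):
        # Each non-empty line must be 'user:passwd'; split on the first colon only.
        entries = {}
        for line in file_contents.splitlines():
            if not line:
                continue  # blank lines are skipped
            user, _, passwd = line.partition(':')
            if not user or not passwd:
                raise ValueError("expected each line to be 'user:passwd'")
            entries[user] = passwd
        return entries

    print(htpasswd_users_from_file('alice:$apr1$hash1\nbob:$apr1$hash2'))
    # -> {'alice': '$apr1$hash1', 'bob': '$apr1$hash2'}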
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/openshift_master_facts/test/conftest.py
deleted file mode 100644
index 140cced73..000000000
--- a/roles/openshift_master_facts/test/conftest.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import os
-import sys
-
-import pytest
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule  # noqa: E402
-from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule  # noqa: E402
-
-
-@pytest.fixture()
-def predicates_lookup():
-    return PredicatesLookupModule()
-
-
-@pytest.fixture()
-def priorities_lookup():
-    return PrioritiesLookupModule()
-
-
-@pytest.fixture()
-def facts():
-    return {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-
-@pytest.fixture(params=[True, False])
-def regions_enabled(request):
-    return request.param
-
-
-@pytest.fixture(params=[True, False])
-def zones_enabled(request):
-    return request.param
-
-
-def v_prefix(release):
-    """Prefix a release number with 'v'."""
-    return "v" + release
-
-
-def minor(release):
-    """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
-    return release + ".1"
-
-
-@pytest.fixture(params=[str, v_prefix, minor])
-def release_mod(request):
-    """Modifies a release string to alternative valid values."""
-    return request.param
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
deleted file mode 100644
index e8da1e04a..000000000
--- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import copy
-import os
-import sys
-
-from ansible.errors import AnsibleError
-import pytest
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule  # noqa: E402
-
-
-class TestOpenShiftMasterFactsBadInput(object):
-    lookup = LookupModule()
-    default_facts = {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-    def test_missing_openshift_facts(self):
-        with pytest.raises(AnsibleError):
-            facts = {}
-            self.lookup.run(None, variables=facts)
-
-    def test_missing_deployment_type(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '10.10'
-            self.lookup.run(None, variables=facts)
-
-    def test_missing_short_version_and_missing_openshift_release(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['deployment_type'] = 'origin'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_deployment_types(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '1.1'
-            facts['openshift']['common']['deployment_type'] = 'bogus'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_origin_version(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '0.1'
-            facts['openshift']['common']['deployment_type'] = 'origin'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_ocp_version(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '0.1'
-            facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
-            self.lookup.run(None, variables=facts)
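Every deleted bad-input test follows the same shape: deep-copy a minimal facts dict, break or omit one field, and assert that the lookup raises AnsibleError. A condensed sketch of that pattern (the `lookup` fixture is assumed to be provided by a conftest like the one removed above, with the relocated plugin on the import path):

    import copy

    import pytest
    from ansible.errors import AnsibleError

    BASE_FACTS = {'openshift': {'common': {}}}

    @pytest.mark.parametrize('common', [
        {'short_version': '10.10'},                             # deployment_type missing
        {'short_version': '1.1', 'deployment_type': 'bogus'},   # unknown deployment_type
        {'short_version': '0.1', 'deployment_type': 'origin'},  # unknown version
    ])
    def test_bad_input_raises(lookup, common):
        facts = copy.deepcopy(BASE_FACTS)
        facts['openshift']['common'].update(common)
        with pytest.raises(AnsibleError):
            lookup.run(None, variables=facts)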
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
deleted file mode 100644
index 11aad9f03..000000000
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import pytest
-
-
-# Predicates ordered according to OpenShift Origin source:
-# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
-
-DEFAULT_PREDICATES_1_1 = [
-    {'name': 'PodFitsHostPorts'},
-    {'name': 'PodFitsResources'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'MatchNodeSelector'},
-]
-
-DEFAULT_PREDICATES_1_2 = [
-    {'name': 'PodFitsHostPorts'},
-    {'name': 'PodFitsResources'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MatchNodeSelector'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'}
-]
-
-DEFAULT_PREDICATES_1_3 = [
-    {'name': 'NoDiskConflict'},
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'}
-]
-
-DEFAULT_PREDICATES_1_4 = [
-    {'name': 'NoDiskConflict'},
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-    {'name': 'MatchInterPodAffinity'}
-]
-
-DEFAULT_PREDICATES_1_5 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-]
-
-DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
-
-DEFAULT_PREDICATES_3_7 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MaxAzureDiskVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-    {'name': 'NoVolumeNodeConflict'},
-]
-
-DEFAULT_PREDICATES_3_9 = DEFAULT_PREDICATES_3_8 = DEFAULT_PREDICATES_3_7
-
-REGION_PREDICATE = {
-    'name': 'Region',
-    'argument': {
-        'serviceAffinity': {
-            'labels': ['region']
-        }
-    }
-}
-
-TEST_VARS = [
-    ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
-    ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
-    ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
-    ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
-    ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
-    ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
-    ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
-    ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
-    ('1.5', 'origin', DEFAULT_PREDICATES_1_5),
-    ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
-    ('3.6', 'origin', DEFAULT_PREDICATES_3_6),
-    ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
-    ('3.7', 'origin', DEFAULT_PREDICATES_3_7),
-    ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7),
-    ('3.8', 'origin', DEFAULT_PREDICATES_3_8),
-    ('3.8', 'openshift-enterprise', DEFAULT_PREDICATES_3_8),
-    ('3.9', 'origin', DEFAULT_PREDICATES_3_9),
-    ('3.9', 'openshift-enterprise', DEFAULT_PREDICATES_3_9),
-]
-
-
-def assert_ok(predicates_lookup, default_predicates, regions_enabled, **kwargs):
-    results = predicates_lookup.run(None, regions_enabled=regions_enabled, **kwargs)
-    if regions_enabled:
-        assert results == default_predicates + [REGION_PREDICATE]
-    else:
-        assert results == default_predicates
-
-
-def test_openshift_version(predicates_lookup, openshift_version_fixture, regions_enabled):
-    facts, default_predicates = openshift_version_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_version_fixture(request, facts):
-    version, deployment_type, default_predicates = request.param
-    version += '.1'
-    facts['openshift_version'] = version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_predicates
-
-
-def test_openshift_release(predicates_lookup, openshift_release_fixture, regions_enabled):
-    facts, default_predicates = openshift_release_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_release_fixture(request, facts, release_mod):
-    release, deployment_type, default_predicates = request.param
-    facts['openshift_release'] = release_mod(release)
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_predicates
-
-
-def test_short_version(predicates_lookup, short_version_fixture, regions_enabled):
-    facts, default_predicates = short_version_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_fixture(request, facts):
-    short_version, deployment_type, default_predicates = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_predicates
-
-
-def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled):
-    facts, short_version, default_predicates = short_version_kwarg_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, variables=facts,
-        regions_enabled=regions_enabled, short_version=short_version)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_predicates = request.param
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, short_version, default_predicates
-
-
-def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled):
-    facts, deployment_type, default_predicates = deployment_type_kwarg_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, variables=facts,
-        regions_enabled=regions_enabled, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def deployment_type_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_predicates = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    return facts, deployment_type, default_predicates
-
-
-def test_short_version_deployment_type_kwargs(
-        predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
-    short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, regions_enabled=regions_enabled,
-        short_version=short_version, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_deployment_type_kwargs_fixture(request):
-    return request.param
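The contract these deleted tests pinned down is small: the lookup resolves a (version, deployment_type) pair to a fixed predicate list, and regions_enabled appends a serviceAffinity predicate at the end. A runnable sketch of that expectation, reusing the test data above (the 3.9 list is abridged here):

    REGION_PREDICATE = {
        'name': 'Region',
        'argument': {'serviceAffinity': {'labels': ['region']}},
    }

    def expected_predicates(defaults, regions_enabled):
        # Mirrors assert_ok above: version-specific defaults, plus the
        # Region predicate only when regions are enabled.
        return defaults + [REGION_PREDICATE] if regions_enabled else list(defaults)

    defaults_3_9 = [{'name': 'NoVolumeZoneConflict'}, {'name': 'GeneralPredicates'}]  # abridged
    assert expected_predicates(defaults_3_9, True)[-1]['name'] == 'Region'
    assert expected_predicates(defaults_3_9, False) == defaults_3_9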
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
deleted file mode 100644
index 527fc9ff4..000000000
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import pytest
-
-
-DEFAULT_PRIORITIES_1_1 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_2 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'NodeAffinityPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_3 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_4 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1},
-    {'name': 'InterPodAffinityPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_5 = [
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'InterPodAffinityPriority', 'weight': 1},
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5
-
-DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8 = DEFAULT_PRIORITIES_3_7 = DEFAULT_PRIORITIES_3_6
-
-ZONE_PRIORITY = {
-    'name': 'Zone',
-    'argument': {
-        'serviceAntiAffinity': {
-            'label': 'zone'
-        }
-    },
-    'weight': 2
-}
-
-TEST_VARS = [
-    ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
-    ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
-    ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
-    ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
-    ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
-    ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
-    ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
-    ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
-    ('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
-    ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
-    ('3.6', 'origin', DEFAULT_PRIORITIES_3_6),
-    ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6),
-    ('3.7', 'origin', DEFAULT_PRIORITIES_3_7),
-    ('3.7', 'openshift-enterprise', DEFAULT_PRIORITIES_3_7),
-    ('3.8', 'origin', DEFAULT_PRIORITIES_3_8),
-    ('3.8', 'openshift-enterprise', DEFAULT_PRIORITIES_3_8),
-    ('3.9', 'origin', DEFAULT_PRIORITIES_3_9),
-    ('3.9', 'openshift-enterprise', DEFAULT_PRIORITIES_3_9),
-]
-
-
-def assert_ok(priorities_lookup, default_priorities, zones_enabled, **kwargs):
-    results = priorities_lookup.run(None, zones_enabled=zones_enabled, **kwargs)
-    if zones_enabled:
-        assert results == default_priorities + [ZONE_PRIORITY]
-    else:
-        assert results == default_priorities
-
-
-def test_openshift_version(priorities_lookup, openshift_version_fixture, zones_enabled):
-    facts, default_priorities = openshift_version_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_version_fixture(request, facts):
-    version, deployment_type, default_priorities = request.param
-    version += '.1'
-    facts['openshift_version'] = version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_priorities
-
-
-def test_openshift_release(priorities_lookup, openshift_release_fixture, zones_enabled):
-    facts, default_priorities = openshift_release_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_release_fixture(request, facts, release_mod):
-    release, deployment_type, default_priorities = request.param
-    facts['openshift_release'] = release_mod(release)
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_priorities
-
-
-def test_short_version(priorities_lookup, short_version_fixture, zones_enabled):
-    facts, default_priorities = short_version_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_fixture(request, facts):
-    short_version, deployment_type, default_priorities = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_priorities
-
-
-def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled):
-    facts, short_version, default_priorities = short_version_kwarg_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, variables=facts,
-        zones_enabled=zones_enabled, short_version=short_version)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_priorities = request.param
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, short_version, default_priorities
-
-
-def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled):
-    facts, deployment_type, default_priorities = deployment_type_kwarg_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, variables=facts,
-        zones_enabled=zones_enabled, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def deployment_type_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_priorities = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    return facts, deployment_type, default_priorities
-
-
-def test_short_version_deployment_type_kwargs(
-        priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
-    short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, zones_enabled=zones_enabled,
-        short_version=short_version, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_deployment_type_kwargs_fixture(request):
-    return request.param
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
deleted file mode 100644
index 6ed6d404c..000000000
--- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use with openshift named certificates
-'''
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use with openshift named certificates'''
-
-    @staticmethod
-    def oo_named_certificates_list(named_certificates):
-        ''' Returns named certificates list with correct fields for the master
-            config file.'''
-        return [{'certFile': named_certificate['certfile'],
-                 'keyFile': named_certificate['keyfile'],
-                 'names': named_certificate['names']} for named_certificate in named_certificates]
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {"oo_named_certificates_list": self.oo_named_certificates_list}
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
deleted file mode 100644
index eb13a58ba..000000000
--- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Ansible action plugin to generate pv and pvc dictionaries lists
-"""
-
-from ansible.plugins.action import ActionBase
-from ansible import errors
-
-
-class ActionModule(ActionBase):
-    """Action plugin to execute health checks."""
-
-    def get_templated(self, var_to_template):
-        """Return a properly templated ansible variable"""
-        return self._templar.template(self.task_vars.get(var_to_template))
-
-    def build_common(self, varname=None):
-        """Retrieve common variables for each pv and pvc type"""
-        volume = self.get_templated(str(varname) + '_volume_name')
-        size = self.get_templated(str(varname) + '_volume_size')
-        labels = self.task_vars.get(str(varname) + '_labels')
-        if labels:
-            labels = self._templar.template(labels)
-        else:
-            labels = dict()
-        access_modes = self.get_templated(str(varname) + '_access_modes')
-        return (volume, size, labels, access_modes)
-
-    def build_pv_nfs(self, varname=None):
-        """Build pv dictionary for nfs storage type"""
-        host = self.task_vars.get(str(varname) + '_host')
-        if host:
-            self._templar.template(host)
-        elif host is None:
-            groups = self.task_vars.get('groups')
-            default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
-            if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
-                host = groups['oo_nfs_to_config'][0]
-            else:
-                raise errors.AnsibleModuleError("|failed no storage host detected")
-        volume, size, labels, access_modes = self.build_common(varname=varname)
-        directory = self.get_templated(str(varname) + '_nfs_directory')
-        path = directory + '/' + volume
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                nfs=dict(
-                    server=host,
-                    path=path)))
-
-    def build_pv_openstack(self, varname=None):
-        """Build pv dictionary for openstack storage type"""
-        volume, size, labels, access_modes = self.build_common(varname=varname)
-        filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
-        volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                cinder=dict(
-                    fsType=filesystem,
-                    volumeID=volume_id)))
-
-    def build_pv_glusterfs(self, varname=None):
-        """Build pv dictionary for glusterfs storage type"""
-        volume, size, labels, access_modes = self.build_common(varname=varname)
-        endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
-        path = self.get_templated(str(varname) + '_glusterfs_path')
-        read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                glusterfs=dict(
-                    endpoints=endpoints,
-                    path=path,
-                    readOnly=read_only)))
-
-    def build_pv_dict(self, varname=None):
-        """Check for the existence of PV variables"""
-        kind = self.task_vars.get(str(varname) + '_kind')
-        if kind:
-            kind = self._templar.template(kind)
-            create_pv = self.task_vars.get(str(varname) + '_create_pv')
-            if create_pv and self._templar.template(create_pv):
-                if kind == 'nfs':
-                    return self.build_pv_nfs(varname=varname)
-
-                elif kind == 'openstack':
-                    return self.build_pv_openstack(varname=varname)
-
-                elif kind == 'glusterfs':
-                    return self.build_pv_glusterfs(varname=varname)
-
-                elif not (kind == 'object' or kind == 'dynamic'):
-                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                        kind,
-                        varname)
-                    raise errors.AnsibleModuleError(msg)
-        return None
-
-    def build_pvc_dict(self, varname=None):
-        """Check for the existence of PVC variables"""
-        kind = self.task_vars.get(str(varname) + '_kind')
-        if kind:
-            kind = self._templar.template(kind)
-            create_pv = self.task_vars.get(str(varname) + '_create_pv')
-            if create_pv:
-                create_pv = self._templar.template(create_pv)
-                create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
-                if create_pvc:
-                    create_pvc = self._templar.template(create_pvc)
-                    if kind != 'object' and create_pv and create_pvc:
-                        volume, size, _, access_modes = self.build_common(varname=varname)
-                        return dict(
-                            name="{0}-claim".format(volume),
-                            capacity=size,
-                            access_modes=access_modes)
-        return None
-
-    def run(self, tmp=None, task_vars=None):
-        """Run generate_pv_pvcs_list action plugin"""
-        result = super(ActionModule, self).run(tmp, task_vars)
-        # Ignore settting self.task_vars outside of init.
-        # pylint: disable=W0201
-        self.task_vars = task_vars or {}
-
-        result["changed"] = False
-        result["failed"] = False
-        result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
-        vars_to_check = ['openshift_hosted_registry_storage',
-                         'openshift_hosted_router_storage',
-                         'openshift_hosted_etcd_storage',
-                         'openshift_logging_storage',
-                         'openshift_loggingops_storage',
-                         'openshift_metrics_storage',
-                         'openshift_prometheus_storage',
-                         'openshift_prometheus_alertmanager_storage',
-                         'openshift_prometheus_alertbuffer_storage']
-        persistent_volumes = []
-        persistent_volume_claims = []
-        for varname in vars_to_check:
-            pv_dict = self.build_pv_dict(varname)
-            if pv_dict:
-                persistent_volumes.append(pv_dict)
-            pvc_dict = self.build_pvc_dict(varname)
-            if pvc_dict:
-                persistent_volume_claims.append(pvc_dict)
-        result["persistent_volumes"] = persistent_volumes
-        result["persistent_volume_claims"] = persistent_volume_claims
-        return result
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 0b4dd7d1f..b1d9c8cca 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -26,7 +26,8 @@
   when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
 
 - name: create standard pv and pvc lists
-  # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+  # generate_pv_pvcs_list is a custom action module defined in
+  # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
   generate_pv_pvcs_list: {}
   register: l_pv_pvcs_list
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
index 72c47b8ee..14f1f72c2 100644
--- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
@@ -6,15 +6,6 @@
 import re
 
 
-# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict given the source and delim delimited '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
 def vars_with_pattern(source, pattern=""):
     ''' Returns a list of variables whose name matches the given pattern '''
     if source == '':
@@ -39,6 +30,5 @@ class FilterModule(object):
     def filters(self):
         ''' Returns the names of the filters provided by this class '''
         return {
-            'map_from_pairs': map_from_pairs,
             'vars_with_pattern': vars_with_pattern
         }
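map_from_pairs, removed twice in this commit (above, and from the GlusterFS role below), is short enough to quote whole; the surviving copy is the one the following hunks reference in role lib_utils:

    def map_from_pairs(source, delim="="):
        ''' Returns a dict given the source and delim delimited '''
        if source == '':
            return dict()
        return dict(item.split(delim) for item in source.split(","))

    print(map_from_pairs("storagenode=glusterfs"))  # -> {'storagenode': 'glusterfs'}
    print(map_from_pairs("a=1,b=2"))                # -> {'a': '1', 'b': '2'}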
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
deleted file mode 100644
index a86c96df7..000000000
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ /dev/null
@@ -1,23 +0,0 @@
-'''
- Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
-'''
-
-
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict given the source and delim delimited '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
-    ''' OpenShift Storage GlusterFS Filters '''
-
-    # pylint: disable=no-self-use, too-few-public-methods
-    def filters(self):
-        ''' Returns the names of the filters provided by this class '''
-        return {
-            'map_from_pairs': map_from_pairs
-        }
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 2ea7286f3..a374df0ce 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -4,6 +4,7 @@
     glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+    # map_from_pairs is a custom filter plugin in role lib_utils
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
     glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index b7cff6514..544a6f491 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -4,6 +4,7 @@
     glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+    # map_from_pairs is a custom filter plugin in role lib_utils
    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
     glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
-- cgit v1.2.3
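For reference, the glusterfs_nodeselector default in the last two hunks evaluates like this, sketched in plain Python as a stand-in for the Jinja pipeline and using the map_from_pairs semantics quoted earlier:

    name = 'storage'                          # e.g. openshift_storage_glusterfs_name
    joined = '='.join(['storagenode', name])  # Jinja: ['storagenode', name] | join('=')
    selector = dict(item.split('=') for item in joined.split(','))
    print(selector)                           # -> {'storagenode': 'storage'}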