39 files changed, 334 insertions, 213 deletions
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index c0978c6f6..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -3,6 +3,10 @@
   tags:
   - always
 
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
 - include: ../../common/openshift-cluster/config.yml
   vars:
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index 76f165c6d..bbec3a4c2 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -5,6 +5,12 @@
 # currently supported method.
 #
 - include: initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
 
 - include: ../../common/openshift-cluster/openshift_logging.yml
   vars:
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 7e83b4aa6..ff5b5af67 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,5 +1,7 @@
 ---
-- include: ../openshift-cluster/evaluate_groups.yml
+- include: ../openshift-cluster/std_include.yml
+  tags:
+  - always
 
 - name: Run OpenShift health checks
   hosts: OSEv3
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index afd4f95e0..861229f21 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,5 +1,7 @@
 ---
-- include: ../openshift-cluster/evaluate_groups.yml
+- include: ../openshift-cluster/std_include.yml
+  tags:
+  - always
 
 - hosts: OSEv3
   name: run OpenShift pre-install checks
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 14d7d9822..e1df71112 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,8 +1,4 @@
 ---
-- include: std_include.yml
-  tags:
-  - always
-
 # TODO: refactor this into its own include
 # and pass a variable for ctx
 - name: Verify Requirements
@@ -22,6 +18,10 @@
     - docker_image_availability
     - docker_storage
 
+- include: initialize_firewall.yml
+  tags:
+  - always
+
 - hosts: localhost
   tasks:
   - fail:
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index c5f0c406a..c1a5d83cd 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,6 +1,4 @@
 ---
-- include: std_include.yml
-
 - name: OpenShift Aggregated Logging
   hosts: oo_first_master
   roles:
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 5a1187ec7..6cc56889a 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -18,7 +18,3 @@
 - include: initialize_openshift_version.yml
   tags:
   - always
-
-- include: initialize_firewall.yml
-  tags:
-  - always
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
-    skip_node_svc_handlers: True
-
-- name: Update systemd units
-  include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 33fc5630f..be2e6a15a 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,17 +1,22 @@
 ---
-- name: Gather and set facts for node hosts
+- name: Validate node hostnames
   hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
   tasks:
-  - shell:
+  - name: Query DNS for IP address of {{ openshift.common.hostname }}
+    shell:
       getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
     register: lookupip
     changed_when: false
     failed_when: false
   - name: Warn user about bad openshift_hostname values
     pause:
-      prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+      prompt:
+        The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+        doesn't resolve to an IP address owned by this host. Please set
+        openshift_hostname variable to a hostname that when resolved on the host
+        in question resolves to an IP address matching an interface on this
+        host. This host will fail liveness checks for pods utilizing hostPorts,
+        press ENTER to continue or CTRL-C to abort.
       seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
     when:
     - lookupip.stdout != '127.0.0.1'
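Note: the renamed task above shells out to `getent ahostsv4 <host> | head -n 1 | awk '{ print $1 }'` and warns when the first resolved address does not belong to the host. A minimal Python sketch of the same lookup, assuming a hypothetical hostname; getaddrinfo consults the same NSS sources (hosts file, DNS) that getent does:

    import socket

    def first_ipv4(hostname):
        # Roughly what `getent ahostsv4 <host> | head -n 1 | awk '{ print $1 }'` yields
        try:
            infos = socket.getaddrinfo(hostname, None, family=socket.AF_INET)
        except socket.gaierror:
            return None  # hostname does not resolve at all
        return infos[0][4][0]  # sockaddr of the first result is (ip, port)

    print(first_ipv4("master.example.com"))  # hypothetical hostname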
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
index 6ed87e6c7..f5bcd03ee 100644
--- a/roles/etcd_common/tasks/main.yml
+++ b/roles/etcd_common/tasks/main.yml
@@ -6,4 +6,4 @@
 
 - name: Include main action task file
   include: "{{ r_etcd_common_action }}.yml"
-  when: r_etcd_common_action != "noop"
+  when: '"noop" not in r_etcd_common_action'
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 9b0c0e0e4..7d9392af9 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1478,7 +1478,16 @@ class OCObject(OpenShiftCLI):
         if files:
             return self._create(files[0])
 
-        content['data'] = yaml.dump(content['data'])
+        # pylint: disable=no-member
+        # The purpose of this change is twofold:
+        # - we need a check to only use the ruamel specific dumper if ruamel is loaded
+        # - the dumper or the flow style change is needed so openshift is able to parse
+        #   the resulting yaml, at least until gopkg.in/yaml.v2 is updated
+        if hasattr(yaml, 'RoundTripDumper'):
+            content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper)
+        else:
+            content['data'] = yaml.safe_dump(content['data'], default_flow_style=False)
+
         content_file = Utils.create_tmp_files_from_contents(content)[0]
 
         return self._create(content_file['path'])
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 5e423bea9..68f7818e4 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -50,7 +50,16 @@ class OCObject(OpenShiftCLI):
         if files:
             return self._create(files[0])
 
-        content['data'] = yaml.dump(content['data'])
+        # pylint: disable=no-member
+        # The purpose of this change is twofold:
+        # - we need a check to only use the ruamel specific dumper if ruamel is loaded
+        # - the dumper or the flow style change is needed so openshift is able to parse
+        #   the resulting yaml, at least until gopkg.in/yaml.v2 is updated
+        if hasattr(yaml, 'RoundTripDumper'):
+            content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper)
+        else:
+            content['data'] = yaml.safe_dump(content['data'], default_flow_style=False)
+
         content_file = Utils.create_tmp_files_from_contents(content)[0]
 
         return self._create(content_file['path'])
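Note: both copies of oc_obj.py (the generated library and its src/class source) gain the same guard. ruamel.yaml, which these modules import as `yaml`, exposes RoundTripDumper, while plain PyYAML does not, so hasattr() distinguishes the two; block style (default_flow_style=False) keeps the output parseable by OpenShift's vendored gopkg.in/yaml.v2. A standalone sketch of that selection, runnable against either library:

    import yaml  # ruamel.yaml or plain PyYAML, depending on the environment

    data = {'config.yml': {'replicas': 3}}
    if hasattr(yaml, 'RoundTripDumper'):   # ruamel.yaml is loaded
        text = yaml.dump(data, Dumper=yaml.RoundTripDumper)
    else:                                  # plain PyYAML
        text = yaml.safe_dump(data, default_flow_style=False)
    print(text)  # block style: "config.yml:\n  replicas: 3\n"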
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 27ed57703..393bee1f3 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -1,6 +1,7 @@
 ---
-# Namespace for the CFME project
-openshift_cfme_project: cfme
+# Namespace for the CFME project (Note: changed post-3.6 to use
+# reserved 'openshift-' namespace prefix)
+openshift_cfme_project: openshift-cfme
 # Namespace/project description
 openshift_cfme_project_description: ManageIQ - CloudForms Management Engine
 # Basic user assigned the `admin` role for the project
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index c47203211..b40c49701 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -133,6 +133,11 @@ class BinarySyncer(object):
         dest_path = os.path.join(self.bin_dir, binary_name)
         incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]
         if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:
+
+            # See: https://github.com/openshift/openshift-ansible/issues/4965
+            if os.path.islink(dest_path):
+                os.unlink(dest_path)
+                self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))
             shutil.move(src_path, dest_path)
             self.output.append("Moved %s to %s." % (src_path, dest_path))
             self.changed = True
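Note: per the referenced issue (#4965), moving a file onto a destination that is a symlink can misbehave: shutil.move falls back to a copy across filesystems, and copying follows the link and can write through to its target. Removing the link first makes the move land as a regular file. A minimal sketch of that guard, with a hypothetical helper name:

    import os
    import shutil

    def install_binary(src_path, dest_path):
        if os.path.islink(dest_path):
            os.unlink(dest_path)          # drop the link itself, not its target
        shutil.move(src_path, dest_path)  # now always creates a regular file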
"3.5", False), (['masters', 'etcd'], "3.5", True), @@ -17,9 +17,7 @@ from openshift_checks.etcd_traffic import EtcdTraffic def test_is_active(group_names, version, is_active): task_vars = dict( group_names=group_names, - openshift=dict( - common=dict(short_version=version), - ), + openshift_image_tag=version, ) assert EtcdTraffic(task_vars=task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index b6acef5a6..e1bf29d2a 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -38,8 +38,8 @@ def test_invalid_openshift_release_format(): @pytest.mark.parametrize('openshift_release,expected_ovs_version', [ - ("3.5", "2.6"), - ("3.6", "2.6"), + ("3.5", ["2.6", "2.7"]), + ("3.6", ["2.6", "2.7"]), ("3.4", "2.4"), ("3.3", "2.4"), ("1.0", "2.4"), diff --git a/roles/openshift_health_checker/test/rpm_version_test.py b/roles/openshift_health_checker/test/rpm_version_test.py index 2f09ef965..2c1bcf876 100644 --- a/roles/openshift_health_checker/test/rpm_version_test.py +++ b/roles/openshift_health_checker/test/rpm_version_test.py @@ -49,7 +49,7 @@ def test_check_pkg_found(pkgs, expect_not_found): }, { "eggs": { - "required_version": "3.2", + "required_versions": ["3.2"], "found_versions": ["3.3"], } }, # not the right version @@ -61,11 +61,11 @@ def test_check_pkg_found(pkgs, expect_not_found): }, { "eggs": { - "required_version": "3.2", + "required_versions": ["3.2"], "found_versions": ["3.3", "1.2"], }, "spam": { - "required_version": "3.2", + "required_versions": ["3.2"], "found_versions": ["3.1", "3.3"], } }, # not the right version diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index c0b5d394e..0690bf114 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -37,6 +37,9 @@ openshift_logging_elasticsearch_storage_group: '65534' openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" +# config the es plugin to write kibana index based on the index mode +openshift_logging_elasticsearch_kibana_index_mode: 'unique' + # this is used to determine if this is an operations deployment or a non-ops deployment # simply used for naming purposes openshift_logging_elasticsearch_ops_deployment: false diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index aae23668a..5593fac3a 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -102,6 +102,11 @@ delete_after: true # configmap +- assert: + that: + - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes + msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}" + - template: src: elasticsearch-logging.yml.j2 dest: "{{ tempdir }}/elasticsearch-logging.yml" @@ -115,6 +120,8 @@ allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}" + es_kibana_index_mode: "{{ 
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index cc1b14d8a..b4c8957e9 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -14,8 +14,8 @@
         group_names = self.get_var("group_names", default=[])
         valid_group_names = "etcd" in group_names
 
-        version = self.get_var("openshift", "common", "short_version")
-        valid_version = version in ("3.4", "3.5", "1.4", "1.5")
+        version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
+        valid_version = version in ((3, 4), (3, 5))
 
         return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index d5e55bc25..363c12def 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -16,8 +16,8 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
     tags = ["health"]
 
     openshift_to_ovs_version = {
-        "3.6": "2.6",
-        "3.5": "2.6",
+        "3.6": ["2.6", "2.7"],
+        "3.5": ["2.6", "2.7"],
         "3.4": "2.4",
     }
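Note: the etcd_traffic check now derives a (major, minor) tuple from openshift_image_tag rather than reading openshift.common.short_version. get_major_minor_version is defined on the base check class and not shown in this diff; judging by the updated test below (which expects tag "1.5" to stay active), it presumably also maps legacy Origin 1.x tags onto 3.x. A rough sketch under those assumptions:

    def major_minor(image_tag):
        # Assumed behavior of get_major_minor_version (not shown in this diff):
        # strip a leading "v", take the first two components as ints.
        major, minor = (int(x) for x in image_tag.lstrip("v").split(".")[:2])
        if major == 1:  # legacy Origin 1.x releases correspond to OCP 3.x
            major = 3
        return major, minor

    print(major_minor("v3.4.1.2"))  # (3, 4)
    print(major_minor("1.5.0"))     # (3, 5), matching the updated test case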
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index f4316c423..fae3e578d 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -8,7 +8,7 @@ from openshift_checks.etcd_traffic import EtcdTraffic
     (['masters'], "3.6", False),
     (['nodes'], "3.4", False),
     (['etcd'], "3.4", True),
-    (['etcd'], "3.5", True),
+    (['etcd'], "1.5", True),
     (['etcd'], "3.1", False),
     (['masters', 'nodes'], "3.5", False),
     (['masters', 'etcd'], "3.5", True),
@@ -17,9 +17,7 @@ from openshift_checks.etcd_traffic import EtcdTraffic
 def test_is_active(group_names, version, is_active):
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(
-            common=dict(short_version=version),
-        ),
+        openshift_image_tag=version,
     )
     assert EtcdTraffic(task_vars=task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index b6acef5a6..e1bf29d2a 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -38,8 +38,8 @@ def test_invalid_openshift_release_format():
 
 @pytest.mark.parametrize('openshift_release,expected_ovs_version', [
-    ("3.5", "2.6"),
-    ("3.6", "2.6"),
+    ("3.5", ["2.6", "2.7"]),
+    ("3.6", ["2.6", "2.7"]),
     ("3.4", "2.4"),
     ("3.3", "2.4"),
     ("1.0", "2.4"),
diff --git a/roles/openshift_health_checker/test/rpm_version_test.py b/roles/openshift_health_checker/test/rpm_version_test.py
index 2f09ef965..2c1bcf876 100644
--- a/roles/openshift_health_checker/test/rpm_version_test.py
+++ b/roles/openshift_health_checker/test/rpm_version_test.py
@@ -49,7 +49,7 @@ def test_check_pkg_found(pkgs, expect_not_found):
         },
         {
             "eggs": {
-                "required_version": "3.2",
+                "required_versions": ["3.2"],
                 "found_versions": ["3.3"],
             }
         },  # not the right version
@@ -61,11 +61,11 @@ def test_check_pkg_found(pkgs, expect_not_found):
         },
         {
             "eggs": {
-                "required_version": "3.2",
+                "required_versions": ["3.2"],
                 "found_versions": ["3.3", "1.2"],
             },
             "spam": {
-                "required_version": "3.2",
+                "required_versions": ["3.2"],
                 "found_versions": ["3.1", "3.3"],
             }
         },  # not the right version
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index c0b5d394e..0690bf114 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -37,6 +37,9 @@
 openshift_logging_elasticsearch_storage_group: '65534'
 
 openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
 
+# config the es plugin to write kibana index based on the index mode
+openshift_logging_elasticsearch_kibana_index_mode: 'unique'
+
 # this is used to determine if this is an operations deployment or a non-ops deployment
 # simply used for naming purposes
 openshift_logging_elasticsearch_ops_deployment: false
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index aae23668a..5593fac3a 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -102,6 +102,11 @@
     delete_after: true
 
 # configmap
+- assert:
+    that:
+    - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+    msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
+
 - template:
     src: elasticsearch-logging.yml.j2
     dest: "{{ tempdir }}/elasticsearch-logging.yml"
@@ -115,6 +120,8 @@
     allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
     es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
     es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+    es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
+
   when: es_config_contents is undefined
   changed_when: no
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 141967c33..0c06a7677 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -53,6 +53,8 @@ openshift.searchguard:
 
 openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}}
 
+openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}}
+
 path:
   data: /elasticsearch/persistent/${CLUSTER_NAME}/data
   logs: /elasticsearch/${CLUSTER_NAME}/logs
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 7a1f5048b..5b4b226e8 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -3,6 +3,8 @@
 __latest_es_version: "3_5"
 __allowed_es_versions: ["3_5", "3_6"]
 __allowed_es_types: ["data-master", "data-client", "master", "client"]
 
+__kibana_index_modes: ["unique", "shared_ops"]
+
 # TODO: integrate these
 openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
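Note: the assert added to tasks/main.yaml fails fast when the user-supplied mode is not one of the two values __kibana_index_modes allows, before anything is templated. The validation it performs, distilled into a small sketch with a hypothetical helper name:

    KIBANA_INDEX_MODES = ("unique", "shared_ops")

    def validate_kibana_index_mode(mode):
        if mode not in KIBANA_INDEX_MODES:
            raise ValueError(
                "kibana index mode %r only supports one of: %s"
                % (mode, ", ".join(KIBANA_INDEX_MODES)))
        return mode

    print(validate_kibana_index_mode("unique"))  # ok; "bogus" would raise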
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index ce7688581..d5094c2c9 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,12 +1,21 @@
 ---
 - name: restart master api
-  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-  notify: Verify API Server
+  systemd:
+    name: "{{ openshift.common.service_type }}-master-api"
+    state: restarted
+  when:
+  - not (master_api_service_status_changed | default(false) | bool)
+  - openshift.master.cluster_method == 'native'
+  notify:
+  - Verify API Server
 
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  systemd:
+    name: "{{ openshift.common.service_type }}-master-controllers"
+    state: restarted
+  when:
+  - not (master_controllers_service_status_changed | default(false) | bool)
+  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
@@ -23,8 +32,8 @@
     # Disables the following warning:
     # Consider using get_url or uri module rather than running curl
     warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
   retries: 120
   delay: 1
   changed_when: false
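Note: the Verify API Server handler polls /healthz/ready with curl (the uri module would require python-httplib2) until it answers "ok", retrying up to 120 times at 1-second intervals. A rough Python equivalent of that wait loop; the URL and CA path below are illustrative, not taken from this diff:

    import ssl
    import time
    import urllib.request

    def wait_for_api(url, cafile=None, retries=120, delay=1):
        ctx = ssl.create_default_context(cafile=cafile)
        for _ in range(retries):
            try:
                with urllib.request.urlopen(url, context=ctx, timeout=2) as resp:
                    if resp.read().decode() == 'ok':
                        return True
            except (OSError, ssl.SSLError):
                pass  # API not up yet; keep polling
            time.sleep(delay)
        return False

    # wait_for_api("https://master.example.com:8443/healthz/ready",
    #              cafile="/etc/origin/master/ca.crt")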
diff --git a/roles/openshift_master/tasks/clean_systemd_units.yml b/roles/openshift_master/tasks/clean_systemd_units.yml
index 590692c10..e641f84d4 100644
--- a/roles/openshift_master/tasks/clean_systemd_units.yml
+++ b/roles/openshift_master/tasks/clean_systemd_units.yml
@@ -1,5 +1,9 @@
 ---
 - name: Disable master service
-  systemd: name={{ openshift.common.service_type }}-master state=stopped enabled=no masked=yes
+  systemd:
+    name: "{{ openshift.common.service_type }}-master"
+    state: stopped
+    enabled: no
+    masked: yes
   ignore_errors: true
diff --git a/roles/openshift_master/tasks/firewall.yml b/roles/openshift_master/tasks/firewall.yml
index e51eeb56e..38afb6764 100644
--- a/roles/openshift_master/tasks/firewall.yml
+++ b/roles/openshift_master/tasks/firewall.yml
@@ -7,7 +7,8 @@
       action: add
       protocol: "{{ item.port.split('/')[1] }}"
       port: "{{ item.port.split('/')[0] }}"
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_allow }}"
 
   - name: Remove iptables rules
@@ -16,7 +17,8 @@
       action: remove
       protocol: "{{ item.port.split('/')[1] }}"
       port: "{{ item.port.split('/')[0] }}"
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_deny }}"
 
 - when: r_openshift_master_firewall_enabled | bool and r_openshift_master_use_firewalld | bool
@@ -27,7 +29,8 @@
       permanent: true
       immediate: true
      state: enabled
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
    with_items: "{{ r_openshift_master_os_firewall_allow }}"
 
   - name: Remove firewalld allow rules
@@ -36,5 +39,6 @@
       permanent: true
       immediate: true
       state: disabled
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_deny }}"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index b1412c3d9..a11471891 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -7,21 +7,34 @@
 - fail:
     msg: >
       Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
-  when: openshift_master_oauth_grant_method is defined and openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
+  when:
+  - openshift_master_oauth_grant_method is defined
+  - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
 
 # HA Variable Validation
 - fail:
     msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
-  when: openshift.master.ha | bool and ((openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"]))
+  when:
+  - openshift.master.ha | bool
+  - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
 - fail:
     msg: "'native' high availability is not supported for the requested OpenShift version"
-  when: openshift.master.ha | bool and openshift.master.cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == "native"
+  - not openshift.common.version_gte_3_1_or_1_1 | bool
 - fail:
     msg: "openshift_master_cluster_password must be set for multi-master installations"
-  when: openshift.master.ha | bool and openshift.master.cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == "pacemaker"
+  - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
 - fail:
     msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
-  when: openshift.master.ha | bool and openshift.master.cluster_method == "pacemaker" and openshift.common.is_containerized | bool
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == "pacemaker"
+  - openshift.common.is_containerized | bool
 
 - name: Open up firewall ports
   include: firewall.yml
@@ -31,7 +44,8 @@
   package:
     name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
-  when: not openshift.common.is_containerized | bool
+  when:
+  - not openshift.common.is_containerized | bool
 
 - name: Create openshift.common.data_dir
   file:
@@ -40,11 +54,13 @@
     mode: 0755
     owner: root
     group: root
-  when: openshift.common.is_containerized | bool
+  when:
+  - openshift.common.is_containerized | bool
 
 - name: Reload systemd units
   command: systemctl daemon-reload
-  when: openshift.common.is_containerized | bool and install_result | changed
+  when:
+  - openshift.common.is_containerized | bool
 
 - name: Re-gather package dependent master facts
   openshift_facts:
@@ -61,8 +77,8 @@
   args:
     creates: "{{ openshift_master_policy }}"
   notify:
-    - restart master api
-    - restart master controllers
+  - restart master api
+  - restart master controllers
 
 - name: Create the scheduler config
   copy:
@@ -70,20 +86,22 @@
     dest: "{{ openshift_master_scheduler_conf }}"
     backup: true
   notify:
-    - restart master api
-    - restart master controllers
+  - restart master api
+  - restart master controllers
 
 - name: Install httpd-tools if needed
   package: name=httpd-tools state=present
-  when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
-        not openshift.common.is_atomic | bool
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  - not openshift.common.is_atomic | bool
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Ensure htpasswd directory exists
   file:
     path: "{{ item.filename | dirname }}"
     state: directory
-  when: item.kind == 'HTPasswdPasswordIdentityProvider'
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the htpasswd file if needed
@@ -91,7 +109,9 @@
     dest: "{{ item.filename }}"
     src: htpasswd.j2
     backup: yes
-  when: item.kind == 'HTPasswdPasswordIdentityProvider' and openshift.master.manage_htpasswd | bool
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  - openshift.master.manage_htpasswd | bool
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Ensure htpasswd file exists
@@ -100,7 +120,8 @@
     force: no
     content: ""
     mode: 0600
-  when: item.kind == 'HTPasswdPasswordIdentityProvider'
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the ldap ca file if needed
@@ -109,7 +130,9 @@
     content: "{{ openshift.master.ldap_ca }}"
     mode: 0600
     backup: yes
-  when: openshift.master.ldap_ca is defined and item.kind == 'LDAPPasswordIdentityProvider'
+  when:
+  - openshift.master.ldap_ca is defined
+  - item.kind == 'LDAPPasswordIdentityProvider'
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the openid ca file if needed
@@ -118,7 +141,10 @@
     content: "{{ openshift.master.openid_ca }}"
     mode: 0600
     backup: yes
-  when: openshift.master.openid_ca is defined and item.kind == 'OpenIDIdentityProvider' and item.ca | default('') != ''
+  when:
+  - openshift.master.openid_ca is defined
+  - item.kind == 'OpenIDIdentityProvider'
+  - item.ca | default('') != ''
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the request header ca file if needed
@@ -127,20 +153,23 @@
     content: "{{ openshift.master.request_header_ca }}"
     mode: 0600
     backup: yes
-  when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
+  when:
+  - openshift.master.request_header_ca is defined
+  - item.kind == 'RequestHeaderIdentityProvider'
+  - item.clientCA | default('') != ''
   with_items: "{{ openshift.master.identity_providers }}"
"{{ start_result | changed }}" - when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0] + master_api_service_status_changed: "{{ l_start_result | changed }}" + when: + - openshift.master.cluster_method == 'native' + - inventory_hostname == openshift_master_hosts[0] - pause: seconds: 15 - when: openshift.master.ha | bool and openshift.master.cluster_method == 'native' + when: + - openshift.master.ha | bool + - openshift.master.cluster_method == 'native' - name: Start and enable master api all masters systemd: name: "{{ openshift.common.service_type }}-master-api" enabled: yes state: started - when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0] - register: start_result - until: not start_result | failed + when: + - openshift.master.cluster_method == 'native' + - inventory_hostname != openshift_master_hosts[0] + register: l_start_result + until: not l_start_result | failed retries: 1 delay: 60 - name: Dump logs from master-api if it failed command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api - when: start_result | failed + when: + - l_start_result | failed - set_fact: - master_api_service_status_changed: "{{ start_result | changed }}" - when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0] + master_api_service_status_changed: "{{ l_start_result | changed }}" + when: + - openshift.master.cluster_method == 'native' + - inventory_hostname != openshift_master_hosts[0] # A separate wait is required here for native HA since notifies will # be resolved after all tasks in the role. @@ -243,67 +289,80 @@ --cacert {{ openshift.common.config_base }}/master/ca.crt {% endif %} {{ openshift.master.api_url }}/healthz/ready - register: api_available_output - until: api_available_output.stdout == 'ok' + register: l_api_available_output + until: l_api_available_output.stdout == 'ok' retries: 120 delay: 1 run_once: true changed_when: false - when: openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool + when: + - openshift.master.cluster_method == 'native' + - master_api_service_status_changed | bool - name: Start and enable master controller on first master systemd: name: "{{ openshift.common.service_type }}-master-controllers" enabled: yes state: started - when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0] - register: start_result - until: not start_result | failed + when: + - openshift.master.cluster_method == 'native' + - inventory_hostname == openshift_master_hosts[0] + register: l_start_result + until: not l_start_result | failed retries: 1 delay: 60 - name: Dump logs from master-controllers if it failed command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers - when: start_result | failed + when: + - l_start_result | failed - name: Wait for master controller service to start on first master pause: seconds: 15 - when: openshift.master.cluster_method == 'native' + when: + - openshift.master.cluster_method == 'native' - name: Start and enable master controller on all masters systemd: name: "{{ openshift.common.service_type }}-master-controllers" enabled: yes state: started - when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0] - register: start_result - until: not start_result | failed + when: + - openshift.master.cluster_method == 'native' + - 
 
 - name: Start and enable master controller on first master
   systemd:
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
-  register: start_result
-  until: not start_result | failed
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname == openshift_master_hosts[0]
+  register: l_start_result
+  until: not l_start_result | failed
   retries: 1
   delay: 60
 
 - name: Dump logs from master-controllers if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
-  when: start_result | failed
+  when:
+  - l_start_result | failed
 
 - name: Wait for master controller service to start on first master
   pause:
     seconds: 15
-  when: openshift.master.cluster_method == 'native'
+  when:
+  - openshift.master.cluster_method == 'native'
 
 - name: Start and enable master controller on all masters
   systemd:
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
-  register: start_result
-  until: not start_result | failed
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname != openshift_master_hosts[0]
+  register: l_start_result
+  until: not l_start_result | failed
   retries: 1
   delay: 60
 
 - name: Dump logs from master-controllers if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
-  when: start_result | failed
+  when:
+  - l_start_result | failed
 
 - set_fact:
-    master_controllers_service_status_changed: "{{ start_result | changed }}"
-  when: openshift.master.cluster_method == 'native'
+    master_controllers_service_status_changed: "{{ l_start_result | changed }}"
+  when:
+  - openshift.master.cluster_method == 'native'
 
 - name: Install cluster packages
   package: name=pcs state=present
-  when: openshift.master.cluster_method == 'pacemaker'
-    and not openshift.common.is_containerized | bool
-  register: install_result
+  when:
+  - openshift.master.cluster_method == 'pacemaker'
+  - not openshift.common.is_containerized | bool
+  register: l_install_result
 
 - name: Start and enable cluster service
   systemd:
     name: pcsd
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'pacemaker'
-    and not openshift.common.is_containerized | bool
+  when:
+  - openshift.master.cluster_method == 'pacemaker'
+  - not openshift.common.is_containerized | bool
 
 - name: Set the cluster user password
   shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
-  when: install_result | changed
+  when:
+  - l_install_result | changed
diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml
index 9c3fb31dc..308b2f4cd 100644
--- a/roles/openshift_master/tasks/set_loopback_context.yml
+++ b/roles/openshift_master/tasks/set_loopback_context.yml
@@ -4,7 +4,7 @@
     {{ openshift.common.client_binary }} config view
     --config={{ openshift_master_loopback_config }}
   changed_when: false
-  register: loopback_config
+  register: l_loopback_config
 
 - command: >
     {{ openshift.common.client_binary }} config set-cluster
@@ -12,7 +12,8 @@
     --embed-certs=true --server={{ openshift.master.loopback_api_url }}
     {{ openshift.master.loopback_cluster_name }}
     --config={{ openshift_master_loopback_config }}
-  when: loopback_context_string not in loopback_config.stdout
+  when:
+  - loopback_context_string not in l_loopback_config.stdout
   register: set_loopback_cluster
 
 - command: >
@@ -21,11 +22,13 @@
     --namespace=default --user={{ openshift.master.loopback_user }}
     {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
-  when: set_loopback_cluster | changed
-  register: set_loopback_context
+  when:
+  - set_loopback_cluster | changed
+  register: l_set_loopback_context
 
 - command: >
     {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
-  when: set_loopback_context | changed
+  when:
+  - l_set_loopback_context | changed
   register: set_current_context
"'Pulling layer' in pull_result.stdout" + register: l_pull_result + changed_when: "'Pulling layer' in l_pull_result.stdout" - name: Check Master system container package command: > atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master - register: result # HA - name: Install or Update HA api master system container @@ -17,7 +16,7 @@ image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" state: latest values: - - COMMAND=api + - COMMAND=api - name: Install or Update HA controller master system container oc_atomic_container: @@ -25,4 +24,4 @@ image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" state: latest values: - - COMMAND=controllers + - COMMAND=controllers diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 723bdb0c4..72c231e52 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -12,15 +12,18 @@ set_fact: containerized_svc_dir: "/etc/systemd/system" ha_svc_template_path: "docker-cluster" - when: openshift.common.is_containerized | bool + when: + - openshift.common.is_containerized | bool # This is the image used for both HA and non-HA clusters: - name: Pre-pull master image command: > docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool and not openshift.common.is_master_system_container | bool + register: l_pull_result + changed_when: "'Downloaded newer image' in l_pull_result.stdout" + when: + - openshift.common.is_containerized | bool + - not openshift.common.is_master_system_container | bool - name: Create the ha systemd unit files template: @@ -32,23 +35,26 @@ with_items: - api - controllers - register: create_ha_unit_files + register: l_create_ha_unit_files - command: systemctl daemon-reload - when: create_ha_unit_files | changed + when: + - l_create_ha_unit_files | changed # end workaround for missing systemd unit files - name: Preserve Master API Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api - register: master_api_proxy - when: openshift.master.cluster_method == "native" + register: l_master_api_proxy + when: + - openshift.master.cluster_method == "native" failed_when: false changed_when: false - name: Preserve Master API AWS options command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: master_api_aws - when: openshift.master.cluster_method == "native" + when: + - openshift.master.cluster_method == "native" failed_when: false changed_when: false @@ -57,22 +63,27 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api backup: true - when: openshift.master.cluster_method == "native" + when: + - openshift.master.cluster_method == "native" notify: - restart master api - name: Restore Master API Proxy Config Options - when: openshift.master.cluster_method == "native" - and master_api_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common + when: + - 
openshift.master.cluster_method == "native" + - l_master_api_proxy.rc == 0 + - "'http_proxy' not in openshift.common" + - "'https_proxy' not in openshift.common" lineinfile: dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api line: "{{ item }}" - with_items: "{{ master_api_proxy.stdout_lines | default([]) }}" + with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}" - name: Restore Master API AWS Options - when: openshift.master.cluster_method == "native" - and master_api_aws.rc == 0 and - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) + when: + - openshift.master.cluster_method == "native" + - master_api_aws.rc == 0 + - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) lineinfile: dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api line: "{{ item }}" @@ -82,14 +93,16 @@ - name: Preserve Master Controllers Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_proxy - when: openshift.master.cluster_method == "native" + when: + - openshift.master.cluster_method == "native" failed_when: false changed_when: false - name: Preserve Master Controllers AWS options command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_aws - when: openshift.master.cluster_method == "native" + when: + - openshift.master.cluster_method == "native" failed_when: false changed_when: false @@ -98,7 +111,8 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers backup: true - when: openshift.master.cluster_method == "native" + when: + - openshift.master.cluster_method == "native" notify: - restart master controllers @@ -107,14 +121,18 @@ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers line: "{{ item }}" with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}" - when: openshift.master.cluster_method == "native" - and master_controllers_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common + when: + - openshift.master.cluster_method == "native" + - master_controllers_proxy.rc == 0 + - "'http_proxy' not in openshift.common" + - "'https_proxy' not in openshift.common" - name: Restore Master Controllers AWS Options lineinfile: dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers line: "{{ item }}" with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}" - when: openshift.master.cluster_method == "native" - and master_controllers_aws.rc == 0 and - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) + when: + - openshift.master.cluster_method == "native" + - master_controllers_aws.rc == 0 + - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index 779b4d2f5..ef3ba2880 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -4,4 +4,4 @@
 no-negcache
 max-cache-ttl=1
 enable-dbus
 bind-interfaces
-listen-address={{ ansible_default_ipv4.address }}
+listen-address={{ openshift.node.dns_ip }}
diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml
deleted file mode 100644
index 1b043863b..000000000
--- a/roles/openshift_repos/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
-  author: TODO
-  description: OpenShift Repositories
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 1.7
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-dependencies:
-- role: openshift_sanitize_inventory
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml
deleted file mode 100644
index dca5fc5ff..000000000
--- a/roles/os_firewall/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
-  author: Jason DeTiberus
-  description: os_firewall
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - system
-allow_duplicates: yes
-dependencies:
-  - role: openshift_facts
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml
index 2cc7af478..54430f402 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewalld.yml
@@ -1,4 +1,9 @@
 ---
+- name: Fail - Firewalld is not supported on Atomic Host
+  fail:
+    msg: "Firewalld is not supported on Atomic Host"
+  when: r_os_firewall_is_atomic | bool
+
 - name: Install firewalld packages
   package:
     name: firewalld
@@ -31,7 +36,8 @@
   register: result
 
 - name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail
-  pause: seconds=10
+  pause:
+    seconds: 10
   when: result | changed
 
 - name: Restart polkitd
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 7e1fa2c02..0af5abf38 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -15,11 +15,13 @@
   when: task_result | changed
 
 - name: Install iptables packages
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
   - iptables
   - iptables-services
-  when: not openshift.common.is_atomic | bool
+  when: not r_os_firewall_is_atomic | bool
 
 - name: Start and enable iptables service
   systemd:
@@ -34,5 +36,6 @@
   with_items: "{{ ansible_play_hosts }}"
 
 - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
-  pause: seconds=10
+  pause:
+    seconds: 10
   when: result | changed
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index 20efe5b0d..c477d386c 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -1,12 +1,19 @@
 ---
-- name: Assert - Do not use firewalld on Atomic Host
-  assert:
-    that: not os_firewall_use_firewalld | bool
-    msg: "Firewalld is not supported on Atomic Host"
-  when: openshift.common.is_atomic | bool
+- name: Detecting Atomic Host Operating System
+  stat:
+    path: /run/ostree-booted
+  register: r_os_firewall_ostree_booted
 
-- include: firewall/firewalld.yml
-  when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool
+- name: Set fact r_os_firewall_is_atomic
+  set_fact:
+    r_os_firewall_is_atomic: "{{ r_os_firewall_ostree_booted.stat.exists }}"
 
-- include: firewall/iptables.yml
-  when: os_firewall_enabled | bool and not os_firewall_use_firewalld | bool
+- include: firewalld.yml
+  when:
+  - os_firewall_enabled | bool
+  - os_firewall_use_firewalld | bool
+
+- include: iptables.yml
+  when:
+  - os_firewall_enabled | bool
+  - not os_firewall_use_firewalld | bool
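Note: instead of relying on openshift_facts for openshift.common.is_atomic, the role now probes the host directly: ostree-based systems (Atomic Host) create /run/ostree-booted at boot, so a stat of that path and a set_fact replace the old fact dependency. The same probe in Python:

    import os

    def is_atomic_host():
        # /run/ostree-booted exists only on ostree-based (Atomic Host) systems
        return os.path.exists("/run/ostree-booted")

    print(is_atomic_host())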
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index ea0c42150..39d59db70 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -7,7 +7,7 @@
   when: deployment_type == 'enterprise'
 
 - set_fact:
-    default_ose_version: '3.5'
+    default_ose_version: '3.6'
   when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
 
 - set_fact:
@@ -16,7 +16,7 @@
 - fail:
     msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
   when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
-        ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5'] )
+        ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
 
 - name: Enable RHEL repositories
   command: subscription-manager repos \