21 files changed, 169 insertions, 143 deletions
@@ -37,12 +37,14 @@ not practical to start over at 1.0.
 Requirements:
  - Ansible >= 2.1.0 (>= 2.2 is preferred for performance reasons)
  - Jinja >= 2.7
+ - pyOpenSSL
+ - python-lxml
 
 ***
 
 Fedora:
 ```
-  dnf install -y ansible pyOpenSSL python-cryptography
+  dnf install -y ansible pyOpenSSL python-cryptography python-lxml
 ```
 
 2. Setup for a specific cloud:
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 5a95ecf94..324e2477f 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -619,6 +619,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
 #openshift_master_dynamic_provisioning_enabled=False
 
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
 # Configure usage of openshift_clock role.
 #openshift_clock_enabled=true
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index be919c105..4a2925599 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -619,6 +619,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
 #openshift_master_dynamic_provisioning_enabled=False
 
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
 # Configure usage of openshift_clock role.
 #openshift_clock_enabled=true
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
new file mode 100644
index 000000000..8c810096f
--- /dev/null
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: "{{ g_all_hosts }}"
+
+- hosts: l_oo_all_hosts
+  become: yes
+  tasks:
+  - name: install NetworkManager
+    package:
+      name: 'NetworkManager'
+      state: present
+
+  - name: configure NetworkManager
+    lineinfile:
+      dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+      regexp: '^{{ item }}='
+      line: '{{ item }}=yes'
+      state: present
+      create: yes
+    with_items:
+    - 'USE_PEERDNS'
+    - 'NM_CONTROLLED'
+
+  - name: enable and start NetworkManager
+    service:
+      name: 'NetworkManager'
+      state: started
+      enabled: yes
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1f314c854..53d670196 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -17,7 +17,7 @@
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Determine if node is currently scheduleable
     command: >
-      {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     changed_when: false
@@ -29,7 +29,7 @@
 
   - name: Mark unschedulable if host is a node
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   # NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
 
   - name: Evacuate Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   tasks:
@@ -64,7 +64,7 @@
 
   - name: Set node schedulability
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
     register: node_sched
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 684eea343..8c0bd272c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -48,3 +48,18 @@
     dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
     yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
     yaml_value: service-signer.key
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'admissionConfig.pluginConfig'
+    yaml_value: "{{ openshift.master.admission_plugin_config }}"
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'admissionConfig.pluginOrderOverride'
+    yaml_value:
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'kubernetesMasterConfig.admissionConfig'
+    yaml_value:
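The three modify_yaml tasks above (repeated for the 3.4 upgrade below) copy the `openshift.master.admission_plugin_config` fact into the top-level `admissionConfig.pluginConfig` key and blank out the deprecated `admissionConfig.pluginOrderOverride` and `kubernetesMasterConfig.admissionConfig` keys. A rough sketch of the expected result in master-config.yaml, reusing the `ProjectRequestLimit` example from the inventory files earlier in this diff (values illustrative, not taken from a real cluster):

```yaml
# Illustrative sketch of master-config.yaml after the upgrade tasks run.
admissionConfig:
  pluginConfig:            # filled from openshift.master.admission_plugin_config
    ProjectRequestLimit:
      configuration:
        apiVersion: v1
        kind: ProjectRequestLimitConfig
        limits:
        - selector:
            admin: "true"
        - maxProjects: "1"
  # pluginOrderOverride is emptied by the second modify_yaml task
kubernetesMasterConfig:
  admissionConfig:         # emptied by the third modify_yaml task
```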
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
new file mode 100644
index 000000000..32de9d94a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -0,0 +1,15 @@
+---
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'admissionConfig.pluginConfig'
+    yaml_value: "{{ openshift.master.admission_plugin_config }}"
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'admissionConfig.pluginOrderOverride'
+    yaml_value:
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'kubernetesMasterConfig.admissionConfig'
+    yaml_value:
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
index 2b3ae0454..41143772e 100644
--- a/roles/nuage_master/tasks/serviceaccount.yml
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -29,7 +29,7 @@
     --config={{nuage_tmp_conf}}
   with_items: "{{nuage_tasks}}"
   register: osnuage_perm_task
-  failed_when: "'already exists' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
+  failed_when: "'the object has been modified' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
   changed_when: osnuage_perm_task.rc == 0
 
 - name: Generate the node client config
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0d85dba4b..ad4b1e47b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -107,14 +107,6 @@ def migrate_node_facts(facts):
             facts['node'][param] = facts[role].pop(param)
     return facts
 
-def migrate_local_facts(facts):
-    """ Apply migrations of local facts """
-    migrated_facts = copy.deepcopy(facts)
-    migrated_facts = migrate_docker_facts(migrated_facts)
-    migrated_facts = migrate_common_facts(migrated_facts)
-    migrated_facts = migrate_node_facts(migrated_facts)
-    migrated_facts = migrate_hosted_facts(migrated_facts)
-    return migrated_facts
 
 def migrate_hosted_facts(facts):
     """ Apply migrations for master facts """
@@ -133,6 +125,30 @@ def migrate_hosted_facts(facts):
             facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
     return facts
 
+def migrate_admission_plugin_facts(facts):
+    if 'master' in facts:
+        if 'kube_admission_plugin_config' in facts['master']:
+            if 'admission_plugin_config' not in facts['master']:
+                facts['master']['admission_plugin_config'] = dict()
+            # Merge existing kube_admission_plugin_config with admission_plugin_config.
+            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
+                                                                     facts['master']['kube_admission_plugin_config'],
+                                                                     additive_facts_to_overwrite=[],
+                                                                     protected_facts_to_overwrite=[])
+            # Remove kube_admission_plugin_config fact
+            facts['master'].pop('kube_admission_plugin_config', None)
+    return facts
+
+def migrate_local_facts(facts):
+    """ Apply migrations of local facts """
+    migrated_facts = copy.deepcopy(facts)
+    migrated_facts = migrate_docker_facts(migrated_facts)
+    migrated_facts = migrate_common_facts(migrated_facts)
+    migrated_facts = migrate_node_facts(migrated_facts)
+    migrated_facts = migrate_hosted_facts(migrated_facts)
+    migrated_facts = migrate_admission_plugin_facts(migrated_facts)
+    return migrated_facts
+
 def first_ip(network):
     """ Return the first IPv4 address in network
@@ -1572,14 +1588,14 @@ def set_proxy_facts(facts):
                 builddefaults['git_http_proxy'] = builddefaults['http_proxy']
             if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
                 builddefaults['git_https_proxy'] = builddefaults['https_proxy']
-            # If we're actually defining a proxy config then create kube_admission_plugin_config
+            # If we're actually defining a proxy config then create admission_plugin_config
             # if it doesn't exist, then merge builddefaults[config] structure
-            # into kube_admission_plugin_config
-            if 'kube_admission_plugin_config' not in facts['master']:
-                facts['master']['kube_admission_plugin_config'] = dict()
+            # into admission_plugin_config
+            if 'admission_plugin_config' not in facts['master']:
+                facts['master']['admission_plugin_config'] = dict()
             if 'config' in builddefaults and ('http_proxy' in builddefaults or \
                 'https_proxy' in builddefaults):
-                facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
+                facts['master']['admission_plugin_config'].update(builddefaults['config'])
             facts['builddefaults'] = builddefaults
 
     return facts
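The new `migrate_admission_plugin_facts` migration above folds the deprecated `kube_admission_plugin_config` fact into `admission_plugin_config` via the role's `merge_facts` helper and then drops the old key, so inventories that still set the old variable keep working. A hedged before/after sketch using the `PodNodeConstraints` plugin from the inventory examples (hypothetical values):

```yaml
# Before the migration (hypothetical facts, deprecated key still populated):
#   openshift.master.kube_admission_plugin_config:
#     PodNodeConstraints:
#       configuration:
#         apiVersion: v1
#         kind: PodNodeConstraintsConfig
#
# After migrate_admission_plugin_facts runs, only the new key remains:
openshift:
  master:
    admission_plugin_config:
      PodNodeConstraints:
        configuration:
          apiVersion: v1
          kind: PodNodeConstraintsConfig
```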
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 88cdd2d89..c06758833 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -47,7 +47,7 @@
 
 - name: Wait for Node Registration
   command: >
-    {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }}
+    {{ hostvars[openshift_master_host].openshift.common.client_binary }} get node {{ openshift.node.nodename }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   register: omd_get_node
@@ -60,7 +60,7 @@
 
 - name: Set node schedulability
   command: >
-    {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable={{ 'true' if openshift.node.schedulable | bool else 'false' }}
+    {{ hostvars[openshift_master_host].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable={{ 'true' if openshift.node.schedulable | bool else 'false' }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   when: "'nodename' in openshift.node"
@@ -68,7 +68,7 @@
 
 - name: Label nodes
   command: >
-    {{ openshift.common.client_binary }} label --overwrite node {{ openshift.node.nodename }} {{ openshift.node.labels | oo_combine_dict }}
+    {{ hostvars[openshift_master_host].openshift.common.client_binary }} label --overwrite node {{ openshift.node.nodename }} {{ openshift.node.labels | oo_combine_dict }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   when: "'nodename' in openshift.node and 'labels' in openshift.node and openshift.node.labels != {}"
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index bdaf64b3f..a7214482f 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -50,6 +50,16 @@
   failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
   changed_when: osmiq_create_cluster_role.rc == 0
 
+- name: Create Hawkular Metrics Admin Cluster Role
+  shell: >
+    echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
+    {{ openshift.common.client_binary }}
+    --config={{manage_iq_tmp_conf}}
+    create -f -
+  register: oshawkular_create_cluster_role
+  failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
+  changed_when: oshawkular_create_cluster_role.rc == 0
+
 - name: Configure role/user permissions
   command: >
     {{ openshift.common.client_binary }} adm {{item}}
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 6a0c5b41b..37d4679ef 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -9,6 +9,20 @@ manageiq_cluster_role:
     verbs:
     - '*'
 
+manageiq_metrics_admin_clusterrole:
+  apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    name: hawkular-metrics-admin
+  rules:
+  - apiGroups:
+    - ""
+    resources:
+    - hawkular-metrics
+    - hawkular-alerts
+    verbs:
+    - '*'
+
 manageiq_service_account:
   apiVersion: v1
   kind: ServiceAccount
@@ -31,6 +45,7 @@ manage_iq_tasks:
 - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
 - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
 - policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
 
 manage_iq_openshift_3_2_tasks:
 - policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index a52ae578c..dc9226a5a 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,7 +1,4 @@
 admissionConfig:
-{% if 'admission_plugin_order' in openshift.master %}
-  pluginOrderOverride:{{ openshift.master.admission_plugin_order | to_padded_yaml(level=2) }}
-{% endif %}
 {% if 'admission_plugin_config' in openshift.master %}
   pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }}
 {% endif %}
@@ -116,13 +113,6 @@ kubernetesMasterConfig:
   - v1beta3
   - v1
 {% endif %}
-  admissionConfig:
-{% if 'kube_admission_plugin_order' in openshift.master %}
-    pluginOrderOverride:{{ openshift.master.kube_admission_plugin_order | to_padded_yaml(level=3) }}
-{% endif %}
-{% if 'kube_admission_plugin_config' in openshift.master %}
-    pluginConfig:{{ openshift.master.kube_admission_plugin_config | to_padded_yaml(level=3) }}
-{% endif %}
   apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
   controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
   masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 62ac1aef5..1f27a2c1d 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -66,10 +66,8 @@
     master_image: "{{ osm_image | default(None) }}"
     scheduler_predicates: "{{ openshift_master_scheduler_predicates | default(None) }}"
    scheduler_priorities: "{{ openshift_master_scheduler_priorities | default(None) }}"
-    admission_plugin_order: "{{openshift_master_admission_plugin_order | default(None) }}"
     admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
-    kube_admission_plugin_order: "{{openshift_master_kube_admission_plugin_order | default(None) }}"
-    kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}"
+    kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
     oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
     oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
     oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 69bcd3668..35f84c2cf 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -44,7 +44,7 @@
 
 - name: Generate the node client config
   command: >
-    {{ openshift.common.client_binary }} adm create-api-client-config
+    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
     {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
     {% endfor %}
@@ -63,7 +63,7 @@
 
 - name: Generate the node server certificate
   command: >
-    {{ openshift.common.client_binary }} adm ca create-server-cert
+    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
     --cert={{ openshift_node_generated_config_dir }}/server.crt
     --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
     --overwrite=true
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index 51820210d..0167b02b1 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -4,6 +4,7 @@
     systemctl show NetworkManager
   register: nm_show
   changed_when: false
+  ignore_errors: True
 
 - name: Set fact using_network_manager
   set_fact:
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index c6c70b81d..bb7fc2384 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -31,7 +31,6 @@ Use iptables and open tcp ports 80 and 443:
 ---
 - hosts: servers
   vars:
-    os_firewall_use_firewalld: false
     os_firewall_allow:
     - service: httpd
       port: 80/tcp
@@ -46,6 +45,7 @@ Use firewalld and open tcp port 443 and close previously open tcp port 80:
 ---
 - hosts: servers
   vars:
+    os_firewall_use_firewalld: true
     os_firewall_allow:
     - service: https
       port: 443/tcp
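The README change above flips the documented default: iptables is now assumed unless `os_firewall_use_firewalld` is explicitly set to `true`, so the iptables example no longer needs to disable firewalld. A minimal sketch of a play that opts into firewalld (host group and port are illustrative, mirroring the README examples):

```yaml
- hosts: servers
  vars:
    os_firewall_use_firewalld: true   # omit to keep the iptables default
    os_firewall_allow:
    - service: https
      port: 443/tcp
  roles:
  - os_firewall
```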
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index bd638b69b..37bb16f35 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -139,7 +139,7 @@ class IpTablesManager(object):  # pylint: disable=too-many-instance-attributes
             output = check_output(cmd, stderr=subprocess.STDOUT)
 
             # break the input rules into rows and columns
-            input_rules = [s.split() for s in output.split('\n')]
+            input_rules = [s.split() for s in to_native(output).split('\n')]
 
             # Find the last numbered rule
             last_rule_num = None
@@ -269,5 +269,6 @@ def main():
 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils._text import to_native
 if __name__ == '__main__':
     main()
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml
index 6df7c9f2b..4cfc72011 100644
--- a/roles/os_firewall/meta/main.yml
+++ b/roles/os_firewall/meta/main.yml
@@ -6,11 +6,11 @@ galaxy_info:
   license: Apache License, Version 2.0
   min_ansible_version: 1.7
   platforms:
-  - name: EL
-    versions:
-    - 7
+    - name: EL
+      versions:
+        - 7
   categories:
-  - system
+    - system
 allow_duplicates: yes
 dependencies:
-- { role: openshift_facts }
+  - role: openshift_facts
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index a5b733cb7..1101870be 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -2,87 +2,44 @@
 - name: Install firewalld packages
   package: name=firewalld state=present
   when: not openshift.common.is_containerized | bool
-  register: install_result
-
-- name: Check if iptables-services is installed
-  command: rpm -q iptables-services
-  register: pkg_check
-  failed_when: pkg_check.rc > 1
-  changed_when: no
 
 - name: Ensure iptables services are not enabled
-  service:
+  systemd:
     name: "{{ item }}"
     state: stopped
     enabled: no
+    masked: yes
   with_items:
-  - iptables
-  - ip6tables
-  when: pkg_check.rc == 0
-
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when: install_result | changed
-
-- name: Determine if firewalld service masked
-  command: >
-    systemctl is-enabled firewalld
-  register: os_firewall_firewalld_masked_output
-  changed_when: false
-  failed_when: false
-
-- name: Unmask firewalld service
-  command: >
-    systemctl unmask firewalld
-  when: os_firewall_firewalld_masked_output.stdout == "masked"
+    - iptables
+    - ip6tables
+  register: task_result
+  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
 
 - name: Start and enable firewalld service
-  service:
+  systemd:
     name: firewalld
     state: started
     enabled: yes
+    masked: no
+    daemon_reload: yes
   register: result
 
 - name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail
   pause: seconds=10
   when: result | changed
 
-- name: Mask iptables services
-  command: systemctl mask "{{ item }}"
-  register: result
-  changed_when: "'iptables' in result.stdout"
-  with_items:
-  - iptables
-  - ip6tables
-  when: pkg_check.rc == 0
-  ignore_errors: yes
-
-# TODO: Ansible 1.9 will eliminate the need for separate firewalld tasks for
-# enabling rules and making them permanent with the immediate flag
 - name: Add firewalld allow rules
   firewalld:
     port: "{{ item.port }}"
-    permanent: false
-    state: enabled
-  with_items: "{{ os_firewall_allow }}"
-
-- name: Persist firewalld allow rules
-  firewalld:
-    port: "{{ item.port }}"
     permanent: true
+    immediate: true
     state: enabled
   with_items: "{{ os_firewall_allow }}"
 
 - name: Remove firewalld allow rules
   firewalld:
     port: "{{ item.port }}"
-    permanent: false
-    state: disabled
-  with_items: "{{ os_firewall_deny }}"
-
-- name: Persist removal of firewalld allow rules
-  firewalld:
-    port: "{{ item.port }}"
     permanent: true
+    immediate: true
     state: disabled
   with_items: "{{ os_firewall_deny }}"
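The firewalld rewrite above resolves the old TODO about duplicated tasks: because the firewalld module accepts `immediate: true` together with `permanent: true`, one task now both applies a rule to the running firewall and persists it, so the separate "Persist ..." tasks are removed, and the systemd module's `masked` and `daemon_reload` options replace the hand-rolled `systemctl mask`/`daemon-reload` commands. A condensed sketch of the resulting single-task pattern for one rule (the port is illustrative):

```yaml
- name: Open one port both immediately and persistently (illustrative)
  firewalld:
    port: 8443/tcp
    permanent: true    # written to the permanent configuration
    immediate: true    # also applied to the running firewalld, no second task needed
    state: enabled
```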
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 366ede8fd..930b32cf2 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -1,64 +1,28 @@
 ---
-- name: Check if firewalld is installed
-  command: rpm -q firewalld
-  args:
-    # Disables the following warning:
-    # Consider using yum, dnf or zypper module rather than running rpm
-    warn: no
-  register: pkg_check
-  failed_when: pkg_check.rc > 1
-  changed_when: no
-
 - name: Ensure firewalld service is not enabled
-  service:
+  systemd:
     name: firewalld
     state: stopped
     enabled: no
-  when: pkg_check.rc == 0
-
-# TODO: submit PR upstream to add mask/unmask to service module
-- name: Mask firewalld service
-  command: systemctl mask firewalld
-  register: result
-  changed_when: "'firewalld' in result.stdout"
-  when: pkg_check.rc == 0
-  ignore_errors: yes
+    masked: yes
+  register: task_result
+  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
 
 - name: Install iptables packages
   package: name={{ item }} state=present
   with_items:
-  - iptables
-  - iptables-services
-  register: install_result
+    - iptables
+    - iptables-services
   when: not openshift.common.is_atomic | bool
 
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when: install_result | changed
-
-- name: Determine if iptables service masked
-  command: >
-    systemctl is-enabled {{ item }}
-  with_items:
-  - iptables
-  - ip6tables
-  register: os_firewall_iptables_masked_output
-  changed_when: false
-  failed_when: false
-
-- name: Unmask iptables service
-  command: >
-    systemctl unmask {{ item }}
-  with_items:
-  - iptables
-  - ip6tables
-  when: "'masked' in os_firewall_iptables_masked_output.results | map(attribute='stdout')"
-
 - name: Start and enable iptables service
-  service:
+  systemd:
     name: iptables
     state: started
     enabled: yes
+    masked: no
+    daemon_reload: yes
   register: result
 
 - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail