Diffstat (limited to 'roles')
19 files changed, 518 insertions(+), 167 deletions(-)
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index ae059b549..e4d1b57e6 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf=docker.service
 [Service]
 EnvironmentFile=/etc/etcd/etcd.conf
 ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
 ExecStop=/usr/bin/docker stop {{ etcd_service }}
 SyslogIdentifier=etcd_container
 Restart=always
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 5f7e4b8fa..09b0561a7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1960,7 +1960,7 @@ class PolicyUser(OpenShiftCLI):
     @property
     def policybindings(self):
         if self._policy_bindings is None:
-            results = self._get('clusterpolicybindings', None)
+            results = self._get('policybindings', None)
             if results['returncode'] != 0:
                 raise OpenShiftCLIError('Could not retrieve policybindings')
             self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index a34ce351e..e2cbcfb81 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1531,10 +1531,10 @@ class Rule(object):
         results = []
         for rule in inc_rules:
-            results.append(Rule(rule['apiGroups'],
-                                rule['attributeRestrictions'],
-                                rule['resources'],
-                                rule['verbs']))
+            results.append(Rule(rule.get('apiGroups', ['']),
+                                rule.get('attributeRestrictions', None),
+                                rule.get('resources', []),
+                                rule.get('verbs', [])))
 
         return results
 
@@ -1633,7 +1633,7 @@ class OCClusterRole(OpenShiftCLI):
     @property
     def clusterrole(self):
         ''' property for clusterrole'''
-        if not self._clusterrole:
+        if self._clusterrole is None:
             self.get()
 
         return self._clusterrole
@@ -1669,6 +1669,7 @@ class OCClusterRole(OpenShiftCLI):
 
         elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
             result['returncode'] = 0
+            self.clusterrole = None
 
         return result
 
@@ -1738,6 +1739,9 @@ class OCClusterRole(OpenShiftCLI):
             # Create it here
             api_rval = oc_clusterrole.create()
 
+            if api_rval['returncode'] != 0:
+                return {'failed': True, 'msg': api_rval}
+
             # return the created object
             api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 88fcc1ddc..37a685ebb 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):
     @property
     def policybindings(self):
         if self._policy_bindings is None:
-            results = self._get('clusterpolicybindings', None)
+            results = self._get('policybindings', None)
             if results['returncode'] != 0:
                 raise OpenShiftCLIError('Could not retrieve policybindings')
             self._policy_bindings = results['results'][0]['items'][0]
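[Review note] The `is None` checks introduced above fix a subtle caching bug: an empty dict returned by a successful lookup is falsy, so the old `if not self._clusterrole` test re-queried the API on every property access. A minimal, self-contained sketch of the pattern (the class and names here are illustrative, not from the repository):

# Why "is None" beats truthiness for a lazily-cached lookup.
class LazyRole(object):
    def __init__(self):
        self._clusterrole = None   # None means "never fetched"
        self.get_calls = 0

    def get(self):
        self.get_calls += 1
        self._clusterrole = {}     # a lookup may legitimately return an empty dict

    @property
    def clusterrole(self):
        if self._clusterrole is None:   # "if not self._clusterrole" would re-fetch forever
            self.get()
        return self._clusterrole

role = LazyRole()
role.clusterrole
role.clusterrole
assert role.get_calls == 1   # cached; the old falsy check would have fetched twice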
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index 1d3d977db..ae6795446 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):
     @property
     def clusterrole(self):
         ''' property for clusterrole'''
-        if not self._clusterrole:
+        if self._clusterrole is None:
             self.get()
 
         return self._clusterrole
@@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):
 
         elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
             result['returncode'] = 0
+            self.clusterrole = None
 
         return result
 
@@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):
             # Create it here
             api_rval = oc_clusterrole.create()
 
+            if api_rval['returncode'] != 0:
+                return {'failed': True, 'msg': api_rval}
+
             # return the created object
             api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
index 4590dcf90..fe5ed9723 100644
--- a/roles/lib_openshift/src/lib/rule.py
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -136,9 +136,9 @@ class Rule(object):
         results = []
         for rule in inc_rules:
-            results.append(Rule(rule['apiGroups'],
-                                rule['attributeRestrictions'],
-                                rule['resources'],
-                                rule['verbs']))
+            results.append(Rule(rule.get('apiGroups', ['']),
+                                rule.get('attributeRestrictions', None),
+                                rule.get('resources', []),
+                                rule.get('verbs', [])))
 
         return results
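[Review note] Switching from `rule['apiGroups']` to `rule.get('apiGroups', [''])` lets `to_rules` accept API responses that omit optional fields instead of raising KeyError. A small sketch of the behavior (the `Rule` stub below is illustrative; the real class lives in roles/lib_openshift/src/lib/rule.py):

class Rule(object):
    # Stub with the same constructor shape as the patched class.
    def __init__(self, api_groups, attribute_restrictions, resources, verbs):
        self.api_groups = api_groups
        self.attribute_restrictions = attribute_restrictions
        self.resources = resources
        self.verbs = verbs

# A rule as the API may return it, with the optional keys omitted entirely:
incoming = [{'resources': ['pods/proxy'], 'verbs': ['*']}]

rules = [Rule(rule.get('apiGroups', ['']),
              rule.get('attributeRestrictions', None),
              rule.get('resources', []),
              rule.get('verbs', []))
         for rule in incoming]

assert rules[0].api_groups == ['']   # defaulted instead of raising KeyError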
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
new file mode 100644
index 000000000..c2792a0fe
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -0,0 +1,65 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
+    """Check that recommended disk space is available before a first-time install."""
+
+    name = "disk_availability"
+    tags = ["preflight"]
+
+    # Values taken from the official installation documentation:
+    # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+    recommended_disk_space_bytes = {
+        "masters": 40 * 10**9,
+        "nodes": 15 * 10**9,
+        "etcd": 20 * 10**9,
+    }
+
+    @classmethod
+    def is_active(cls, task_vars):
+        """Skip hosts that do not have recommended disk space requirements."""
+        group_names = get_var(task_vars, "group_names", default=[])
+        has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes))
+        return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
+
+    def run(self, tmp, task_vars):
+        group_names = get_var(task_vars, "group_names")
+        ansible_mounts = get_var(task_vars, "ansible_mounts")
+
+        min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
+        free_bytes = self.openshift_available_disk(ansible_mounts)
+
+        if free_bytes < min_free_bytes:
+            return {
+                'failed': True,
+                'msg': (
+                    'Available disk space ({:.1f} GB) for the volume containing '
+                    '"/var" is below minimum recommended space ({:.1f} GB)'
+                ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9)
+            }
+
+        return {}
+
+    @staticmethod
+    def openshift_available_disk(ansible_mounts):
+        """Determine the available disk space for an OpenShift installation.
+
+        ansible_mounts should be a list of dicts like the 'setup' Ansible module returns.
+        """
+        # priority list in descending order
+        supported_mnt_paths = ["/var", "/"]
+        available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+
+        try:
+            for path in supported_mnt_paths:
+                if path in available_mnts:
+                    return available_mnts[path]["size_available"]
+        except KeyError:
+            pass
+
+        paths = ', '.join(sorted(available_mnts)) or 'none'
+        msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths)
+        raise OpenShiftCheckException(msg)
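[Review note] The mount-point resolution above walks a fixed priority list, so a dedicated /var volume wins over the root filesystem. A standalone sketch of that lookup with made-up sizes (it mirrors `openshift_available_disk` but is not a call into it):

# /var shadows / when both are mounted, per the descending priority list.
ansible_mounts = [
    {'mount': '/', 'size_available': 10 * 10**9},
    {'mount': '/var', 'size_available': 25 * 10**9},
]
available_mnts = {mnt.get('mount'): mnt for mnt in ansible_mounts}

free_bytes = None
for path in ['/var', '/']:
    if path in available_mnts:
        free_bytes = available_mnts[path]['size_available']
        break
# The real check raises OpenShiftCheckException when neither path is found.

assert free_bytes == 25 * 10**9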
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
new file mode 100644
index 000000000..28805dc37
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -0,0 +1,44 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, get_var
+
+
+class MemoryAvailability(OpenShiftCheck):
+    """Check that recommended memory is available."""
+
+    name = "memory_availability"
+    tags = ["preflight"]
+
+    # Values taken from the official installation documentation:
+    # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+    recommended_memory_bytes = {
+        "masters": 16 * 10**9,
+        "nodes": 8 * 10**9,
+        "etcd": 20 * 10**9,
+    }
+
+    @classmethod
+    def is_active(cls, task_vars):
+        """Skip hosts that do not have recommended memory requirements."""
+        group_names = get_var(task_vars, "group_names", default=[])
+        has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
+        return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
+
+    def run(self, tmp, task_vars):
+        group_names = get_var(task_vars, "group_names")
+        total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+
+        min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+
+        if total_memory_bytes < min_memory_bytes:
+            return {
+                'failed': True,
+                'msg': (
+                    'Available memory ({available:.1f} GB) '
+                    'below recommended value ({recommended:.1f} GB)'
+                ).format(
+                    available=float(total_memory_bytes) / 10**9,
+                    recommended=float(min_memory_bytes) / 10**9,
+                ),
+            }
+
+        return {}
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 657e15160..20d160eaf 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,8 @@
-# pylint: disable=missing-docstring
+# pylint: disable=missing-docstring,too-few-public-methods
+"""
+Mixin classes meant to be used with subclasses of OpenShiftCheck.
+"""
+
 from openshift_checks import get_var
 
 
@@ -7,12 +11,5 @@ class NotContainerizedMixin(object):
 
     @classmethod
     def is_active(cls, task_vars):
-        return (
-            # This mixin is meant to be used with subclasses of OpenShiftCheck.
-            super(NotContainerizedMixin, cls).is_active(task_vars) and
-            not cls.is_containerized(task_vars)
-        )
-
-    @staticmethod
-    def is_containerized(task_vars):
-        return get_var(task_vars, "openshift", "common", "is_containerized")
+        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+        return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
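[Review note] The rewritten mixin leans on cooperative `super()` calls: each `is_active` defers to the next class in the MRO, so a check runs only when every layer agrees. A minimal sketch of that composition (the stub classes below are illustrative, not the real implementations):

class OpenShiftCheck(object):
    # Stub of the base class behavior: active by default.
    @classmethod
    def is_active(cls, task_vars):
        return True

class NotContainerizedMixin(object):
    @classmethod
    def is_active(cls, task_vars):
        return (super(NotContainerizedMixin, cls).is_active(task_vars) and
                not task_vars['is_containerized'])

class DemoCheck(NotContainerizedMixin, OpenShiftCheck):
    pass

assert DemoCheck.is_active({'is_containerized': False})
assert not DemoCheck.is_active({'is_containerized': True})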
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index a877246f4..2693ae37b 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -1,5 +1,7 @@
 import pytest
 
+from ansible.playbook.play_context import PlayContext
+
 from openshift_health_check import ActionModule, resolve_checks
 from openshift_checks import OpenShiftCheckException
 
@@ -34,7 +36,7 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
 @pytest.fixture
 def plugin():
     task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
-    plugin = ActionModule(task, None, None, None, None, None)
+    plugin = ActionModule(task, None, PlayContext(), None, None, None)
     return plugin
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
new file mode 100644
index 000000000..970b474d7
--- /dev/null
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -0,0 +1,155 @@
+import pytest
+
+from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+    (['masters'], False, True),
+    # ensure check is skipped on containerized installs
+    (['masters'], True, False),
+    (['nodes'], False, True),
+    (['etcd'], False, True),
+    (['masters', 'nodes'], False, True),
+    (['masters', 'etcd'], False, True),
+    ([], False, False),
+    (['lb'], False, False),
+    (['nfs'], False, False),
+])
+def test_is_active(group_names, is_containerized, is_active):
+    task_vars = dict(
+        group_names=group_names,
+        openshift=dict(common=dict(is_containerized=is_containerized)),
+    )
+    assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+    ([], ['none']),  # empty ansible_mounts
+    ([{'mount': '/mnt'}], ['/mnt']),  # missing relevant mount paths
+    ([{'mount': '/var'}], ['/var']),  # missing size_available
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+    task_vars = dict(
+        group_names=['masters'],
+        ansible_mounts=ansible_mounts,
+    )
+    check = DiskAvailability(execute_module=fake_execute_module)
+
+    with pytest.raises(OpenShiftCheckException) as excinfo:
+        check.run(tmp=None, task_vars=task_vars)
+
+    for word in 'determine available disk'.split() + extra_words:
+        assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts', [
+    (
+        ['masters'],
+        [{
+            'mount': '/',
+            'size_available': 40 * 10**9 + 1,
+        }],
+    ),
+    (
+        ['nodes'],
+        [{
+            'mount': '/',
+            'size_available': 15 * 10**9 + 1,
+        }],
+    ),
+    (
+        ['etcd'],
+        [{
+            'mount': '/',
+            'size_available': 20 * 10**9 + 1,
+        }],
+    ),
+    (
+        ['etcd'],
+        [{
+            # not enough space on / ...
+            'mount': '/',
+            'size_available': 0,
+        }, {
+            # ... but enough on /var
+            'mount': '/var',
+            'size_available': 20 * 10**9 + 1,
+        }],
+    ),
+])
+def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
+    task_vars = dict(
+        group_names=group_names,
+        ansible_mounts=ansible_mounts,
+    )
+
+    check = DiskAvailability(execute_module=fake_execute_module)
+    result = check.run(tmp=None, task_vars=task_vars)
+
+    assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [
+    (
+        ['masters'],
+        [{
+            'mount': '/',
+            'size_available': 1,
+        }],
+        ['0.0 GB'],
+    ),
+    (
+        ['nodes'],
+        [{
+            'mount': '/',
+            'size_available': 1 * 10**9,
+        }],
+        ['1.0 GB'],
+    ),
+    (
+        ['etcd'],
+        [{
+            'mount': '/',
+            'size_available': 1,
+        }],
+        ['0.0 GB'],
+    ),
+    (
+        ['nodes', 'masters'],
+        [{
+            'mount': '/',
+            # enough space for a node, not enough for a master
+            'size_available': 15 * 10**9 + 1,
+        }],
+        ['15.0 GB'],
+    ),
+    (
+        ['etcd'],
+        [{
+            # enough space on / ...
+            'mount': '/',
+            'size_available': 20 * 10**9 + 1,
+        }, {
+            # .. but not enough on /var
+            'mount': '/var',
+            'size_available': 0,
+        }],
+        ['0.0 GB'],
+    ),
+])
+def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words):
+    task_vars = dict(
+        group_names=group_names,
+        ansible_mounts=ansible_mounts,
+    )
+
+    check = DiskAvailability(execute_module=fake_execute_module)
+    result = check.run(tmp=None, task_vars=task_vars)
+
+    assert result['failed']
+    for word in 'below recommended'.split() + extra_words:
+        assert word in result['msg']
+
+
+def fake_execute_module(*args):
+    raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
new file mode 100644
index 000000000..e161a5b9e
--- /dev/null
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -0,0 +1,91 @@
+import pytest
+
+from openshift_checks.memory_availability import MemoryAvailability
+
+
+@pytest.mark.parametrize('group_names,is_active', [
+    (['masters'], True),
+    (['nodes'], True),
+    (['etcd'], True),
+    (['masters', 'nodes'], True),
+    (['masters', 'etcd'], True),
+    ([], False),
+    (['lb'], False),
+    (['nfs'], False),
+])
+def test_is_active(group_names, is_active):
+    task_vars = dict(
+        group_names=group_names,
+    )
+    assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
+    (
+        ['masters'],
+        17200,
+    ),
+    (
+        ['nodes'],
+        8200,
+    ),
+    (
+        ['etcd'],
+        22200,
+    ),
+    (
+        ['masters', 'nodes'],
+        17000,
+    ),
+])
+def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+    task_vars = dict(
+        group_names=group_names,
+        ansible_memtotal_mb=ansible_memtotal_mb,
+    )
+
+    check = MemoryAvailability(execute_module=fake_execute_module)
+    result = check.run(tmp=None, task_vars=task_vars)
+
+    assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
+    (
+        ['masters'],
+        0,
+        ['0.0 GB'],
+    ),
+    (
+        ['nodes'],
+        100,
+        ['0.1 GB'],
+    ),
+    (
+        ['etcd'],
+        -1,
+        ['0.0 GB'],
+    ),
+    (
+        ['nodes', 'masters'],
+        # enough memory for a node, not enough for a master
+        11000,
+        ['11.0 GB'],
+    ),
+])
+def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+    task_vars = dict(
+        group_names=group_names,
+        ansible_memtotal_mb=ansible_memtotal_mb,
+    )
+
+    check = MemoryAvailability(execute_module=fake_execute_module)
+    result = check.run(tmp=None, task_vars=task_vars)
+
+    assert result['failed']
+    for word in 'below recommended'.split() + extra_words:
+        assert word in result['msg']
+
+
+def fake_execute_module(*args):
+    raise AssertionError('this function should not be called')
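[Review note] The memory fixtures only make sense next to the check's unit conversion: `ansible_memtotal_mb` is scaled by 10**6, and the thresholds are decimal gigabytes. Worked arithmetic for the passing masters row above:

ansible_memtotal_mb = 17200                       # from the passing fixture
total_memory_bytes = ansible_memtotal_mb * 10**6  # 1.72e10 bytes = 17.2 GB
recommended_masters_bytes = 16 * 10**9            # masters threshold

assert total_memory_bytes >= recommended_masters_bytes   # so 17200 MB passes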
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 16185fc1d..f89855bf5 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -95,6 +95,13 @@ spec:
               readOnly: true
             - name: elasticsearch-storage
               mountPath: /elasticsearch/persistent
+          readinessProbe:
+            exec:
+              command:
+              - "/usr/share/elasticsearch/probe/readiness.sh"
+            initialDelaySeconds: 5
+            timeoutSeconds: 4
+            periodSeconds: 5
       volumes:
         - name: elasticsearch
           secret:
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index f202486a5..cfc4e2722 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -3,24 +3,13 @@
     msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
   when: not openshift.common.version_gte_3_1_or_1_1 | bool
 
-- name: Copy Configuration to temporary conf
-  command: >
-    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
-  changed_when: false
-
 - name: Add Management Infrastructure project
-  command: >
-    {{ openshift.common.client_binary }} adm new-project
-    management-infra
-    --description="Management Infrastructure"
-    --config={{manage_iq_tmp_conf}}
-  register: osmiq_create_mi_project
-  failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
-  changed_when: osmiq_create_mi_project.rc == 0
+  oc_project:
+    name: management-infra
+    description: Management Infrastructure
 
 - name: Create Admin and Image Inspector Service Account
   oc_serviceaccount:
-    kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
     name: "{{ item }}"
     namespace: management-infra
     state: present
@@ -28,51 +17,42 @@
   - management-admin
   - inspector-admin
 
-- name: Create Cluster Role
-  shell: >
-    echo {{ manageiq_cluster_role | to_json | quote }} |
-    {{ openshift.common.client_binary }} create
-    --config={{manage_iq_tmp_conf}}
-    -f -
-  register: osmiq_create_cluster_role
-  failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
-  changed_when: osmiq_create_cluster_role.rc == 0
+- name: Create manageiq cluster role
+  oc_clusterrole:
+    name: management-infra-admin
+    rules:
+    - apiGroups:
+      - ""
+      resources:
+      - pods/proxy
+      verbs:
+      - "*"
 
 - name: Create Hawkular Metrics Admin Cluster Role
-  shell: >
-    echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
-    {{ openshift.common.client_binary }}
-    --config={{manage_iq_tmp_conf}}
-    create -f -
-  register: oshawkular_create_cluster_role
-  failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
-  changed_when: oshawkular_create_cluster_role.rc == 0
-  # AUDIT:changed_when_note: Checking the return code is insufficient
-  # here. We really need to verify the if the role even exists before
-  # we run this task.
+  oc_clusterrole:
+    name: hawkular-metrics-admin
+    rules:
+    - apiGroups:
+      - ""
+      resources:
+      - hawkular-alerts
+      - hawkular-metrics
+      verbs:
+      - "*"
 
 - name: Configure role/user permissions
-  command: >
-    {{ openshift.common.client_binary }} adm {{item}}
-    --config={{manage_iq_tmp_conf}}
-  with_items: "{{manage_iq_tasks}}"
-  register: osmiq_perm_task
-  failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
-  changed_when: osmiq_perm_task.rc == 0
-  # AUDIT:changed_when_note: Checking the return code is insufficient
-  # here. We really need to compare the current role/user permissions
-  # with their expected state. I think we may have a module for this?
-
+  oc_adm_policy_user:
+    namespace: management-infra
+    resource_name: "{{ item.resource_name }}"
+    resource_kind: "{{ item.resource_kind }}"
+    user: "{{ item.user }}"
+  with_items: "{{ manage_iq_tasks }}"
 
 - name: Configure 3_2 role/user permissions
-  command: >
-    {{ openshift.common.client_binary }} adm {{item}}
-    --config={{manage_iq_tmp_conf}}
+  oc_adm_policy_user:
+    namespace: management-infra
+    resource_name: "{{ item.resource_name }}"
+    resource_kind: "{{ item.resource_kind }}"
+    user: "{{ item.user }}"
   with_items: "{{manage_iq_openshift_3_2_tasks}}"
-  register: osmiq_perm_3_2_task
-  failed_when: osmiq_perm_3_2_task.rc != 0
-  changed_when: osmiq_perm_3_2_task.rc == 0
   when: openshift.common.version_gte_3_2_or_1_2 | bool
-
-- name: Clean temporary configuration file
-  file: path={{manage_iq_tmp_conf}} state=absent
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 9936bb126..15d667628 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,41 +1,31 @@
 ---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-manageiq_cluster_role:
-  apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    name: management-infra-admin
-  rules:
-  - resources:
-    - pods/proxy
-    verbs:
-    - '*'
-
-manageiq_metrics_admin_clusterrole:
-  apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    name: hawkular-metrics-admin
-  rules:
-  - apiGroups:
-    - ""
-    resources:
-    - hawkular-metrics
-    - hawkular-alerts
-    verbs:
-    - '*'
-
-manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
-
 manage_iq_tasks:
-- policy add-role-to-user -n management-infra admin -z management-admin
-- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
-- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
-- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- resource_kind: role
+  resource_name: admin
+  user: management-admin
+- resource_kind: role
+  resource_name: management-infra-admin
+  user: management-admin
+- resource_kind: cluster-role
+  resource_name: cluster-reader
+  user: system:serviceaccount:management-infra:management-admin
+- resource_kind: scc
+  resource_name: privileged
+  user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+  resource_name: system:image-puller
+  user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: scc
+  resource_name: privileged
+  user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: cluster-role
+  resource_name: self-provisioner
+  user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+  resource_name: hawkular-metrics-admin
+  user: system:serviceaccount:management-infra:management-admin
 
 manage_iq_openshift_3_2_tasks:
-- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+  resource_name: system:image-auditor
+  user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index e91891ca9..416cf605a 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -6,20 +6,6 @@
 # - docker_version
 # - skip_docker_restart
 
-# We need docker service up to remove all the images, but these services will keep
-# trying to re-start and thus re-pull the images we're trying to delete.
-- name: Stop containerized services
-  service: name={{ item }} state=stopped
-  with_items:
-    - "{{ openshift.common.service_type }}-master"
-    - "{{ openshift.common.service_type }}-master-api"
-    - "{{ openshift.common.service_type }}-master-controllers"
-    - "{{ openshift.common.service_type }}-node"
-    - etcd_container
-    - openvswitch
-  failed_when: false
-  when: openshift.common.is_containerized | bool
-
 - name: Check Docker image count
   shell: "docker images -aq | wc -l"
   register: docker_image_count
@@ -45,5 +31,4 @@
 - name: Upgrade Docker
   package: name=docker{{ '-' + docker_version }} state=present
 
-- include: restart.yml
-  when: not skip_docker_restart | default(False) | bool
+# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 01bd3bf38..57da86620 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -9,6 +9,28 @@
 # - openshift_release
 
 # tasks file for openshift_node_upgrade
+
+- name: Stop node and openvswitch services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift.common.service_type }}-node"
+  - openvswitch
+  failed_when: false
+
+- name: Stop additional containerized services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift.common.service_type }}-master"
+  - "{{ openshift.common.service_type }}-master-controllers"
+  - "{{ openshift.common.service_type }}-master-api"
+  - etcd_container
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
 - include: docker/upgrade.yml
   vars:
     # We will restart Docker ourselves after everything is ready:
@@ -16,7 +38,6 @@
   when:
   - l_docker_upgrade is defined
  - l_docker_upgrade | bool
-  - not openshift.common.is_containerized | bool
 
 - include: "{{ node_config_hook }}"
   when: node_config_hook is defined
@@ -67,16 +88,6 @@
     state: latest
   when: not openshift.common.is_containerized | bool
 
-- name: Restart openvswitch
-  systemd:
-    name: openvswitch
-    state: started
-  when:
-  - not openshift.common.is_containerized | bool
-
-# Mandatory Docker restart, ensure all containerized services are running:
-- include: docker/restart.yml
-
 - name: Update oreg value
   yedit:
     src: "{{ openshift.common.config_base }}/node/node-config.yaml"
@@ -111,11 +122,8 @@
   when: swap_result.stdout_lines | length > 0
 # End Disable Swap Block
 
-- name: Restart rpm node service
-  service:
-    name: "{{ openshift.common.service_type }}-node"
-    state: restarted
-  when: not openshift.common.is_containerized | bool
+# Restart all services
+- include: restart.yml
 
 - name: Wait for node to be ready
   oc_obj:
diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 176fc3c0b..a9fab74e1 100644
--- a/roles/openshift_node_upgrade/tasks/docker/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -12,7 +12,7 @@
   openshift_facts:
     role: docker
 
-- name: Restart containerized services
+- name: Start services
   service: name={{ item }} state=started
   with_items:
   - etcd_container
@@ -22,7 +22,6 @@
   - "{{ openshift.common.service_type }}-master-controllers"
   - "{{ openshift.common.service_type }}-node"
   failed_when: false
-  when: openshift.common.is_containerized | bool
 
 - name: Wait for master API to come back online
   wait_for:
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
 # Block attempts to install origin without specifying some kind of version information.
 # This is because the latest tags for origin are usually alpha builds, which should not
 # be used by default. Users must indicate what they want.
-- fail:
-    msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+  fail:
+    msg: |-
+      To install a containerized Origin release, you must set openshift_release or
+      openshift_image_tag in your inventory to specify which version of the OpenShift
+      component images to use. You may want the latest (usually alpha) releases or
+      a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
   when:
   - is_containerized | bool
  - openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
   when: openshift_release is defined
 
 # Verify that the image tag is in a valid format
-- block:
+- when:
+  - openshift_image_tag is defined
+  - openshift_image_tag != "latest"
+  block:
 
   # Verifies that when the deployment type is origin the version:
   # - starts with a v
@@ -35,12 +43,14 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers, letters, dashes and dots.
-  - name: Verify Origin openshift_image_tag is valid
+  - name: (Origin) Verify openshift_image_tag is valid
+    when: openshift.common.deployment_type == 'origin'
     assert:
      that:
      - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
-      msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
-    when: openshift.common.deployment_type == 'origin'
+      msg: |-
+        openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+        You specified openshift_image_tag={{ openshift_image_tag }}
 
   # Verifies that when the deployment type is openshift-enterprise the version:
   # - starts with a v
@@ -48,16 +58,14 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers
-  - name: Verify Enterprise openshift_image_tag is valid
+  - name: (Enterprise) Verify openshift_image_tag is valid
+    when: openshift.common.deployment_type == 'openshift-enterprise'
     assert:
      that:
      - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
-      msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
-    when: openshift.common.deployment_type == 'openshift-enterprise'
-
-  when:
-  - openshift_image_tag is defined
-  - openshift_image_tag != "latest"
+      msg: |-
+        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+        You specified openshift_image_tag={{ openshift_image_tag }}
 
 # Make sure we copy this to a fact if given a var:
 - set_fact:
@@ -119,30 +127,42 @@
 
 - fail:
     msg: openshift_version role was unable to set openshift_version
+  name: Abort if openshift_version was not set
   when: openshift_version is not defined
 
 - fail:
     msg: openshift_version role was unable to set openshift_image_tag
+  name: Abort if openshift_image_tag was not set
   when: openshift_image_tag is not defined
 
 - fail:
     msg: openshift_version role was unable to set openshift_pkg_version
+  name: Abort if openshift_pkg_version was not set
   when: openshift_pkg_version is not defined
 
 - fail:
-    msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+    msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+  name: Abort if no OpenShift version is available
   when:
  - not is_containerized | bool
  - openshift_version == '0.0'
 
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
 # the rpm version we looked up matches the release requested and error out if not.
-- fail:
-    msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the release requested does not match the available version.
   when:
  - not is_containerized | bool
  - openshift_release is defined
-  - not openshift_version.startswith(openshift_release) | bool
+  assert:
+    that:
+    - openshift_version.startswith(openshift_release) | bool
+    msg: |-
+      You requested openshift_release {{ openshift_release }}, which is not matched by
+      the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+      on host {{ inventory_hostname }}.
+      We will only install the latest RPMs, so please ensure you are getting the release
+      you expect. You may need to adjust your Ansible inventory, modify the repositories
+      available on the host, or run the appropriate OpenShift upgrade playbook.
 
 # The end result of these three variables is quite important so make sure they are displayed and logged:
 - debug: var=openshift_release
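[Review note] The Jinja `match` filter used in the asserts applies a Python regular expression. A standalone sketch of the Origin tag rule outside Ansible (same pattern as the (Origin) assert above, with hypothetical inputs):

import re

# Same expression as in the assert, minus the Jinja/YAML escaping.
ORIGIN_TAG = re.compile(r'^v?\d+\.\d+\.\d+(-[\w\-\.]*)?$')

assert ORIGIN_TAG.match('v1.2.3')
assert ORIGIN_TAG.match('v3.5.1-alpha.1')
assert not ORIGIN_TAG.match('latest')   # also excluded earlier by the block's when: condition
assert not ORIGIN_TAG.match('3.5')      # Origin tags need all three version components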