 playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml |  25
 playbooks/common/openshift-cluster/upgrades/post_control_plane.yml          |  87
 roles/openshift_logging/defaults/main.yml                                   |   2
 roles/openshift_logging/tasks/delete_logging.yaml                           |   1
 roles/openshift_logging/tasks/label_node.yaml                               |  52
 roles/openshift_logging/tasks/start_cluster.yaml                            | 114
 roles/openshift_logging/tasks/stop_cluster.yaml                             | 107
 roles/openshift_logging/tasks/upgrade_logging.yaml                          |  25
 roles/openshift_manage_node/tasks/main.yml                                  |   3
 roles/openshift_metrics/defaults/main.yaml                                  |   2
 10 files changed, 203 insertions(+), 215 deletions(-)
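
Every file below follows the same conversion: raw `oc` shell-outs via the command module are replaced with the lib_openshift `oc_obj` module, and stdout parsing is replaced with structured access to the registered result. A minimal sketch of that pattern, assuming a lib_openshift-era oc_obj (the task names and the selector here are illustrative, not part of this commit):

    # oc_obj returns parsed JSON; for state=list the objects live under
    # results.results[0]['items'], which is how the tasks below consume it.
    - name: List nodes matching a selector (illustrative)
      oc_obj:
        state: list
        kind: node
        selector: "region=infra"   # illustrative selector
      register: example_nodes

    # Names are extracted with map/list instead of splitting stdout.
    - name: Collect node names (illustrative)
      set_fact:
        example_node_names: "{{ example_nodes.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
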
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 37c89374c..046535680 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -1,20 +1,17 @@
 ---
 - name: Filter list of nodes to be upgraded if necessary
   hosts: oo_first_master
+
+  roles:
+  - lib_openshift
+
   tasks:
   - name: Retrieve list of openshift nodes matching upgrade label
-    command: >
-      {{ openshift.common.client_binary }}
-      get nodes
-      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      --selector={{ openshift_upgrade_nodes_label }}
-      -o jsonpath='{.items[*].metadata.name}'
-    register: matching_nodes
-    changed_when: false
-    when: openshift_upgrade_nodes_label is defined
-
-  - set_fact:
-      nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+    oc_obj:
+      state: list
+      kind: node
+      selector: "{{ openshift_upgrade_nodes_label }}"
+    register: nodes_to_upgrade
     when: openshift_upgrade_nodes_label is defined
 
   # We got a list of nodes with the label, now we need to match these with inventory hosts
@@ -26,7 +23,9 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: " {{ groups['oo_nodes_to_config'] }}"
-    when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+    when:
+    - openshift_upgrade_nodes_label is defined
+    - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
     changed_when: false
 
 # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index d2ed64300..f0191e380 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,43 +9,37 @@
     registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
     router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
     oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  roles:
-  - openshift_manageiq
-  # Create the new templates shipped in 3.2, existing templates are left
-  # unmodified. This prevents the subsequent role definition for
-  # openshift_examples from failing when trying to replace templates that do
-  # not already exist. We could have potentially done a replace --force to
-  # create and update in one step.
-  - openshift_examples
-  - openshift_hosted_templates
-  # Update the existing templates
-  - role: openshift_examples
-    registry_url: "{{ openshift.master.registry_url }}"
-    openshift_examples_import_command: replace
-  - role: openshift_hosted_templates
-    registry_url: "{{ openshift.master.registry_url }}"
-    openshift_hosted_templates_import_command: replace
-  pre_tasks:
+  pre_tasks:
+  - name: Load lib_openshift modules
+    include_role:
+      name: lib_openshift
   # TODO: remove temp_skip_router_registry_upgrade variable. This is a short term hack
   # to allow ops to use this control plane upgrade, without triggering router/registry
   # upgrade which has not yet been synced with their process.
   - name: Collect all routers
-    command: >
-      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+    oc_obj:
+      state: list
+      kind: pods
+      all_namespaces: True
+      selector: 'router'
     register: all_routers
-    failed_when: false
-    changed_when: false
     when: temp_skip_router_registry_upgrade is not defined
 
-  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
-    when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+  - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+    when:
+    - all_routers.results.returncode == 0
+    - temp_skip_router_registry_upgrade is not defined
 
   - set_fact: haproxy_routers=[]
-    when: all_routers.rc != 0 and temp_skip_router_registry_upgrade is not defined
+    when:
+    - all_routers.results.returncode != 0
+    - temp_skip_router_registry_upgrade is not defined
 
   - name: Update router image to current version
-    when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+    when:
+    - all_routers.results.returncode == 0
+    - temp_skip_router_registry_upgrade is not defined
     command: >
       {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
       '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
 
@@ -56,15 +50,17 @@
   # this task needs to be ran.
 
   - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
+    oc_obj:
+      state: list
+      kind: dc
+      name: docker-registry
     register: _default_registry
-    failed_when: false
-    changed_when: false
     when: temp_skip_router_registry_upgrade is not defined
 
   - name: Update registry image to current version
-    when: _default_registry.rc == 0 and temp_skip_router_registry_upgrade is not defined
+    when:
+    - _default_registry.results.results[0] != {}
+    - temp_skip_router_registry_upgrade is not defined
     command: >
       {{ oc_cmd }} patch dc/docker-registry -n default -p
       '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
@@ -73,19 +69,40 @@
   # need to do is check the current registry image version and see
   # if this task needs to be ran.
 
+  roles:
+  - openshift_manageiq
+  # Create the new templates shipped in 3.2, existing templates are left
+  # unmodified. This prevents the subsequent role definition for
+  # openshift_examples from failing when trying to replace templates that do
+  # not already exist. We could have potentially done a replace --force to
+  # create and update in one step.
+  - openshift_examples
+  - openshift_hosted_templates
+  # Update the existing templates
+  - role: openshift_examples
+    registry_url: "{{ openshift.master.registry_url }}"
+    openshift_examples_import_command: replace
+  - role: openshift_hosted_templates
+    registry_url: "{{ openshift.master.registry_url }}"
+    openshift_hosted_templates_import_command: replace
+
 # Check for warnings to be printed at the end of the upgrade:
 - name: Check for warnings
   hosts: oo_masters_to_config
   tasks:
   # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
-  - command: >
-      grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+  - name: grep pluginOrderOverride
+    command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
     register: grep_plugin_order_override
     when: openshift.common.version_gte_3_3_or_1_3 | bool
-    failed_when: false
+    changed_when: false
+
   - name: Warn if pluginOrderOverride is in use in master-config.yaml
-    debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
-    when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
+    debug:
+      msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+    when:
+    - not grep_plugin_order_override | skipped
+    - grep_plugin_order_override.rc == 0
 
 - include: ../reset_excluder.yml
   tags:
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index bdb168921..d9eebe688 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -19,7 +19,7 @@
 openshift_logging_curator_memory_limit: null
 openshift_logging_curator_ops_cpu_limit: 100m
 openshift_logging_curator_ops_memory_limit: null
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default(kibana.{{openshift.common.dns_domain}}) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.{{openshift.common.dns_domain}}') }}"
 openshift_logging_kibana_cpu_limit: null
 openshift_logging_kibana_memory_limit: null
 openshift_logging_kibana_proxy_debug: false
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 9621d0d1a..188ea246c 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -81,7 +81,6 @@
 # delete our service accounts
 - name: delete service accounts
   oc_serviceaccount:
-    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
     name: "{{ item }}"
     namespace: "{{ openshift_logging_namespace }}"
     state: absent
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
deleted file mode 100644
index ebe8f1ca8..000000000
--- a/roles/openshift_logging/tasks/label_node.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
-    -o jsonpath='{.metadata.labels}'
-  register: node_labels
-  when: not ansible_check_mode
-  changed_when: no
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}}
-  register: label_result
-  failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
-  when:
-  - value is defined
-  - node_labels.stdout is defined
-  - label not in node_labels.stdout
-  - unlabel is not defined or not unlabel
-  - not ansible_check_mode
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
-    -o jsonpath='{.metadata.labels.{{ label }}}'
-  register: label_value
-  ignore_errors: yes
-  changed_when: no
-  when:
-  - value is defined
-  - node_labels.stdout is defined
-  - label in node_labels.stdout
-  - unlabel is not defined or not unlabel
-  - not ansible_check_mode
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
-  register: label_result
-  failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
-  when:
-  - value is defined
-  - label_value.stdout is defined
-  - label_value.stdout != value
-  - unlabel is not defined or not unlabel
-  - not ansible_check_mode
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
-  register: label_result
-  failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
-  when:
-  - unlabel is defined
-  - unlabel
-  - not ansible_check_mode
-  - label in node_labels.stdout
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 69d2b2b6b..3e97487dc 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -1,125 +1,133 @@
 ---
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
-  register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+  oc_obj:
+    state: list
+    kind: node
   when: "'--all' in openshift_logging_fluentd_hosts"
-  check_mode: no
-  changed_when: no
+  register: fluentd_hosts
 
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+  set_fact:
+    openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   when: "'--all' in openshift_logging_fluentd_hosts"
 
 - name: start fluentd
-  include: label_node.yaml
-  vars:
-    host: "{{fluentd_host}}"
-    label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
-    value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+  oc_label:
+    name: "{{ fluentd_host }}"
+    kind: node
+    state: add
+    label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
   with_items: "{{ openshift_logging_fluentd_hosts }}"
   loop_control:
     loop_var: fluentd_host
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=es"
+    namespace: "{{openshift_logging_namespace}}"
   register: es_dc
-  check_mode: no
-  changed_when: no
 
 - name: start elasticsearch
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 1
-  with_items: "{{es_dc.stdout_lines}}"
+  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=kibana"
+    namespace: "{{openshift_logging_namespace}}"
   register: kibana_dc
-  check_mode: no
-  changed_when: no
 
 - name: start kibana
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
-  with_items: "{{kibana_dc.stdout_lines}}"
+  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=curator"
+    namespace: "{{openshift_logging_namespace}}"
   register: curator_dc
-  check_mode: no
-  changed_when: no
 
 - name: start curator
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 1
-  with_items: "{{curator_dc.stdout_lines}}"
+  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=es-ops"
+    namespace: "{{openshift_logging_namespace}}"
   register: es_dc
-  check_mode: no
-  changed_when: no
 
 - name: start elasticsearch-ops
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 1
-  with_items: "{{es_dc.stdout_lines}}"
+  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
   when: openshift_logging_use_ops | bool
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=kibana-ops"
+    namespace: "{{openshift_logging_namespace}}"
   register: kibana_dc
-  check_mode: no
-  changed_when: no
 
 - name: start kibana-ops
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
    namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
-  with_items: "{{kibana_dc.stdout_lines}}"
+  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
   when: openshift_logging_use_ops | bool
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=curator-ops"
+    namespace: "{{openshift_logging_namespace}}"
   register: curator_dc
-  check_mode: no
-  changed_when: no
 
 - name: start curator-ops
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 1
-  with_items: "{{curator_dc.stdout_lines}}"
+  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
   when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 7826efabe..bae6aebbb 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -1,118 +1,133 @@
 ---
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
-  register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+  oc_obj:
+    state: list
+    kind: node
   when: "'--all' in openshift_logging_fluentd_hosts"
-  changed_when: no
+  register: fluentd_hosts
 
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+  set_fact:
+    openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   when: "'--all' in openshift_logging_fluentd_hosts"
 
 - name: stop fluentd
-  include: label_node.yaml
-  vars:
-    host: "{{fluentd_host}}"
-    label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
-    unlabel: True
+  oc_label:
+    name: "{{ fluentd_host }}"
+    kind: node
+    state: absent
+    label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
   with_items: "{{ openshift_logging_fluentd_hosts }}"
   loop_control:
     loop_var: fluentd_host
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=es"
+    namespace: "{{openshift_logging_namespace}}"
   register: es_dc
-  changed_when: no
 
 - name: stop elasticsearch
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 0
-  with_items: "{{es_dc.stdout_lines}}"
+  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=kibana"
+    namespace: "{{openshift_logging_namespace}}"
   register: kibana_dc
-  changed_when: no
 
 - name: stop kibana
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 0
-  with_items: "{{kibana_dc.stdout_lines}}"
+  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=curator"
+    namespace: "{{openshift_logging_namespace}}"
   register: curator_dc
-  changed_when: no
 
 - name: stop curator
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 0
-  with_items: "{{curator_dc.stdout_lines}}"
+  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=es-ops"
+    namespace: "{{openshift_logging_namespace}}"
   register: es_dc
-  changed_when: no
 
 - name: stop elasticsearch-ops
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 0
-  with_items: "{{es_dc.stdout_lines}}"
+  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
   when: openshift_logging_use_ops | bool
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=kibana-ops"
+    namespace: "{{openshift_logging_namespace}}"
   register: kibana_dc
-  changed_when: no
 
 - name: stop kibana-ops
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 0
-  with_items: "{{kibana_dc.stdout_lines}}"
+  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
   when: openshift_logging_use_ops | bool
 
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=curator-ops"
+    namespace: "{{openshift_logging_namespace}}"
   register: curator_dc
-  changed_when: no
 
 - name: stop curator-ops
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
-    kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
     replicas: 0
-  with_items: "{{curator_dc.stdout_lines}}"
+  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   loop_control:
     loop_var: object
   when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index 0dc31932c..0421cdf58 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -8,29 +8,34 @@
     start_cluster: False
 
 # start ES so that we can run migrate script
-- command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "component=es"
+    namespace: "{{openshift_logging_namespace}}"
   register: es_dc
-  check_mode: no
 
 - name: start elasticsearch
   oc_scale:
     kind: dc
-    name: "{{object.split('/')[1]}}"
+    name: "{{ object }}"
     namespace: "{{openshift_logging_namespace}}"
"{{openshift_logging_namespace}}" replicas: 1 - with_items: "{{es_dc.stdout_lines}}" + with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" loop_control: loop_var: object -- command: > - {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}' +- name: Wait for pods to stop + oc_obj: + state: list + kind: dc + selector: "component=es" + namespace: "{{openshift_logging_namespace}}" register: running_pod - until: running_pod.stdout != '' + until: running_pod.results.results.items[?(@.status.phase == "Running")].metadata.name != '' retries: 30 delay: 10 - changed_when: no - check_mode: no - name: Run upgrade script script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}} diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 73f55df12..9a883feed 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -31,7 +31,6 @@ name: "{{ openshift.node.nodename }}" kind: node state: list - kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig" register: get_node until: "'metadata' in get_node.results.results[0]" retries: 50 @@ -43,7 +42,6 @@ oadm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}" - kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig" retries: 10 delay: 5 register: node_schedulable @@ -57,7 +55,6 @@ kind: node state: add labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}" - kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig" namespace: default when: - "'nodename' in openshift.node" diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 83843f126..edaa7d0df 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -32,7 +32,7 @@ openshift_metrics_heapster_requests_memory: 0.9375G openshift_metrics_heapster_requests_cpu: null openshift_metrics_heapster_nodeselector: "" -openshift_metrics_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}" +openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}" openshift_metrics_duration: 7 openshift_metrics_resolution: 30s |