diff options
Diffstat (limited to 'playbooks')
-rw-r--r-- | playbooks/adhoc/s3_registry/s3_registry.j2                                                              |  4 ++--
-rw-r--r-- | playbooks/adhoc/s3_registry/s3_registry.yml                                                             |  6 ++++--
-rw-r--r-- | playbooks/aws/openshift-cluster/add_nodes.yml (renamed from playbooks/aws/openshift-cluster/addNodes.yml) |  0
-rw-r--r-- | playbooks/common/openshift-cluster/upgrades/files/versions.sh                                           |  4 ++--
-rw-r--r-- | playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml                                    | 18 ++++++++++++++++++
-rw-r--r-- | playbooks/common/openshift-etcd/config.yml                                                              |  1 -
-rw-r--r-- | playbooks/common/openshift-master/config.yml                                                            |  4 +++-
-rw-r--r-- | playbooks/common/openshift-master/restart.yml                                                           | 14 +++++++-------
-rw-r--r-- | playbooks/common/openshift-node/config.yml                                                              |  3 ++-
9 files changed, 38 insertions(+), 16 deletions(-)
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index d997a73b1..10454ad11 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -9,8 +9,8 @@ storage:
   s3:
     accesskey: {{ aws_access_key }}
     secretkey: {{ aws_secret_key }}
-    region: us-east-1
-    bucket: {{ clusterid }}-docker
+    region: {{ aws_bucket_region }}
+    bucket: {{ aws_bucket_name }}
     encrypt: true
     secure: true
     v4auth: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index d409b4086..0814efae2 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,7 +1,7 @@
 ---
 # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
 # Usage:
-# ansible-playbook s3_registry.yml -e clusterid="mycluster"
+# ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="clusterid-docker" -e aws_region="us-east-1"
 #
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
@@ -13,6 +13,8 @@
   vars:
     aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
     aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+    aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
+    aws_bucket_region: "{{ aws_region | lookup('env', 'S3_REGION') | default('us-east-1') }}"
 
   tasks:
 
@@ -29,7 +31,7 @@
 
   - name: Create S3 bucket
     local_action:
-      module: s3 bucket="{{ clusterid }}-docker" mode=create
+      module: s3 bucket="{{ aws_bucket_name }}" mode=create
 
   - name: Set up registry environment variable
     command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
diff --git a/playbooks/aws/openshift-cluster/addNodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
index 3d88e6b23..3d88e6b23 100644
--- a/playbooks/aws/openshift-cluster/addNodes.yml
+++ b/playbooks/aws/openshift-cluster/add_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
index b46407ed7..3a1a8ebb1 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 
-yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | tr '\n' ' ')
+yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
 
-yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | tr '\n' ' ')
+yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
 
 echo "---"
 echo "curr_version: ${yum_installed}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 2a4eecad9..8ec379109 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -422,6 +422,24 @@
     - name: Ensure node service enabled
       service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
 
+    - name: Install Ceph storage plugin dependencies
+      action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+
+    - name: Install GlusterFS storage plugin dependencies
+      action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+
+    - name: Set sebooleans to allow gluster storage plugin access from containers
+      seboolean:
+        name: "{{ item }}"
+        state: yes
+        persistent: yes
+      when: ansible_selinux and ansible_selinux.status == "enabled"
+      with_items:
+        - virt_use_fusefs
+        - virt_sandbox_use_fusefs
+      register: sebool_result
+      failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
+
     - set_fact:
         node_update_complete: True
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 9a5ae0e6b..d23a54511 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -89,7 +89,6 @@
   roles:
   - etcd
   - role: nickhammond.logrotate
-    when: not openshift.common.is_containerized | bool
 
 - name: Delete temporary directory on localhost
   hosts: localhost
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 9f8443599..a0d21451f 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -86,6 +86,7 @@
     etcd_generated_certs_dir: /etc/etcd/generated_certs
     etcd_needing_client_certs: "{{ hostvars
                                    | oo_select_keys(groups['oo_masters_to_config'])
+                                   | default([])
                                    | oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
   roles:
@@ -337,9 +338,10 @@
   roles:
   - openshift_master
   - role: nickhammond.logrotate
-    when: not openshift.common.is_containerized | bool
   - role: fluentd_master
     when: openshift.common.use_fluentd | bool
+  - role: nuage_master
+    when: openshift.common.use_nuage | bool
   post_tasks:
   - name: Create group for deployment type
     group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 052892863..02449e40d 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -77,13 +77,6 @@
       when: openshift.master.cluster_method | default(None) == 'pacemaker'
       failed_when: false
       changed_when: false
-    # Any master which did not report 'active' or 'inactive' is likely
-    # unhealthy. Other possible states are 'unknown' or 'failed'.
-    - fail:
-        msg: >
-          Got invalid service state from {{ openshift.common.service_type }}-master
-          on {{ inventory_hostname }}. Please verify pacemaker cluster.
-      when: openshift.master.cluster_method | default(None) == 'pacemaker' and active_check_output.stdout not in ['active', 'inactive']
     - set_fact:
         is_active: "{{ active_check_output.stdout == 'active' }}"
       when: openshift.master.cluster_method | default(None) == 'pacemaker'
@@ -92,6 +85,13 @@
   hosts: localhost
   become: no
   tasks:
+  - fail:
+      msg: >
+        Did not receive active status from any masters. Please verify pacemaker cluster.
+    when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
+              | oo_select_keys(groups['oo_masters_to_config'])
+              | oo_collect('is_active')
+              | list) }}"
   - name: Evaluate oo_active_masters
     add_host:
       name: "{{ item }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 1d31657ed..dc3c25107 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -211,8 +211,9 @@
   roles:
   - role: flannel
     when: openshift.common.use_flannel | bool
+  - role: nuage_node
+    when: openshift.common.use_nuage | bool
   - role: nickhammond.logrotate
-    when: not openshift.common.is_containerized | bool
   - role: fluentd_node
     when: openshift.common.use_fluentd | bool
   tasks: