39 files changed, 457 insertions, 342 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index fd9a1844f..d29838038 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.5.1-1 ./
+3.5.2-1 ./
diff --git a/HOOKS.md b/HOOKS.md
new file mode 100644
index 000000000..9c5f80054
--- /dev/null
+++ b/HOOKS.md
@@ -0,0 +1,70 @@
+# Hooks
+
+The Ansible installer allows operators to execute custom tasks during
+specific operations through a system called hooks. Hooks allow operators to
+provide files defining tasks to execute before and/or after specific areas
+during installations and upgrades. This can be very helpful to validate
+or modify custom infrastructure when installing/upgrading OpenShift.
+
+It is important to remember that when a hook fails, the operation fails, so
+a good hook can run multiple times and produce the same results. A great
+hook is idempotent.
+
+**Note**: There is currently no standard interface for hooks. In the future
+a standard interface will be defined, and any existing hooks will need to be
+updated to meet the new standard.
+
+## Using Hooks
+
+Hooks are defined in the ``hosts`` inventory file under the ``OSEv3:vars``
+section.
+
+Each hook should point to a YAML file which defines a set of Ansible tasks.
+The file is used as an include, so it cannot be a playbook. Best practice is
+to use an absolute path to the hook file to avoid any ambiguity.
+
+### Example
+```ini
+[OSEv3:vars]
+# <snip>
+openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+openshift_master_upgrade_hook=/usr/share/custom/master.yml
+openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+# <snip>
+```
+
+Hook files must be YAML-formatted files that define a set of Ansible tasks.
+The file may **not** be a playbook.
+
+### Example
+```yaml
+---
+# Trivial example forcing an operator to ack the start of an upgrade
+# file=/usr/share/custom/pre_master.yml
+
+- name: note the start of a master upgrade
+  debug:
+    msg: "Master upgrade of {{ inventory_hostname }} is about to start"
+
+- name: require an operator to agree to start an upgrade
+  pause:
+    prompt: "Hit enter to start the master upgrade"
+```
+
+## Upgrade Hooks
+
+### openshift_master_upgrade_pre_hook
+- Runs **before** each master is upgraded.
+- This hook runs against **each master** in serial.
+- If a task needs to run against a different host, it must use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+### openshift_master_upgrade_hook
+- Runs **after** each master is upgraded but **before** its service/system restart.
+- This hook runs against **each master** in serial.
+- If a task needs to run against a different host, it must use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+### openshift_master_upgrade_post_hook
+- Runs **after** each master is upgraded and has had its service/system restart.
+- This hook runs against **each master** in serial.
+- If a task needs to run against a different host, it must use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
diff --git a/README.md b/README.md
@@ -74,6 +74,12 @@ you are not running a stable release.
- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html) - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html) + +## Installer Hooks + +See the [hooks documentation](HOOKS.md). + + ## Contributing See the [contribution guide](CONTRIBUTING.md). diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 0b7c44660..85675f5f9 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.5.1 +Version: 3.5.2 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -253,6 +253,67 @@ Atomic OpenShift Utilities includes %changelog +* Wed Jan 25 2017 Scott Dodson <sdodson@redhat.com> 3.5.2-1 +- Sync latest image streams (sdodson@redhat.com) +- Fix containerized haproxy config (andrew@andrewklau.com) +- Allow RHEL subscription for OSE 3.4 (lhuard@amadeus.com) +- fixes BZ-1415447. Error when stopping heapster. Modify to be conditional + include (jcantril@redhat.com) +- override nodename for gce with cloudprovider (jdetiber@redhat.com) +- fixes jks generation, node labeling, and rerunning for oauth secrets + (ewolinet@redhat.com) +- allow openshift_logging role to specify nodeSelectors (jcantril@redhat.com) +- Remove is_containerized check for firewalld installs (rteague@redhat.com) +- Clean up pylint for delete_empty_keys. (abutcher@redhat.com) +- [os_firewall] Fix default iptables args. (abutcher@redhat.com) +- Add new option 'openshift_docker_selinux_enabled' (rteague@redhat.com) +- Temporary work-around for flake8 vs maccabe version conflict + (tbielawa@redhat.com) +- do not set empty proxy env variable defaults (bparees@redhat.com) +- fix BZ1414477. Use keytool on control node and require java + (jcantril@redhat.com) +- Remove unused temporary directory in master config playbook. + (abutcher@redhat.com) +- Added link to HOOKS in README (smilner@redhat.com) +- HOOKS.md added documenting new hooks (smilner@redhat.com) +- [os_firewall] Add -w flag to wait for iptables xtables lock. + (abutcher@redhat.com) +- fixes BZ-1414625. Check for httpd-tools and java before install + (jcantril@redhat.com) +- Add a mid upgrade hook, re-prefix variables. (dgoodwin@redhat.com) +- treat force_pull as a bool (bparees@redhat.com) +- Adding to ansible spec and changing logging jks generation to be a + local_action (ewolinet@redhat.com) +- Add containzerized haproxy option (andrew@andrewklau.com) +- Reorder node dnsmasq dependency s.t. networkmanager is restarted after + firewall changes have been applied. (abutcher@redhat.com) +- Removing docker run strategy and make java a requirement for control host + (ewolinet@redhat.com) +- Adding version to lib_openshift (kwoodson@redhat.com) +- Updating to use docker run instead of scheduling jks gen pod + (ewolinet@redhat.com) +- jenkins v1.3 templates should not enable oauth (gmontero@redhat.com) +- fix oc_apply to allow running on any control node (jcantril@redhat.com) +- g_master_mktemp in openshift-master conflicts with + openshift_master_certificates (rmeggins@redhat.com) +- fixes #3127. Get files for oc_apply from remote host (jcantril@redhat.com) +- Debug message before running hooks. (dgoodwin@redhat.com) +- Cleaning repo cache earlier (rteague@redhat.com) +- Added tar as a requirement per BZ1388445 (smilner@redhat.com) +- fixes BZ141619. Corrects the variable in the README (jcantril@redhat.com) +- Run user provided hooks prior to system/service restarts. 
+ (dgoodwin@redhat.com) +- Implement pre/post master upgrade hooks. (dgoodwin@redhat.com) +- Adding oc_obj to the lib_openshift library (kwoodson@redhat.com) +- Addressing found issues with logging role (ewolinet@redhat.com) +- Updated the generate.py scripts for tox and virtualenv. (kwoodson@redhat.com) +- Adding tox tests for generated code. (kwoodson@redhat.com) +- Perform master upgrades in a single play serially. (dgoodwin@redhat.com) +- Validate system restart policy during pre-upgrade. (dgoodwin@redhat.com) +- Correct consistency between upgrade playbooks (rteague@redhat.com) +- Wait for nodes to be ready before proceeding with upgrade. + (dgoodwin@redhat.com) + * Wed Jan 18 2017 Scott Dodson <sdodson@redhat.com> 3.5.1-1 - More reliable wait for master after full host reboot. (dgoodwin@redhat.com) - kubelet must have rw to cgroups for pod/qos cgroups to function diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 0f226f5f9..a95cb68b7 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -38,6 +38,9 @@ - set_fact: openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" when: openshift_docker_log_options is not defined + - set_fact: + openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" + when: openshift_docker_selinux_enabled is not defined - include: ../openshift-etcd/config.yml tags: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 86b344d7a..2bb460815 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -87,6 +87,19 @@ - name: Restart rpm node service service: name="{{ openshift.common.service_type }}-node" state=restarted when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool + + - name: Wait for node to be ready + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} --no-headers + register: node_output + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_upgrade + until: "{{ node_output.stdout.split()[1].startswith('Ready')}}" + # Give the node two minutes to come back online. Note that we pre-pull images now + # so containerized services should restart quickly as well. 
+ retries: 24 + delay: 5 + - name: Set node schedulability command: > {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index a8935370a..66c9cfa0f 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -96,7 +96,7 @@ dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='\ - {% if ansible_selinux and ansible_selinux.status == '''enabled''' %} --selinux-enabled{% endif %}\ + {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ {% if docker_options is defined %} {{ docker_options }}{% endif %}\ diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml index c9ec3b82f..fe6069ea9 100644 --- a/roles/openshift_builddefaults/vars/main.yml +++ b/roles/openshift_builddefaults/vars/main.yml @@ -23,7 +23,6 @@ builddefaults_yaml: imageLabels: "{{ openshift_builddefaults_image_labels | default(None) }}" nodeSelector: "{{ openshift_builddefaults_nodeselectors | default(None) }}" annotations: "{{ openshift_builddefaults_annotations | default(None) }}" - #resources: "{{ openshift.builddefaults.resources | default(None) }}" resources: requests: cpu: "{{ openshift_builddefaults_resources_requests_cpu | default(None) }}" diff --git a/roles/openshift_buildoverrides/tasks/main.yml b/roles/openshift_buildoverrides/tasks/main.yml index 82fce1c5b..87d0e6f21 100644 --- a/roles/openshift_buildoverrides/tasks/main.yml +++ b/roles/openshift_buildoverrides/tasks/main.yml @@ -1,13 +1,4 @@ --- -#- name: Set buildoverrides -# openshift_facts: -# role: buildoverrides -# local_facts: -# force_pull: "{{ openshift_buildoverrides_force_pull | default(None) }}" -# image_labels: "{{ openshift_buildoverrides_image_labels | default(None) }}" -# nodeselectors: "{{ openshift_buildoverrides_nodeselectors | default(None) }}" -# annotations: "{{ openshift_buildoverrides_annotations | default(None) }}" - - name: Set buildoverrides config structure openshift_facts: role: buildoverrides diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml index f0f9c255b..cf49a6ebf 100644 --- a/roles/openshift_buildoverrides/vars/main.yml +++ b/roles/openshift_buildoverrides/vars/main.yml @@ -1,10 +1,11 @@ --- +force_pull: "{{ openshift_buildoverrides_force_pull | default('') }}" buildoverrides_yaml: BuildOverrides: configuration: apiVersion: v1 kind: BuildOverridesConfig - forcePull: "{{ openshift_buildoverrides_force_pull | default('', true) }}" + forcePull: "{{ '' if force_pull == '' else force_pull | bool }}" imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}" nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}" annotations: "{{ openshift_buildoverrides_annotations | default(None) }}" diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 613c237a3..049ceffe0 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -9,6 +9,7 @@ additional_registries: "{{ openshift_docker_additional_registries | 
default(None) }}" blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}" insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}" + selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" log_driver: "{{ openshift_docker_log_driver | default(None) }}" log_options: "{{ openshift_docker_log_options | default(None) }}" options: "{{ openshift_docker_options | default(None) }}" @@ -23,6 +24,7 @@ | default(omit) }}" docker_insecure_registries: "{{ openshift.docker.insecure_registries | default(omit) }}" + docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json index 9dbbf89d1..9732e59e1 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json @@ -22,8 +22,11 @@ "name": "${NAME}" }, "stringData" : { - "databaseUser" : "${DATABASE_USER}", - "databasePassword" : "${DATABASE_PASSWORD}" + "database-user" : "${DATABASE_USER}", + "database-password" : "${DATABASE_PASSWORD}", + "cakephp-secret-token" : "${CAKEPHP_SECRET_TOKEN}", + "cakephp-security-salt" : "${CAKEPHP_SECURITY_SALT}", + "cakephp-security-cipher-seed" : "${CAKEPHP_SECURITY_CIPHER_SEED}" } }, { @@ -97,12 +100,12 @@ "from": { "kind": "ImageStreamTag", "namespace": "${NAMESPACE}", - "name": "php:5.6" + "name": "php:7.0" }, "env": [ { - "name": "COMPOSER_MIRROR", - "value": "${COMPOSER_MIRROR}" + "name": "COMPOSER_MIRROR", + "value": "${COMPOSER_MIRROR}" } ] } @@ -201,12 +204,12 @@ } }, "livenessProbe": { - "timeoutSeconds": 3, - "initialDelaySeconds": 30, - "httpGet": { - "path": "/", - "port": 8080 - } + "timeoutSeconds": 3, + "initialDelaySeconds": 30, + "httpGet": { + "path": "/", + "port": 8080 + } }, "env": [ { @@ -226,7 +229,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -235,21 +238,36 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, { "name": "CAKEPHP_SECRET_TOKEN", - "value": "${CAKEPHP_SECRET_TOKEN}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "cakephp-secret-token" + } + } }, { "name": "CAKEPHP_SECURITY_SALT", - "value": "${CAKEPHP_SECURITY_SALT}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "cakephp-security-salt" + } + } }, { "name": "CAKEPHP_SECURITY_CIPHER_SEED", - "value": "${CAKEPHP_SECURITY_CIPHER_SEED}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "cakephp-security-cipher-seed" + } + } }, { "name": "OPCACHE_REVALIDATE_FREQ", @@ -257,9 +275,9 @@ } ], "resources": { - "limits": { - "memory": "${MEMORY_LIMIT}" - } + "limits": { + "memory": "${MEMORY_LIMIT}" + } } } ] @@ -313,7 +331,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${NAMESPACE}", - "name": "mysql:5.6" + "name": "mysql:5.7" } } }, @@ -362,40 +380,40 @@ } }, "livenessProbe": { - "timeoutSeconds": 1, - "initialDelaySeconds": 30, - "tcpSocket": { - "port": 3306 - } + "timeoutSeconds": 1, + "initialDelaySeconds": 30, + "tcpSocket": 
{ + "port": 3306 + } }, "env": [ - { - "name": "MYSQL_USER", - "valueFrom": { - "secretKeyRef" : { - "name" : "${NAME}", - "key" : "databaseUser" - } + { + "name": "MYSQL_USER", + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "database-user" + } + } + }, + { + "name": "MYSQL_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "database-password" } - }, - { - "name": "MYSQL_PASSWORD", - "valueFrom": { - "secretKeyRef" : { - "name" : "${NAME}", - "key" : "databasePassword" - } - } - }, - { - "name": "MYSQL_DATABASE", - "value": "${DATABASE_NAME}" } + }, + { + "name": "MYSQL_DATABASE", + "value": "${DATABASE_NAME}" + } ], "resources": { - "limits": { - "memory": "${MEMORY_MYSQL_LIMIT}" - } + "limits": { + "memory": "${MEMORY_MYSQL_LIMIT}" + } } } ] diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json index dccb8bf7f..18100974b 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json @@ -22,8 +22,9 @@ "name": "${NAME}" }, "stringData" : { - "databaseUser" : "${DATABASE_USER}", - "databasePassword" : "${DATABASE_PASSWORD}" + "database-user" : "${DATABASE_USER}", + "database-password" : "${DATABASE_PASSWORD}", + "keybase" : "${SECRET_KEY_BASE}" } }, { @@ -97,7 +98,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${NAMESPACE}", - "name": "perl:5.20" + "name": "perl:5.24" }, "env": [ { @@ -207,7 +208,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -216,7 +217,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, @@ -226,7 +227,12 @@ }, { "name": "SECRET_KEY_BASE", - "value": "${SECRET_KEY_BASE}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "keybase" + } + } }, { "name": "PERL_APACHE2_RELOAD", @@ -290,7 +296,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${NAMESPACE}", - "name": "mysql:5.6" + "name": "mysql:5.7" } } }, @@ -351,7 +357,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -360,7 +366,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json index 59ff8a988..64b914e61 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json @@ -22,8 +22,9 @@ "name": "${NAME}" }, "stringData" : { - "databaseUser" : "${DATABASE_USER}", - "databasePassword" : "${DATABASE_PASSWORD}" + "database-user" : "${DATABASE_USER}", + "database-password" : "${DATABASE_PASSWORD}", + "django-secret-key" : "${DJANGO_SECRET_KEY}" } }, { @@ -218,7 +219,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -227,7 +228,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, @@ -237,7 +238,12 @@ }, { "name": "DJANGO_SECRET_KEY", - "value": 
"${DJANGO_SECRET_KEY}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "django-secret-key" + } + } } ], "resources": { @@ -338,7 +344,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -347,7 +353,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json index 91f9ec7b3..6a55f0251 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json @@ -22,9 +22,9 @@ "name": "${NAME}" }, "stringData": { - "databaseUser": "${DATABASE_USER}", - "databasePassword": "${DATABASE_PASSWORD}", - "databaseAdminPassword" : "${DATABASE_ADMIN_PASSWORD}" + "database-user": "${DATABASE_USER}", + "database-password": "${DATABASE_PASSWORD}", + "database-admin-password" : "${DATABASE_ADMIN_PASSWORD}" } }, { @@ -201,7 +201,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -210,7 +210,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, @@ -223,7 +223,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseAdminPassword" + "key" : "database-admin-password" } } } @@ -336,7 +336,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -345,7 +345,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, @@ -358,7 +358,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseAdminPassword" + "key" : "database-admin-password" } } } diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json index 6373562c4..043554c79 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json @@ -22,11 +22,11 @@ "name": "${NAME}" }, "stringData" : { - "databaseUser" : "${DATABASE_USER}", - "databasePassword" : "${DATABASE_PASSWORD}", - "applicationUser" : "${APPLICATION_USER}", - "applicationPassword" : "${APPLICATION_PASSWORD}", - "keyBase" : "${SECRET_KEY_BASE}" + "database-user" : "${DATABASE_USER}", + "database-password" : "${DATABASE_PASSWORD}", + "application-user" : "${APPLICATION_USER}", + "application-password" : "${APPLICATION_PASSWORD}", + "keybase" : "${SECRET_KEY_BASE}" } }, { @@ -104,8 +104,8 @@ }, "env": [ { - "name": "RUBYGEM_MIRROR", - "value": "${RUBYGEM_MIRROR}" + "name": "RUBYGEM_MIRROR", + "value": "${RUBYGEM_MIRROR}" } ] } @@ -148,7 +148,7 @@ "strategy": { "type": "Recreate", "recreateParams": { - "pre": { + "pre": { "failurePolicy": "Abort", "execNewPod": { "command": [ @@ -224,7 +224,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databaseUser" + "key" : "database-user" } } }, @@ -233,7 +233,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "databasePassword" + "key" : "database-password" } } }, @@ -246,7 +246,7 @@ "valueFrom": { 
"secretKeyRef" : { "name" : "${NAME}", - "key" : "keyBase" + "key" : "keybase" } } }, @@ -267,7 +267,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "applicationUser" + "key" : "application-user" } } }, @@ -276,7 +276,7 @@ "valueFrom": { "secretKeyRef" : { "name" : "${NAME}", - "key" : "applicationPassword" + "key" : "application-password" } } }, @@ -286,9 +286,9 @@ } ], "resources": { - "limits": { - "memory": "${MEMORY_LIMIT}" - } + "limits": { + "memory": "${MEMORY_LIMIT}" + } } } ] @@ -400,11 +400,21 @@ "env": [ { "name": "POSTGRESQL_USER", - "value": "${DATABASE_USER}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "database-user" + } + } }, { "name": "POSTGRESQL_PASSWORD", - "value": "${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef" : { + "name" : "${NAME}", + "key" : "database-password" + } + } }, { "name": "POSTGRESQL_DATABASE", @@ -420,9 +430,9 @@ } ], "resources": { - "limits": { - "memory": "${MEMORY_POSTGRESQL_LIMIT}" - } + "limits": { + "memory": "${MEMORY_POSTGRESQL_LIMIT}" + } } } ] diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 10121f82a..78886dcea 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1032,6 +1032,8 @@ def set_nodename(facts): if 'node' in facts and 'common' in facts: if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack': facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '') + elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce': + facts['node']['nodename'] = '.'.split(facts['provider']['metadata']['hostname'])[0] else: facts['node']['nodename'] = facts['common']['hostname'].lower() return facts @@ -1458,7 +1460,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw dict: the merged facts """ additive_facts = ['named_certificates'] - protected_facts = ['ha', 'master_count'] + protected_facts = ['ha'] # Facts we do not ever want to merge. These originate in inventory variables # and contain JSON dicts. We don't ever want to trigger a merge @@ -1511,14 +1513,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw # it so we will determine if it is okay to change this # fact. elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]: - # The master count (int) can only increase unless it - # has been passed as a protected fact to overwrite. - if key == 'master_count' and new[key] is not None and new[key] is not '': - if int(value) <= int(new[key]): - facts[key] = copy.deepcopy(new[key]) - else: - # pylint: disable=line-too-long - module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') # noqa: F405 # ha (bool) can not change unless it has been passed # as a protected fact to overwrite. if key == 'ha': @@ -1690,9 +1684,38 @@ def set_builddefaults_facts(facts): if 'admission_plugin_config' not in facts['master']: facts['master']['admission_plugin_config'] = dict() facts['master']['admission_plugin_config'].update(builddefaults['config']) + # if the user didn't actually provide proxy values, delete the proxy env variable defaults. + delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env']) + return facts +def delete_empty_keys(keylist): + """ Delete dictionary elements from keylist where "value" is empty. 
+ + Args: + keylist(list): A list of builddefault configuration envs. + + Returns: + none + + Example: + keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}, + {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}, + {'name': 'NO_PROXY', 'value': ''}] + + After calling delete_empty_keys the provided list is modified to become: + + [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}, + {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}] + """ + count = 0 + for i in range(0, len(keylist)): + if len(keylist[i - count]['value']) == 0: + del keylist[i - count] + count += 1 + + def set_buildoverrides_facts(facts): """ Set build overrides diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index e9bc8b4ab..68bb4ace8 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -17,7 +17,7 @@ - name: Create the systemd unit files template: src: "haproxy.docker.service.j2" - dest: "{{ containerized_svc_dir }}/haproxy.service" + dest: "/etc/systemd/system/haproxy.service" when: openshift.common.is_containerized | bool notify: restart haproxy diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 index 79e695001..24fd635ec 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 @@ -1,16 +1,20 @@ # Global settings #--------------------------------------------------------------------- global + maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }} + log /dev/log local0 info +{% if openshift.common.is_containerized | bool %} + stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin +{% else %} chroot /var/lib/haproxy pidfile /var/run/haproxy.pid - maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }} user haproxy group haproxy daemon - log /dev/log local0 info # turn on stats unix socket stats socket /var/lib/haproxy/stats +{% endif %} #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 624876ab0..5385df3b7 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -5,7 +5,7 @@ PartOf=docker.service [Service] ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer -ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint="haproxy -f /etc/haproxy/haproxy.cfg" {{ openshift.common.router_image }}:{{ openshift_image_tag }} +ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop openshift_loadbalancer LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }} diff --git a/roles/openshift_logging/README.md 
b/roles/openshift_logging/README.md
index 9b71dc676..856cfa2b9 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -35,6 +35,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
+- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the curator pod will land.
- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
@@ -43,6 +44,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
+- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
@@ -67,6 +69,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
+- `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
When `openshift_logging_use_ops` is `True`, there are some additional vars.
These work the same as above for their non-ops counterparts, but apply to the OPS cluster instance: diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 20e50482e..740e490e1 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -88,56 +88,12 @@ - name: Creating necessary JKS certs include: generate_jks.yaml -# check for secret/logging-kibana-proxy -- command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}' - register: kibana_secret_oauth_check - ignore_errors: yes - changed_when: no - check_mode: no - -- command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}' - register: kibana_secret_session_check - ignore_errors: yes - changed_when: no - check_mode: no - -# check for oauthclient secret -- command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}' - register: oauth_secret_check - ignore_errors: yes - changed_when: no - check_mode: no - -# set or generate as needed +# TODO: make idempotent - name: Generate proxy session set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} check_mode: no - when: - - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == '' - -- name: Generate proxy session - set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }} - check_mode: no - when: - - kibana_secret_session_check.stdout is defined - - kibana_secret_session_check.stdout != '' +# TODO: make idempotent - name: Generate oauth client secret set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} check_mode: no - when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == '' - or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == '' - or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout - -- name: Generate oauth client secret - set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}} - check_mode: no - when: - - kibana_secret_oauth_check is defined - - kibana_secret_oauth_check.stdout != '' - - oauth_secret_check.stdout is defined - - oauth_secret_check.stdout != '' - - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml index adb6c2b2d..c6e2ccbc0 100644 --- a/roles/openshift_logging/tasks/generate_jks.yaml +++ b/roles/openshift_logging/tasks/generate_jks.yaml @@ -27,34 +27,22 @@ check_mode: no - name: Create placeholder for previously created JKS certs to prevent recreating... - file: - path: "{{local_tmp.stdout}}/elasticsearch.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r" when: elasticsearch_jks.stat.exists changed_when: False - name: Create placeholder for previously created JKS certs to prevent recreating... 
- file: - path: "{{local_tmp.stdout}}/logging-es.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r" when: logging_es_jks.stat.exists changed_when: False - name: Create placeholder for previously created JKS certs to prevent recreating... - file: - path: "{{local_tmp.stdout}}/system.admin.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r" when: system_admin_jks.stat.exists changed_when: False - name: Create placeholder for previously created JKS certs to prevent recreating... - file: - path: "{{local_tmp.stdout}}/truststore.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r" when: truststore_jks.stat.exists changed_when: False @@ -69,15 +57,16 @@ - ca.serial.txt - ca.crl.srl - ca.db + when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf vars: - top_dir: "{{local_tmp.stdout}}" + when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Run JKS generation script local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}} check_mode: no - become: yes when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Pushing locally generated JKS certs to remote host... @@ -105,7 +94,5 @@ when: not truststore_jks.stat.exists - name: Cleaning up temp dir - file: - path: "{{local_tmp.stdout}}" - state: absent + local_action: file path="{{local_tmp.stdout}}" state=absent changed_when: False diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml index 8f2825552..fcfce4e1e 100644 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -31,6 +31,7 @@ curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_memory_limit }}" replicas: "{{curator_replica_count.stdout | default (0)}}" + curator_node_selector: "{{openshift_logging_curator_nodeselector | default({}) }}" check_mode: no changed_when: no @@ -46,6 +47,7 @@ curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}" replicas: "{{curator_ops_replica_count.stdout | default (0)}}" + curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}" when: openshift_logging_use_ops check_mode: no changed_when: no diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index fbba46a35..9b1c004f2 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -33,6 +33,7 @@ volume_names: "{{es_pvc_pool | default([])}}" pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}" deploy_name: "{{item.1}}" + es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}" with_indexed_items: - "{{es_dc_pool | default([])}}" check_mode: no @@ -98,6 +99,7 @@ 
es_recover_after_nodes: "{{es_ops_recover_after_nodes}}" es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}" openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}" + es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}" with_indexed_items: - "{{es_dc_pool_ops | default([])}}" when: diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml index de4b018dd..f4df7de0c 100644 --- a/roles/openshift_logging/tasks/install_kibana.yaml +++ b/roles/openshift_logging/tasks/install_kibana.yaml @@ -35,6 +35,7 @@ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}" replicas: "{{kibana_replica_count.stdout | default (0)}}" + kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({}) }}" check_mode: no changed_when: no @@ -53,6 +54,7 @@ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}" replicas: "{{kibana_ops_replica_count.stdout | default (0)}}" + kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}" when: openshift_logging_use_ops check_mode: no changed_when: no diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml index aecb5d81b..bd5073381 100644 --- a/roles/openshift_logging/tasks/label_node.yaml +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -1,11 +1,34 @@ --- - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} + -o jsonpath='{.metadata.labels}' + register: node_labels + when: not ansible_check_mode + changed_when: no + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} + register: label_result + failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr + when: + - value is defined + - node_labels.stdout is defined + - label not in node_labels.stdout + - unlabel is not defined or not unlabel + - not ansible_check_mode + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} -o jsonpath='{.metadata.labels.{{ label }}}' register: label_value - failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr - when: not ansible_check_mode + ignore_errors: yes changed_when: no + when: + - value is defined + - node_labels.stdout is defined + - label in node_labels.stdout + - unlabel is not defined or not unlabel + - not ansible_check_mode - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 index d3b5d33a2..de6258eaa 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging/templates/curator.j2 @@ -28,6 +28,12 @@ spec: spec: terminationGracePeriod: 600 serviceAccountName: aggregated-logging-curator +{% if curator_node_selector is iterable and curator_node_selector | length > 0 %} + nodeSelector: +{% for key, value in curator_node_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} containers: - name: "curator" diff --git a/roles/openshift_logging/templates/es.j2 
b/roles/openshift_logging/templates/es.j2 index 291589690..ec84c6b76 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging/templates/es.j2 @@ -30,6 +30,12 @@ spec: securityContext: supplementalGroups: - {{openshift_logging_es_storage_group}} +{% if es_node_selector is iterable and es_node_selector | length > 0 %} + nodeSelector: +{% for key, value in es_node_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} containers: - name: "elasticsearch" diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2 index 1ec97701a..b42f62850 100644 --- a/roles/openshift_logging/templates/kibana.j2 +++ b/roles/openshift_logging/templates/kibana.j2 @@ -27,6 +27,12 @@ spec: component: "{{component}}" spec: serviceAccountName: aggregated-logging-kibana +{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} + nodeSelector: +{% for key, value in kibana_node_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} containers: - name: "kibana" diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh index bb046df87..f4315ef34 100755 --- a/roles/openshift_metrics/files/import_jks_certs.sh +++ b/roles/openshift_metrics/files/import_jks_certs.sh @@ -114,5 +114,3 @@ function import_certs() { } import_certs - -exit 0 diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml index f6bf6c1a6..f5192b005 100644 --- a/roles/openshift_metrics/tasks/import_jks_certs.yaml +++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml @@ -1,76 +1,4 @@ --- -- name: Check for jks-generator service account - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - get serviceaccount/jks-generator --no-headers - register: serviceaccount_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no - -- name: Create jks-generator service account - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - create serviceaccount jks-generator - when: not ansible_check_mode and "not found" in serviceaccount_result.stderr - -- name: Check for hostmount-anyuid scc entry - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - get scc hostmount-anyuid - -o jsonpath='{.users}' - register: scc_result - when: not ansible_check_mode - changed_when: no - -- name: Add to hostmount-anyuid scc - command: > - {{ openshift.common.admin_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - policy add-scc-to-user hostmount-anyuid - -z jks-generator - when: - - not ansible_check_mode - - scc_result.stdout.find("system:serviceaccount:{{openshift_metrics_project}}:jks-generator") == -1 - -- name: Copy JKS generation script - copy: - src: import_jks_certs.sh - dest: "{{openshift_metrics_certs_dir}}/import_jks_certs.sh" - check_mode: no - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd - register: metrics_keystore_password - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd - register: cassandra_keystore_password - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd - register: jgroups_keystore_password - -- name: Generate JKS pod template - template: - src: jks_pod.j2 - 
dest: "{{mktemp.stdout}}/jks_pod.yaml" - vars: - metrics_keystore_passwd: "{{metrics_keystore_password.content}}" - cassandra_keystore_passwd: "{{cassandra_keystore_password.content}}" - metrics_truststore_passwd: "{{hawkular_truststore_password.content}}" - cassandra_truststore_passwd: "{{cassandra_truststore_password.content}}" - jgroups_passwd: "{{jgroups_keystore_password.content}}" - check_mode: no - changed_when: no - -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" - register: metrics_keystore - check_mode: no - - stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.keystore" register: cassandra_keystore check_mode: no @@ -79,6 +7,10 @@ register: cassandra_truststore check_mode: no +- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" + register: metrics_keystore + check_mode: no + - stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.truststore" register: metrics_truststore check_mode: no @@ -87,32 +19,52 @@ register: jgroups_keystore check_mode: no -- name: create JKS pod - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - create -f {{mktemp.stdout}}/jks_pod.yaml - -o name - register: podoutput - check_mode: no - when: not metrics_keystore.stat.exists or - not metrics_truststore.stat.exists or - not cassandra_keystore.stat.exists or - not cassandra_truststore.stat.exists or - not jgroups_keystore.stat.exists +- block: + - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd + register: metrics_keystore_password + + - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd + register: cassandra_keystore_password + + - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd + register: jgroups_keystore_password + + - local_action: command mktemp -d + register: local_tmp + changed_when: False + + - fetch: + dest: "{{local_tmp.stdout}}/" + src: "{{ openshift_metrics_certs_dir }}/{{item}}" + flat: yes + changed_when: False + with_items: + - hawkular-metrics.pkcs12 + - hawkular-cassandra.pkcs12 + - hawkular-metrics.crt + - hawkular-cassandra.crt + - ca.crt + + - local_action: command {{role_path}}/files/import_jks_certs.sh + environment: + CERT_DIR: "{{local_tmp.stdout}}" + METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}" + CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}" + METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}" + CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}" + JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}" + changed_when: False + + - copy: + dest: "{{openshift_metrics_certs_dir}}/" + src: "{{item}}" + with_fileglob: "{{local_tmp.stdout}}/*.*store" + + - file: + path: "{{local_tmp.stdout}}" + state: absent + changed_when: False -- command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - get {{podoutput.stdout}} - -o jsonpath='{.status.phase}' - register: result - until: result.stdout.find("Succeeded") != -1 - retries: 5 - delay: 10 - changed_when: no when: not metrics_keystore.stat.exists or not metrics_truststore.stat.exists or not cassandra_keystore.stat.exists or diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index c42440130..1808db5d5 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -7,6 +7,7 @@ - 
name: Create temp directory for all our templates file: path={{mktemp.stdout}}/templates state=directory mode=0755 changed_when: False + when: "{{ openshift_metrics_install_metrics | bool }}" - name: Copy the admin client config(s) command: > @@ -15,8 +16,4 @@ check_mode: no tags: metrics_init -- include: install_metrics.yaml - when: openshift_metrics_install_metrics | default(false) | bool - -- include: uninstall_metrics.yaml - when: not openshift_metrics_install_metrics | default(false) | bool +- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}" diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index e6954ea44..f78621674 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -37,6 +37,7 @@ spec: - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file" - "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization" - "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}" + - "-Dhawkular.metrics.admin-tenant=_hawkular_admin" - "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra" - "-Dhawkular-alerts.cassandra-use-ssl" - "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd" @@ -44,6 +45,7 @@ spec: - "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization" - "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true" - "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true" + - "-Dcom.datastax.driver.FORCE_NIO=true" - "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}" - "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}" - "--hmw.keystore=/secrets/hawkular-metrics.keystore" diff --git a/roles/openshift_metrics/templates/jks_pod.j2 b/roles/openshift_metrics/templates/jks_pod.j2 deleted file mode 100644 index e86fe38a4..000000000 --- a/roles/openshift_metrics/templates/jks_pod.j2 +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - labels: - metrics-infra: support - generateName: jks-cert-gen- -spec: - containers: - - name: jks-cert-gen - image: {{openshift_metrics_image_prefix}}metrics-deployer:{{openshift_metrics_image_version}} - imagePullPolicy: Always - command: ["sh", "{{openshift_metrics_certs_dir}}/import_jks_certs.sh"] - securityContext: - runAsUser: 0 - volumeMounts: - - mountPath: {{openshift_metrics_certs_dir}} - name: certmount - env: - - name: CERT_DIR - value: {{openshift_metrics_certs_dir}} - - name: METRICS_KEYSTORE_PASSWD - value: {{metrics_keystore_passwd}} - - name: CASSANDRA_KEYSTORE_PASSWD - value: {{cassandra_keystore_passwd}} - - name: METRICS_TRUSTSTORE_PASSWD - value: {{metrics_truststore_passwd}} - - name: CASSANDRA_TRUSTSTORE_PASSWD - value: {{cassandra_truststore_passwd}} - - name: hawkular_cassandra_alias - value: {{cassandra_keystore_passwd}} - - name: JGROUPS_PASSWD - value: {{jgroups_passwd}} - restartPolicy: Never - serviceAccount: jks-generator - volumes: - - hostPath: - path: "{{openshift_metrics_certs_dir}}" - name: certmount diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 4ba38b721..8d4878fa7 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -224,8 +224,8 @@ class IpTablesManager(object): # pylint: 
disable=too-many-instance-attributes def gen_cmd(self): cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables' # Include -w (wait for xtables lock) in default arguments. - default_args = '-w' - return ["/usr/sbin/%s %s" % (cmd, default_args)] + default_args = ['-w'] + return ["/usr/sbin/%s" % cmd] + default_args def gen_save_cmd(self): # pylint: disable=no-self-use return ['/usr/libexec/iptables/iptables.init', 'save'] diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 1101870be..c4db197ca 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -1,7 +1,8 @@ --- - name: Install firewalld packages - package: name=firewalld state=present - when: not openshift.common.is_containerized | bool + package: + name: firewalld + state: present - name: Ensure iptables services are not enabled systemd: diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 291df6822..41673ee40 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -7,7 +7,7 @@ when: deployment_type == 'enterprise' - set_fact: - default_ose_version: '3.3' + default_ose_version: '3.4' when: deployment_type in ['atomic-enterprise', 'openshift-enterprise'] - set_fact: @@ -16,7 +16,7 @@ - fail: msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or - ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3'] ) + ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] ) - name: Enable RHEL repositories command: subscription-manager repos \ diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index aebfe7c39..699afc26a 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -1,11 +1,10 @@ ansible -configparser -pylint +# flake8 moved to before setuptools-lint to satisfy mccabe dependency issue +flake8 setuptools-lint nose coverage mock -flake8 PyYAML click backports.functools_lru_cache @@ -13,5 +12,3 @@ pyOpenSSL yamllint tox detox -# Temporary work-around for flake8 vs maccabe version conflict -mccabe==0.5.3 |
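The HOOKS.md documentation added above notes that hook tasks which target a different host must use ``delegate_to`` or ``local_action``. A minimal sketch of such a hook file, assuming a hypothetical load balancer host ``lb.example.com`` and a hypothetical ``/usr/local/bin/enable-backend`` helper script (neither is part of this change):

```yaml
---
# Illustrative hook file, e.g. /usr/share/custom/post_master.yml
# Runs against each master in serial; the second task is delegated to another host.

- name: note that this master finished its upgrade
  debug:
    msg: "Master upgrade of {{ inventory_hostname }} is complete"

- name: re-enable this master on the load balancer
  # delegate_to runs this command on lb.example.com instead of on the master being upgraded
  command: /usr/local/bin/enable-backend {{ inventory_hostname }}
  delegate_to: lb.example.com
```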