26 files changed, 403 insertions, 28 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index d85882bf9..4ec54c846 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.32-1 ./
+3.0.35-1 ./
diff --git a/README_AWS.md b/README_AWS.md
index f8ecaec49..c605de43d 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -51,7 +51,7 @@ to setup a private key file to allow ansible to connect to the created hosts.
 To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS.
 ```
 Host *.compute-1.amazonaws.com
-  PrivateKey $HOME/.ssh/my_private_key.pem
+  IdentityFile $HOME/.ssh/my_private_key.pem
 ```
 
 Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c8f6a2673..4d00c655b 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
 }
 
 Name: openshift-ansible
-Version: 3.0.32
+Version: 3.0.35
 Release: 1%{?dist}
 Summary: Openshift and Atomic Enterprise Ansible
 License: ASL 2.0
@@ -259,6 +259,36 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.35-1
+- added the lib_timedate role (mwoodson@redhat.com)
+- added chrony (mwoodson@redhat.com)
+- added oso_moniotoring tools role (mwoodson@redhat.com)
+- Improve pacemaker 'is-active' check. (abutcher@redhat.com)
+
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.34-1
+- clean up too-many-branches / logic (jdiaz@redhat.com)
+- atomic-openshift-installer: add containerized to inventory
+  (smunilla@redhat.com)
+- Add 'unknown' to possible output for the is-active check.
+  (abutcher@redhat.com)
+- Fix cluster_method conditional in master restart playbook.
+  (abutcher@redhat.com)
+- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com)
+- atomic-openshift-installer: Remove containerized install for 3.0
+  (smunilla@redhat.com)
+- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com)
+- Remove pause after haproxy start (abutcher@redhat.com)
+- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com)
+
+* Fri Jan 15 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.33-1
+- Configure nodes which are also masters prior to nodes in containerized
+  install. (abutcher@redhat.com)
+- Call attention to openshift_master_rolling_restart_mode variable in restart
+  prompt. (abutcher@redhat.com)
+- Added anchors for rules in style_guide.adoc in order to make it easier to
+  reference specific rules in PRs. (twiest@redhat.com)
+- Update ec2.ini (jdetiber@redhat.com)
+
 * Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.32-1
 - Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com)
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index de9f36c8a..0df77e309 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,4 +1,4 @@
-- hosts: OSv3
+- hosts: OSEv3
   gather_facts: false
   tasks:
   - name: install python and deps for ansible modules
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 174cea460..d24e9cafa 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -20,7 +20,7 @@
 #  ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
 #
 #  Notes:
-#  * By default this will do a 55GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable
+#  * By default this will do a 200GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable
 #  * This does a GP2 by default. Support for Provisioned IOPS has not been added
 #  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
 #  * This can be done with NO downtime on the host
@@ -36,7 +36,7 @@
 
   vars:
     cli_volume_type: gp2
-    cli_volume_size: 55
+    cli_volume_size: 200
 #    cli_volume_iops: "{{ 30 * cli_volume_size }}"
 
   pre_tasks:
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 987fae63c..052892863 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -68,17 +68,25 @@
 
 - name: Determine which masters are currently active
   hosts: oo_masters_to_config
+  any_errors_fatal: true
   tasks:
   - name: Check master service status
     command: >
       systemctl is-active {{ openshift.common.service_type }}-master
     register: active_check_output
-    when: openshift.master.cluster_method == 'pacemaker'
-    failed_when: active_check_output.stdout not in ['active', 'inactive']
+    when: openshift.master.cluster_method | default(None) == 'pacemaker'
+    failed_when: false
     changed_when: false
+  # Any master which did not report 'active' or 'inactive' is likely
+  # unhealthy. Other possible states are 'unknown' or 'failed'.
+  - fail:
+      msg: >
+        Got invalid service state from {{ openshift.common.service_type }}-master
+        on {{ inventory_hostname }}. Please verify pacemaker cluster.
+    when: openshift.master.cluster_method | default(None) == 'pacemaker' and active_check_output.stdout not in ['active', 'inactive']
   - set_fact:
       is_active: "{{ active_check_output.stdout == 'active' }}"
-    when: openshift.master.cluster_method == 'pacemaker'
+    when: openshift.master.cluster_method | default(None) == 'pacemaker'
 
 - name: Evaluate master groups
   hosts: localhost
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 8d0c4945e..1d31657ed 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -154,21 +154,15 @@
       validate_checksum: yes
     with_items: nodes_needing_certs
 
-- name: Configure node instances
+- name: Deploy node certificates
   hosts: oo_nodes_to_config
   vars:
     sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
-    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
-    # TODO: Prefix flannel role variables.
-    etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
-    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
-  pre_tasks:
+  tasks:
   - name: Ensure certificate directory exists
     file:
       path: "{{ node_cert_dir }}"
       state: directory
-  # TODO: notify restart node
   # possibly test service started time against certificate/config file
   # timestamps in node to trigger notify
 
@@ -177,8 +171,44 @@
       src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
       dest: "{{ node_cert_dir }}"
     when: certs_missing
+
+- name: Evaluate node groups
+  hosts: localhost
+  become: no
+  tasks:
+  - name: Evaluate oo_containerized_master_nodes
+    add_host:
+      name: "{{ item }}"
+      groups: oo_containerized_master_nodes
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+    when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+
+- name: Configure node instances
+  hosts: oo_containerized_master_nodes
+  serial: 1
+  vars:
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+  roles:
+  - openshift_node
+
+- name: Configure node instances
+  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+  vars:
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
   roles:
   - openshift_node
+
+- name: Additional node config
+  hosts: oo_nodes_to_config
+  vars:
+    # TODO: Prefix flannel role variables.
+    etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+  roles:
   - role: flannel
     when: openshift.common.use_flannel | bool
   - role: nickhammond.logrotate
diff --git a/roles/chrony/README.md b/roles/chrony/README.md
new file mode 100644
index 000000000..bf15d9669
--- /dev/null
+++ b/roles/chrony/README.md
@@ -0,0 +1,31 @@
+Role Name
+=========
+
+A role to configure chrony as the ntp client
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+chrony_ntp_servers: a list of ntp servers to use the chrony.conf file
+
+Dependencies
+------------
+
+roles/lib_timedatectl
+
+Example Playbook
+----------------
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml
new file mode 100644
index 000000000..95576e666
--- /dev/null
+++ b/roles/chrony/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for chrony
diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml
new file mode 100644
index 000000000..1973c79e2
--- /dev/null
+++ b/roles/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart chronyd
+  service:
+    name: chronyd
+    state: restarted
diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml
new file mode 100644
index 000000000..85595d7c3
--- /dev/null
+++ b/roles/chrony/meta/main.yml
@@ -0,0 +1,18 @@
+---
+galaxy_info:
+  author: Openshift Operations
+  description: Configure chrony as an ntp server
+  company: Red Hat
+  license: Apache 2.0
+  min_ansible_version: 1.9.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  - name: Fedora
+    versions:
+    - all
+  categories:
+  - system
+dependencies:
+- roles/lib_timedatectl
diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml
new file mode 100644
index 000000000..fae6d8e4c
--- /dev/null
+++ b/roles/chrony/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: remove ntp package
+  yum:
+    name: ntp
+    state: absent
+
+- name: ensure chrony package is installed
+  yum:
+    name: chrony
+    state: installed
+
+- name: Install /etc/chrony.conf
+  template:
+    src: chrony.conf.j2
+    dest: /etc/chrony.conf
+    owner: root
+    group: root
+    mode: 0644
+  notify:
+  - Restart chronyd
+
+- name: enabled timedatectl set-ntp yes
+  timedatectl:
+    ntp: True
+
+- name:
+  service:
+    name: chronyd
+    state: started
+    enabled: yes
diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2
new file mode 100644
index 000000000..de43b6364
--- /dev/null
+++ b/roles/chrony/templates/chrony.conf.j2
@@ -0,0 +1,45 @@
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% for server in chrony_ntp_servers %}
+server {{ server }} iburst
+{% endfor %}
+
+# Ignore stratum in source selection.
+stratumweight 0
+
+# Record the rate at which the system clock gains/losses time.
+driftfile /var/lib/chrony/drift
+
+# Enable kernel RTC synchronization.
+rtcsync
+
+# In first three updates step the system clock instead of slew
+# if the adjustment is larger than 10 seconds.
+makestep 10 3
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Listen for commands only on localhost.
+bindcmdaddress 127.0.0.1
+bindcmdaddress ::1
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+keyfile /etc/chrony.keys
+
+# Specify the key used as password for chronyc.
+commandkey 1
+
+# Generate command key if missing.
+generatecommandkey
+
+# Disable logging of client accesses.
+noclientlog
+
+# Send a message to syslog if a clock adjustment is larger than 0.5 seconds.
+logchange 0.5
+
+logdir /var/log/chrony
+#log measurements statistics tracking
diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml
new file mode 100644
index 000000000..061a21547
--- /dev/null
+++ b/roles/chrony/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for chrony
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
index ee60adcab..5b8691b26 100644
--- a/roles/haproxy/handlers/main.yml
+++ b/roles/haproxy/handlers/main.yml
@@ -3,3 +3,4 @@
   service:
     name: haproxy
     state: restarted
+  when: not (haproxy_start_result_changed | default(false) | bool)
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 97f870829..0b8370ce2 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -19,6 +19,5 @@
     enabled: yes
   register: start_result
 
-- name: Pause 30 seconds if haproxy was just started
-  pause: seconds=30
-  when: start_result | changed
+- set_fact:
+    haproxy_start_result_changed: "{{ start_result | changed }}"
diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py
new file mode 100644
index 000000000..b6eab5918
--- /dev/null
+++ b/roles/lib_timedatectl/library/timedatectl.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+'''
+  timedatectl ansible module
+
+  This module supports setting ntp enabled
+'''
+import subprocess
+
+
+
+
+def do_timedatectl(options=None):
+    ''' subprocess timedatectl '''
+
+    cmd = ['/usr/bin/timedatectl']
+    if options:
+        cmd += options.split()
+
+    proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE)
+    proc.wait()
+    return proc.stdout.read()
+
+def main():
+    ''' Ansible module for timedatectl
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            #state=dict(default='enabled', type='str'),
+            ntp=dict(default=True, type='bool'),
+        ),
+        #supports_check_mode=True
+    )
+
+    # do something
+    ntp_enabled = False
+
+    results = do_timedatectl()
+
+    for line in results.split('\n'):
+        if 'NTP enabled' in line:
+            if 'yes' in line:
+                ntp_enabled = True
+
+    ########
+    # Enable NTP
+    ########
+    if module.params['ntp']:
+        if ntp_enabled:
+            module.exit_json(changed=False, results="enabled", state="enabled")
+
+        # Enable it
+        # Commands to enable ntp
+        else:
+            results = do_timedatectl('set-ntp yes')
+            module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results)
+
+    #########
+    # Disable NTP
+    #########
+    else:
+        if not ntp_enabled:
+            module.exit_json(changed=False, results="disabled", state="disabled")
+
+        results = do_timedatectl('set-ntp no')
+        module.exit_json(changed=True, results="disabled", state="disabled")
+
+    module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown")
+
+# Pylint is getting in the way of basic Ansible
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 1edf21d9b..14a613786 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,4 +1,8 @@
 ---
+- name: Install NFS storage plugin dependencies
+  action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+  when: not openshift.common.is_atomic | bool
+
 - name: Set seboolean to allow nfs storage plugin access from containers
   seboolean:
     name: virt_use_nfs
diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md
new file mode 100644
index 000000000..4215f9eeb
--- /dev/null
+++ b/roles/oso_monitoring_tools/README.md
@@ -0,0 +1,54 @@
+Role Name
+=========
+
+This role will install the Openshift Monitoring Utilities
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+osomt_zagg_client_config
+
+from vars/main.yml:
+
+osomt_zagg_client_config:
+  host:
+    name: "{{ osomt_host_name }}"
+  zagg:
+    url: "{{ osomt_zagg_url }}"
+    user: "{{ osomt_zagg_user }}"
+    pass: "{{ osomt_zagg_password }}"
+    ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+    verbose: "{{ osomt_zagg_verbose }}"
+    debug: "{{ osomt_zagg_debug }}"
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+- role: "oso_monitoring_tools"
+  osomt_host_name: hostname
+  osomt_zagg_url: http://path.to/zagg_web
+  osomt_zagg_user: admin
+  osomt_zagg_password: password
+  osomt_zagg_ssl_verify: True
+  osomt_zagg_verbose: False
+  osomt_zagg_debug: False
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml
new file mode 100644
index 000000000..a17424f25
--- /dev/null
+++ b/roles/oso_monitoring_tools/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml
new file mode 100644
index 000000000..cefa780ab
--- /dev/null
+++ b/roles/oso_monitoring_tools/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml
new file mode 100644
index 000000000..9c42b68dc
--- /dev/null
+++ b/roles/oso_monitoring_tools/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+  author: OpenShift Operations
+  description: Install Openshift Monitoring tools
+  company: Red Hat, Inc
+  license: ASL 2.0
+  min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml
new file mode 100644
index 000000000..b165f9a45
--- /dev/null
+++ b/roles/oso_monitoring_tools/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+# tasks file for oso_monitoring_tools
+- name: Install the Openshift Tools RPMS
+  yum:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+  - openshift-tools-scripts-monitoring-zagg-client
+  - python-openshift-tools-monitoring-zagg
+
+- debug: var=g_zagg_client_config
+
+- name: Generate the /etc/openshift_tools/zagg_client.yaml config file
+  copy:
+    content: "{{ osomt_zagg_client_config | to_nice_yaml }}"
+    dest: /etc/openshift_tools/zagg_client.yaml
+    mode: "644"
diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml
new file mode 100644
index 000000000..3538ba30b
--- /dev/null
+++ b/roles/oso_monitoring_tools/vars/main.yml
@@ -0,0 +1,12 @@
+---
+# vars file for oso_monitoring_tools
+osomt_zagg_client_config:
+  host:
+    name: "{{ osomt_host_name }}"
+  zagg:
+    url: "{{ osomt_zagg_url }}"
+    user: "{{ osomt_zagg_user }}"
+    pass: "{{ osomt_zagg_password }}"
+    ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+    verbose: "{{ osomt_zagg_verbose }}"
+    debug: "{{ osomt_zagg_debug }}"
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 4e30929da..1aacf3a4b 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -127,14 +127,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
                 masters_set = True
         host_props['node'] = True
 
-        #TODO: Reenable this option once container installs are out of tech preview
-        rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
-                                        type=click.Choice(['rpm', 'container']),
-                                        default='rpm')
-        if rpm_or_container == 'container':
-            host_props['containerized'] = True
-        else:
-            host_props['containerized'] = False
+        host_props['containerized'] = False
+        if oo_cfg.settings['variant_version'] != '3.0':
+            rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+                                            type=click.Choice(['rpm', 'container']),
+                                            default='rpm')
+            if rpm_or_container == 'container':
+                host_props['containerized'] = True
 
         if existing_env:
             host_props['new_host'] = True
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 20401f812..c0d115fdc 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -128,6 +128,8 @@ def write_host(host, inventory, schedulable=None):
         facts += ' openshift_hostname={}'.format(host.hostname)
     if host.public_hostname:
         facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+    if host.containerized:
+        facts += ' containerized={}'.format(host.containerized)
     # TODO: For not write_host is handles both master and nodes.
     # Technically only nodes will ever need this.