31 files changed, 442 insertions, 167 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index fdcdcb9dd..25d0f87c7 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.4.12-1 ./ +3.4.14-1 ./ @@ -22,11 +22,16 @@ not practical to start over at 1.0. ##Setup - Install base dependencies: + - Requirements: + - Ansible >= 2.1.0 though 2.2 is preferred for performance reasons. + - Jinja >= 2.7 + - Fedora: ``` dnf install -y ansible-2.1.0.0 pyOpenSSL python-cryptography ``` - - OSX: + + - OSX: ``` # Install ansible 2.1.0.0 and python 2 brew install ansible python diff --git a/README_vagrant.md b/README_vagrant.md index 3e8188ade..cb62e31d8 100644 --- a/README_vagrant.md +++ b/README_vagrant.md @@ -1,52 +1 @@ -:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs. - -Requirements ------------- -- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient). -- vagrant (tested against version 1.7.2) -- vagrant-hostmanager plugin (tested against version 1.5.0) -- vagrant-libvirt (tested against version 0.0.26) - - Only required if using libvirt instead of virtualbox - -For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant: - -1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads) - -2. Install it into vagrant - - ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box`` - -3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt) - - Resize the actual qcow2 image: - - ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB`` - - Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. 
A corrected metadata.json looks like this: - - ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}`` - -Usage ------ -``` -vagrant up --no-provision -vagrant provision -``` - -Using libvirt: -``` -vagrant up --provider=libvirt --no-provision -vagrant provision -``` - -Environment Variables ---------------------- -The following environment variables can be overridden: -- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, openshift-enterprise) -- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2) - -Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role: - -- ``rhel_subscription_user``: rhsm user -- ``rhel_subscription_pass``: rhsm password -- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects +The Vagrant-based installation has been moved to: https://github.com/openshift/openshift-ansible-contrib/tree/master/vagrant diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index a38378289..000000000 --- a/Vagrantfile +++ /dev/null @@ -1,71 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -VAGRANTFILE_API_VERSION = "2" - -unless Vagrant.has_plugin?("vagrant-hostmanager") - raise 'vagrant-hostmanager plugin is required' -end - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - - deployment_type = ENV['OPENSHIFT_DEPLOYMENT_TYPE'] || 'origin' - num_nodes = (ENV['OPENSHIFT_NUM_NODES'] || 2).to_i - - config.hostmanager.enabled = true - config.hostmanager.manage_host = true - config.hostmanager.include_offline = true - config.ssh.insert_key = false - - config.vm.provider "virtualbox" do |vbox, override| - override.vm.box = "centos/7" - vbox.memory = 1024 - vbox.cpus = 2 - - # Enable multiple guest CPUs if available - vbox.customize ["modifyvm", :id, "--ioapic", "on"] - end - - config.vm.provider "libvirt" do |libvirt, override| - libvirt.cpus = 2 - libvirt.memory = 1024 - libvirt.driver = 'kvm' - case deployment_type - when "openshift-enterprise" - override.vm.box = "rhel-7" - when "atomic-enterprise" - override.vm.box = "rhel-7" - when "origin" - override.vm.box = "centos/7" - override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" - override.vm.box_download_checksum_type = "sha256" - end - end - - num_nodes.times do |n| - node_index = n+1 - config.vm.define "node#{node_index}" do |node| - node.vm.hostname = "ose3-node#{node_index}.example.com" - node.vm.network :private_network, ip: "192.168.100.#{200 + n}" - config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service" - end - end - - config.vm.define "master" do |master| - master.vm.hostname = "ose3-master.example.com" - master.vm.network :private_network, ip: "192.168.100.100" - master.vm.network :forwarded_port, guest: 8443, host: 8443 - config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service" - master.vm.provision "ansible" do |ansible| - ansible.limit = 'all' - ansible.sudo = true - ansible.groups = { - "masters" => ["master"], - "nodes" => ["master", "node1", "node2"], - } - ansible.extra_vars = { - deployment_type: deployment_type, - } - ansible.playbook = "playbooks/byo/vagrant.yml" - end - end -end diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 3541d5471..93fdd5ae4 100644 --- a/filter_plugins/oo_filters.py +++ 
b/filter_plugins/oo_filters.py @@ -234,7 +234,7 @@ class FilterModule(object): arrange them as a string 'key=value key=value' """ if not isinstance(data, dict): - raise errors.AnsibleFilterError("|failed expects first param is a dict") + raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data)))) return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()]) @@ -286,7 +286,7 @@ class FilterModule(object): } """ if not isinstance(data, dict): - raise errors.AnsibleFilterError("|failed expects first param is a dict") + raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data)))) if host_type not in ['master', 'node', 'etcd']: raise errors.AnsibleFilterError("|failed expects etcd, master or node" " as the host type") diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 4bc6cef7d..69d02d43c 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.4.12 +Version: 3.4.14 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -249,6 +249,25 @@ Atomic OpenShift Utilities includes %changelog +* Fri Oct 28 2016 Troy Dawson <tdawson@redhat.com> 3.4.14-1 +- Change HA master controller service to restart always. (dgoodwin@redhat.com) +- Default hosted_registry_insecure true when insecure registry present in + existing /etc/sysconfig/docker. (abutcher@redhat.com) +- Fix race condtion in openshift_facts (smunilla@redhat.com) + +* Wed Oct 26 2016 Troy Dawson <tdawson@redhat.com> 3.4.13-1 +- [upgrades] Fix containerized node (sdodson@redhat.com) +- Add support for 3.4 upgrade. (dgoodwin@redhat.com) +- Update link to latest versions upgrade README (ebballon@gmail.com) +- Bump logging and metrics deployers to 3.3.1 and 3.4.0 (sdodson@redhat.com) +- Remove Vagrantfile (jdetiber@redhat.com) +- Enable dnsmasq service (sdodson@redhat.com) +- Default infra template modification based on + openshift_examples_modify_imagestreams (abutcher@redhat.com) +- Added a parameter for cert validity (vishal.patil@nuagenetworks.net) +- Fix and reorder control plane service restart. (dgoodwin@redhat.com) +- Add node-labels to kubeletArguments (tbielawa@redhat.com) + * Mon Oct 24 2016 Troy Dawson <tdawson@redhat.com> 3.4.12-1 - Move infrastructure templates into openshift_hosted_templates role. (abutcher@redhat.com) diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index ca01dbc9d..de4e34e2d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,5 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. 
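The two `filter_plugins/oo_filters.py` hunks above replace a generic "expects first param is a dict" failure with a message that names the filter and echoes the offending value and its type. A minimal standalone sketch of that pattern (the real filters are methods on the repo's `FilterModule` class, registered via `filters()`):

```python
from ansible import errors


def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
    """Flatten a dict such as {'a': 1, 'b': 2} into the string 'a=1 b=2'."""
    if not isinstance(data, dict):
        # Echo the bad input and its type so the failing variable can be
        # identified from the play output alone.
        raise errors.AnsibleFilterError(
            "|failed expects first param is a dict [oo_combine_dict]. "
            "Got %s. Type: %s" % (str(data), str(type(data))))
    return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
```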
# Upgrades available -- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md) -- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md) +- [OpenShift Enterprise 3.2 to 3.3](v3_3/README.md) +- [OpenShift Enterprise 3.1 to 3.2](v3_2/README.md) diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 7a3829283..9a5d84751 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -92,10 +92,9 @@ vars: master_config_hook: "v3_3/master_config_upgrade.yml" +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + - include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml vars: node_config_hook: "v3_3/node_config_upgrade.yml" -- include: ../../../openshift-master/restart.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index d6af71827..c9338a960 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -98,3 +98,4 @@ master_config_hook: "v3_3/master_config_upgrade.yml" - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md new file mode 100644 index 000000000..85b807dc6 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md @@ -0,0 +1,18 @@ +# v3.4 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the +following steps. + + * Upgrade and restart master services + * Unschedule node. + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..4f8a80ee8 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,96 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml + diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..8cde2ac88 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,98 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..f385d4f22 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,100 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 352d266a5..ccbba54b4 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -65,4 +65,4 @@ openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}" - role: cockpit-ui - when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) + when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml index 32a3636aa..439df5ffd 100644 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml @@ -1,5 +1,3 @@ -- include_vars: ../../../../roles/openshift_node/vars/main.yml - - name: Update systemd units include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index b3f4d7d1a..927d9b4ca 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -99,6 +99,8 @@ - include: rpm_upgrade.yml component=master when: not openshift.common.is_containerized | bool +# Create service signer cert when missing. Service signer certificate +# is added to master config in the master config hook for v3_3. - name: Determine if service signer cert must be created hosts: oo_first_master tasks: @@ -108,8 +110,6 @@ register: service_signer_cert_stat changed_when: false -# Create service signer cert when missing. 
Service signer certificate -# is added to master config in the master config hook for v3_3. - include: create_service_signer_cert.yml - name: Upgrade master config and systemd units @@ -128,13 +128,6 @@ - name: Update systemd units include: ../../../../roles/openshift_master/tasks/systemd_units.yml -# - name: Upgrade master configuration -# openshift_upgrade_config: -# from_version: '3.1' -# to_version: '3.2' -# role: master -# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" - - name: Check for ca-bundle.crt stat: path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" @@ -184,6 +177,10 @@ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" when: master_update_failed | length > 0 +# We are now ready to restart master services (or entire system +# depending on openshift_rolling_restart_mode): +- include: ../../openshift-master/restart.yml + ############################################################################### # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints ############################################################################### diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml index 9b777213e..16dac8720 100644 --- a/roles/nuage_common/defaults/main.yaml +++ b/roles/nuage_common/defaults/main.yaml @@ -10,4 +10,4 @@ nuage_master_mon_dir: /usr/share/nuage-openshift-monitor nuage_node_plugin_dir: /usr/share/vsp-openshift nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}" - +nuage_mon_cert_validity_period: "{{ nuage_cert_validity_period | default('3650') }}" diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml index 32b024487..0a2f375cd 100644 --- a/roles/nuage_master/tasks/certificates.yml +++ b/roles/nuage_master/tasks/certificates.yml @@ -15,7 +15,7 @@ - name: Generate the crt file command: > - openssl x509 -req -in "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_rest_server_crt }}" + openssl x509 -req -in "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_rest_server_crt }}" -days "{{ nuage_mon_cert_validity_period }}" delegate_to: "{{ nuage_ca_master }}" - name: Remove the req file diff --git a/roles/nuage_node/tasks/certificates.yml b/roles/nuage_node/tasks/certificates.yml index 0fe6f7bac..7fcd4274d 100644 --- a/roles/nuage_node/tasks/certificates.yml +++ b/roles/nuage_node/tasks/certificates.yml @@ -15,7 +15,7 @@ - name: Generate the crt file command: > - openssl x509 -req -in "{{ nuage_plugin_rest_client_crt_dir }}/restClient.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_plugin_crt }}" -extensions clientauth -extfile "{{ nuage_ca_dir }}"/openssl.cnf + openssl x509 -req -in "{{ nuage_plugin_rest_client_crt_dir }}/restClient.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_plugin_crt }}" -extensions clientauth -extfile "{{ nuage_ca_dir }}"/openssl.cnf -days {{ nuage_mon_cert_validity_period }} delegate_to: "{{ nuage_ca_master }}" - name: Remove the req file diff --git a/roles/openshift_docker_facts/tasks/main.yml 
b/roles/openshift_docker_facts/tasks/main.yml index 0c8a36d65..c690c5243 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -13,7 +13,7 @@ log_options: "{{ openshift_docker_log_options | default(None) }}" options: "{{ openshift_docker_options | default(None) }}" disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}" - hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(False) }}" + hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}" hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}" - set_fact: diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index f281b1303..0a783b164 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -55,7 +55,6 @@ def migrate_docker_facts(facts): facts['docker'][param] = facts[role].pop(old_param) if 'node' in facts and 'portal_net' in facts['node']: - facts['docker']['hosted_registry_insecure'] = True facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net') # log_options was originally meant to be a comma separated string, but @@ -1035,12 +1034,23 @@ def get_current_config(facts): return current_config def build_kubelet_args(facts): - """ Build node kubelet_args """ - cloud_cfg_path = os.path.join(facts['common']['config_base'], - 'cloudprovider') + """Build node kubelet_args + +In the node-config.yaml file, kubeletArgument sub-keys have their +values provided as a list. Hence the gratuitous use of ['foo'] below. + """ + cloud_cfg_path = os.path.join( + facts['common']['config_base'], + 'cloudprovider') + + # We only have to do this stuff on hosts that are nodes if 'node' in facts: + # Any changes to the kubeletArguments parameter are stored + # here first. kubelet_args = {} + if 'cloudprovider' in facts: + # EVERY cloud is special <3 if 'kind' in facts['cloudprovider']: if facts['cloudprovider']['kind'] == 'aws': kubelet_args['cloud-provider'] = ['aws'] @@ -1050,6 +1060,28 @@ def build_kubelet_args(facts): kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': kubelet_args['cloud-provider'] = ['gce'] + + # Automatically add node-labels to the kubeletArguments + # parameter. See BZ1359848 for additional details. + # + # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848 + if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict): + # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns + # into ['foo=bar', 'a=b'] + # + # On the openshift_node_labels inventory variable we loop + # over each key-value tuple (from .items()) and join the + # key to the value with an '=' character, this produces a + # list. + # + # map() seems to be returning an itertools.imap object + # instead of a list. We cast it to a list ourselves. + labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items())) + if labels_str != '': + kubelet_args['node-labels'] = labels_str + + # If we've added items to the kubelet_args dict then we need + # to merge the new items back into the main facts object. 
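A worked example of the label conversion described in the comments above, with a hypothetical label dict standing in for the `openshift_node_labels` inventory variable:

```python
# Hypothetical node labels fact, in the dict form described above:
labels = {'region': 'infra', 'zone': 'default'}

# The same '='.join over .items() used in build_kubelet_args; wrapping in
# list() yields a plain list whether map() returns a list (Python 2) or an
# iterator (Python 3).
node_labels = list(map(lambda kv: '='.join(kv), labels.items()))
print(node_labels)  # ['region=infra', 'zone=default'] (ordering may vary)
```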
if kubelet_args != {}: facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], []) return facts @@ -1138,6 +1170,24 @@ def get_docker_version_info(): } return result +def get_hosted_registry_insecure(): + """ Parses OPTIONS from /etc/sysconfig/docker to determine if the + registry is currently insecure. + """ + hosted_registry_insecure = None + if os.path.exists('/etc/sysconfig/docker'): + try: + ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') + ini_fp = io.StringIO(ini_str) + config = ConfigParser.RawConfigParser() + config.readfp(ini_fp) + options = config.get('root', 'OPTIONS') + if 'insecure-registry' in options: + hosted_registry_insecure = True + except: + pass + return hosted_registry_insecure + def get_openshift_version(facts): """ Get current version of openshift on the host. @@ -1347,8 +1397,11 @@ def save_local_facts(filename, facts): """ try: fact_dir = os.path.dirname(filename) - if not os.path.exists(fact_dir): - os.makedirs(fact_dir) + try: + os.makedirs(fact_dir) # try to make the directory + except OSError as exception: + if exception.errno != errno.EEXIST: # but it is okay if it is already there + raise # pass any other exceptions up the chain with open(filename, 'w') as fact_file: fact_file.write(module.jsonify(facts)) os.chmod(filename, 0o600) @@ -1790,13 +1843,15 @@ class OpenShiftFacts(object): if 'docker' in roles: docker = dict(disable_push_dockerhub=False, - hosted_registry_insecure=True, options='--log-driver=json-file --log-opt max-size=50m') version_info = get_docker_version_info() if version_info is not None: docker['api_version'] = version_info['api_version'] docker['version'] = version_info['version'] docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10') + hosted_registry_insecure = get_hosted_registry_insecure() + if hosted_registry_insecure is not None: + docker['hosted_registry_insecure'] = hosted_registry_insecure defaults['docker'] = docker if 'clock' in roles: diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index c29df1873..93b701ebc 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -53,7 +53,7 @@ - include: secure.yml static: no - when: replicas | int > 0 + when: replicas | int > 0 and not (openshift.docker.hosted_registry_insecure | default(false) | bool) - include: storage/object_storage.yml static: no diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml index a8d4b1cbb..13cef2d66 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml @@ -200,13 +200,13 @@ items: name: MODE value: "install" - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' + description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.1", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify version for logging components; e.g. 
for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' + description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.1", set version "3.3.1"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.3.1" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml index afd47ec7c..5e21e3a7a 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml @@ -101,7 +101,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.3.1" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index a8d4b1cbb..9cff9daca 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -200,13 +200,13 @@ items: name: MODE value: "install" - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' + description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' + description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.4.0" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index afd47ec7c..1b46d6ac7 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -101,7 +101,7 @@ parameters: - description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.4.0" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index a8f5d7351..088e8db43 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -18,7 +18,7 @@ LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers -Restart=on-failure +Restart=always RestartSec=5s [Install] diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index bf62696f0..94928f88c 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -19,7 +19,7 @@ LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers -Restart=on-failure +Restart=always RestartSec=5s [Install] diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 1de63ecc3..8b3145785 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -9,6 +9,10 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: + # Reset node labels to an empty dictionary. + - role: node + local_facts: + labels: {} - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml index bd9a0ffb6..396c27295 100644 --- a/roles/openshift_node_dnsmasq/tasks/main.yml +++ b/roles/openshift_node_dnsmasq/tasks/main.yml @@ -29,6 +29,12 @@ when: openshift_node_dnsmasq_additional_config_file is defined notify: restart dnsmasq +- name: Enable dnsmasq + service: + name: dnsmasq + enabled: yes + state: started + # Dynamic NetworkManager based dispatcher - include: ./network-manager.yml when: network_manager_active | bool |