41 files changed, 649 insertions, 252 deletions
diff --git a/.gitignore b/.gitignore index 081659a94..626065fe1 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ multi_ec2.yaml multi_inventory.yaml .vagrant .tags* +ansible.cfg diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index d78ef4a60..ab096b300 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.44-1 ./ +3.0.47-1 ./ diff --git a/README_AWS.md b/README_AWS.md index c605de43d..4a2399d42 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -43,6 +43,16 @@ You may also want to allow access from the outside world on the following ports: ``` +Determine your subnet and setup the VPC +--------------------------------------- + +In the AWS VPC console, look up your subnet ID for the region you want to use and set it as such: + +- export ec2_vpc_subnet='my_vpc_subnet' + +Go to Your VPCs, select the VPC, and under Actions -> DNS Hostnames, set to Yes and Save. + + (Optional) Setup your $HOME/.ssh/config file ------------------------------------------- In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config` @@ -62,7 +72,7 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne By default, a cluster is launched with the following configuration: - Instance type: m4.large -- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments) +- AMI: ami-7a9e9812 (for online deployments, ami-61bbf104 for origin deployments and ami-10663b78 for enterprise deployments) - Region: us-east-1 - Keypair name: libra - Security group: public @@ -109,7 +119,6 @@ If needed, these values can be changed by setting environment variables on your - export ec2_region='us-east-1' - export ec2_keypair='libra' - export ec2_security_groups="['public']" -- export ec2_vpc_subnet='my_vpc_subnet' - export ec2_assign_public_ip='true' - export os_etcd_root_vol_size='20' - export os_etcd_root_vol_type='standard' @@ -211,7 +211,7 @@ class Oscp(object): # Assume we have one and only one. hostname, server_info = results[0] - dns = server_info['oo_pulic_ip'] + dns = server_info['oo_public_ip'] host_str = "%s%s%s" % (self.user, dns, self.path) diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 22108ffde..c92ea2843 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.44 +Version: 3.0.47 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -261,6 +261,30 @@ Atomic OpenShift Utilities includes %changelog +* Wed Feb 24 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.47-1 +- a-o-i: Double safety check on master_lb (smunilla@redhat.com) +- a-o-i: Better method for identifying master_lb (smunilla@redhat.com) + +* Tue Feb 23 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.46-1 +- a-o-i: Exception checking around master_lb (smunilla@redhat.com) + +* Mon Feb 22 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.45-1 +- Do not monitor for etcd watchers (mmahut@redhat.com) +- remove old master registry item/triggers (jdiaz@redhat.com) +- a-o-i: Redo logic for detecting master_lb (smunilla@redhat.com) +- Fix 1.2 version check (jdetiber@redhat.com) +- Fix pv/c creation failed_when. (abutcher@redhat.com) +- Rename variable to delete temporary file, add configurable path. 
+ (hrosnet@redhat.com) +- Add /var/log to containerized node mounts (sdodson@redhat.com) +- Add extra parameters for S3 registry: delete file, create bucket. + (hrosnet@redhat.com) +- Don't make config files world readable (sdodson@redhat.com) +- Fix requiring state and providing a default (rharriso@redhat.com) +- bind in /etc/origin/node for non-master monitoring to be able to talk with + master (jdiaz@redhat.com) +- a-o-i: pylint fixes related to too-long lines (smunilla@redhat.com) + * Wed Feb 17 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.44-1 - create registry items/triggers under Openshift Node (jdiaz@redhat.com) - a-o-i: Change method for counting master_lb as installed diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml index ec28564cf..2f1d003ff 100755 --- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml +++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml @@ -8,8 +8,12 @@ g_server: http://localhost/zabbix/api_jsonrpc.php g_user: Admin g_password: zabbix + g_zbx_scriptrunner_user: scriptrunner + g_zbx_scriptrunner_bastion_host: specialhost.example.com roles: - role: os_zabbix ozb_server: "{{ g_server }}" ozb_user: "{{ g_user }}" ozb_password: "{{ g_password }}" + ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}" + ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}" diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml index ca87bc655..9a3361919 100644 --- a/playbooks/aws/openshift-cluster/cluster_hosts.yml +++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml @@ -10,8 +10,12 @@ g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | defau g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" +g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}" + g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}" -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}" +g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}" g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}" diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 6090ed6fe..63be06ecf 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -34,26 +34,22 @@ - set_fact: ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}" - ec2_security_groups: "{{ ec2_master_security_groups - | default(deployment_vars[deployment_type].security_groups, true) }}" + ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" when: host_type == "master" and sub_host_type == "default" - set_fact: ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | 
default(deployment_vars[deployment_type].type, true), true), true) }}" - ec2_security_groups: "{{ ec2_etcd_security_groups - | default(deployment_vars[deployment_type].security_groups, true)}}" + ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" when: host_type == "etcd" and sub_host_type == "default" - set_fact: ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}" - ec2_security_groups: "{{ ec2_infra_security_groups - | default(deployment_vars[deployment_type].security_groups, true) }}" + ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" when: host_type == "node" and sub_host_type == "infra" - set_fact: ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}" - ec2_security_groups: "{{ ec2_node_security_groups - | default(deployment_vars[deployment_type].security_groups, true) }}" + ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" when: host_type == "node" and sub_host_type == "compute" - set_fact: @@ -61,8 +57,7 @@ | default(deployment_vars[deployment_type].type, true) }}" when: ec2_instance_type is not defined - set_fact: - ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') - | default(deployment_vars[deployment_type].security_groups, true) }}" + ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" when: ec2_security_groups is not defined - name: Find amis for deployment_type diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index 10872e738..8893db245 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -5,6 +5,8 @@ g_lb_hosts: "{{ groups.lb | default([]) }}" g_master_hosts: "{{ groups.masters | default([]) }}" +g_new_master_hosts: "{{ groups.new_masters | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" g_new_node_hosts: "{{ groups.new_nodes | default([]) }}" diff --git a/playbooks/byo/openshift-cluster/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 1702690f6..18797d02a 100644 --- a/playbooks/byo/openshift-cluster/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,5 +1,5 @@ --- -- include: ../../common/openshift-cluster/scaleup.yml +- include: ../../common/openshift-master/scaleup.yml vars_files: - ../../byo/openshift-cluster/cluster_hosts.yml vars: diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-node/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-node/lookup_plugins b/playbooks/byo/openshift-node/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-node/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-node/roles b/playbooks/byo/openshift-node/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-node/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml new file mode 100644 index 000000000..0343597b5 --- /dev/null +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -0,0 +1,8 @@ +--- +- include: ../../common/openshift-node/scaleup.yml + vars_files: + - ../../byo/openshift-cluster/cluster_hosts.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: "{{ debug_level | default(2) }}" + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml new file mode 100644 index 000000000..1ac78468a --- /dev/null +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -0,0 +1,56 @@ +- name: Configure flannel + hosts: oo_first_master + vars: + etcd_urls: "{{ openshift.master.etcd_urls }}" + roles: + - role: flannel_register + when: openshift.common.use_flannel | bool + +- name: Additional master configuration + hosts: oo_first_master + vars: + cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" + etcd_urls: "{{ openshift.master.etcd_urls }}" + openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" + omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" + roles: + - role: openshift_master_cluster + when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" + - role: openshift_examples + when: openshift.common.install_examples | bool + - role: openshift_cluster_metrics + when: openshift.common.use_cluster_metrics | bool + - role: openshift_manageiq + when: openshift.common.use_manageiq | bool + - role: cockpit + when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and + (osm_use_cockpit | bool or osm_use_cockpit is undefined ) + - role: flannel_register + when: openshift.common.use_flannel | bool + - role: pods + when: openshift.common.deployment_type == 'online' + - role: os_env_extras + when: openshift.common.deployment_type == 'online' + +- name: Create persistent volumes and create hosted services + hosts: oo_first_master + vars: + attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" + deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 + - role: openshift_serviceaccounts + openshift_serviceaccounts_names: + - router + - registry + openshift_serviceaccounts_namespace: default + openshift_serviceaccounts_sccs: + - privileged + - role: openshift_router + when: deploy_infra | bool + - role: openshift_registry + registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" + when: deploy_infra | bool and attach_registry_volume | bool diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 2cad4b362..23c8f039e 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -11,4 +11,6 @@ - include: ../openshift-master/config.yml +- include: additional_config.yml + - include: ../openshift-node/config.yml diff --git 
a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 7917bfba5..432a92b49 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -10,8 +10,8 @@ when: g_etcd_hosts is not defined - fail: - msg: This playbook requires g_master_hosts to be set - when: g_master_hosts is not defined + msg: This playbook requires g_master_hosts or g_new_master_hosts to be set + when: g_master_hosts is not defined and g_new_master_hosts is not defined - fail: msg: This playbook requires g_node_hosts or g_new_node_hosts to be set @@ -29,6 +29,14 @@ msg: The nfs group must be limited to one host when: (groups[g_nfs_hosts] | default([])) | length > 1 + - name: Evaluate oo_masters + add_host: + name: "{{ item }}" + groups: oo_masters + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" + - name: Evaluate oo_etcd_to_config add_host: name: "{{ item }}" @@ -43,11 +51,7 @@ groups: oo_masters_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_master_hosts | default([]) }}" - - # Use g_new_node_hosts if it exists otherwise g_node_hosts - - set_fact: - g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" + with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" - name: Evaluate oo_nodes_to_config add_host: @@ -55,7 +59,7 @@ groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_node_hosts_to_config | default([]) }}" + with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is - name: Evaluate oo_nodes_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml index dbf746f12..54bb251f7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -13,10 +13,11 @@ tasks: - name: Upgrade master packages command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" + when: not openshift.common.is_containerized | bool - name: Ensure python-yaml present for config upgrade action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" - when: not openshift.common.is_atomic | bool + when: not openshift.common.is_containerized | bool # Currently 3.1.1 does not have any new configuration settings # @@ -63,6 +64,7 @@ tasks: - name: Upgrade node packages command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" + when: not openshift.common.is_containerized | bool - name: Restart node service service: name="{{ openshift.common.service_type }}-node" state=restarted diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 77edbd1a6..b9d595576 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -57,6 +57,8 @@ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url 
| default(None) }}" portal_net: "{{ openshift_master_portal_net | default(None) }}" + ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" + master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - openshift_facts: role: hosted openshift_env: @@ -144,7 +146,7 @@ when: etcd_client_certs_missing is defined and etcd_client_certs_missing - name: Determine if master certificates need to be generated - hosts: oo_masters_to_config + hosts: oo_first_master:oo_masters_to_config tasks: - set_fact: openshift_master_certs_no_etcd: @@ -221,15 +223,6 @@ validate_checksum: yes with_items: masters_needing_certs -- name: Compute haproxy_backend_servers - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - set_fact: - haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}" - - name: Configure load balancers hosts: oo_lb_to_config vars: @@ -248,11 +241,11 @@ mode: tcp option: tcplog balance: source - servers: "{{ hostvars.localhost.haproxy_backend_servers }}" + servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}" roles: - role: openshift_facts - role: haproxy - when: groups.oo_masters_to_config | length > 1 + when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool - name: Check for cached session secrets hosts: oo_first_master @@ -334,8 +327,8 @@ serial: 1 vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" - openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" - openshift_master_count: "{{ groups.oo_masters_to_config | length }}" + openshift_master_ha: "{{ openshift.master.ha }}" + openshift_master_count: "{{ openshift.master.master_count }}" openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" pre_tasks: @@ -343,12 +336,12 @@ file: path: "{{ openshift.common.config_base }}/master" state: directory - when: master_certs_missing and 'oo_first_master' not in group_names + when: master_certs_missing | bool and 'oo_first_master' not in group_names - name: Unarchive the tarball on the master unarchive: src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz" dest: "{{ master_cert_config_dir }}" - when: master_certs_missing and 'oo_first_master' not in group_names + when: master_certs_missing | bool and 'oo_first_master' not in group_names roles: - openshift_master - role: nickhammond.logrotate @@ -359,32 +352,12 @@ group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} changed_when: False -- name: Additional master configuration - hosts: oo_first_master - vars: - cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" - etcd_urls: "{{ openshift.master.etcd_urls }}" - openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" - omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}" +# Additional instance config for online deployments +- name: Additional instance config + hosts: oo_masters_deployment_type_online roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - - role: openshift_examples - when: openshift.common.install_examples | bool - - role: openshift_cluster_metrics - when: openshift.common.use_cluster_metrics | bool - - role: openshift_manageiq 
- when: openshift.common.use_manageiq | bool - - role: cockpit - when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and - (osm_use_cockpit | bool or osm_use_cockpit is undefined ) - - role: flannel_register - when: openshift.common.use_flannel | bool - - role: pods - when: openshift.common.deployment_type == 'online' - - role: os_env_extras - when: openshift.common.deployment_type == 'online' - + - pods + - os_env_extras - name: Delete temporary directory on localhost hosts: localhost @@ -394,26 +367,3 @@ tasks: - file: name={{ g_master_mktemp.stdout }} state=absent changed_when: False - -- name: Create persistent volumes and create hosted services - hosts: oo_first_master - vars: - attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" - deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" - roles: - - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 - - role: openshift_serviceaccounts - openshift_serviceaccounts_names: - - router - - registry - openshift_serviceaccounts_namespace: default - openshift_serviceaccounts_sccs: - - privileged - - role: openshift_router - when: deploy_infra | bool - - role: openshift_registry - when: deploy_infra | bool and attach_registry_volume | bool - diff --git a/playbooks/common/openshift-master/library/modify_yaml.py b/playbooks/common/openshift-master/library/modify_yaml.py new file mode 100755 index 000000000..a4be10ca3 --- /dev/null +++ b/playbooks/common/openshift-master/library/modify_yaml.py @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +''' modify_yaml ansible module ''' + +import yaml + +DOCUMENTATION = ''' +--- +module: modify_yaml +short_description: Modify yaml key value pairs +author: Andrew Butcher +requirements: [ ] +''' +EXAMPLES = ''' +- modify_yaml: + dest: /etc/origin/master/master-config.yaml + yaml_key: 'kubernetesMasterConfig.masterCount' + yaml_value: 2 +''' + +def main(): + ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting + the key to the desired value. + ''' + + # disabling pylint errors for global-variable-undefined and invalid-name + # for 'global module' usage, since it is required to use ansible_facts + # pylint: disable=global-variable-undefined, invalid-name, + # redefined-outer-name + global module + + module = AnsibleModule( + argument_spec=dict( + dest=dict(required=True), + yaml_key=dict(required=True), + yaml_value=dict(required=True), + backup=dict(required=False, default=True, type='bool'), + ), + supports_check_mode=True, + ) + + dest = module.params['dest'] + yaml_key = module.params['yaml_key'] + yaml_value = module.safe_eval(module.params['yaml_value']) + backup = module.params['backup'] + + # Represent null values as an empty string. 
+ # pylint: disable=missing-docstring, unused-argument + def none_representer(dumper, data): + return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'') + yaml.add_representer(type(None), none_representer) + + try: + changes = [] + + yaml_file = open(dest) + yaml_data = yaml.safe_load(yaml_file.read()) + yaml_file.close() + + ptr = yaml_data + for key in yaml_key.split('.'): + if key not in ptr and key != yaml_key.split('.')[-1]: + ptr[key] = {} + elif key == yaml_key.split('.')[-1]: + if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): + ptr[key] = yaml_value + changes.append((yaml_key, yaml_value)) + else: + ptr = ptr[key] + + if len(changes) > 0: + if backup: + module.backup_local(dest) + yaml_file = open(dest, 'w') + yaml_string = yaml.dump(yaml_data, default_flow_style=False) + yaml_string = yaml_string.replace('\'\'', '""') + yaml_file.write(yaml_string) + yaml_file.close() + + return module.exit_json(changed=(len(changes) > 0), changes=changes) + + # ignore broad-except error to avoid stack trace to ansible user + # pylint: disable=broad-except + except Exception, e: + return module.fail_json(msg=str(e)) + +# ignore pylint errors related to the module_utils import +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml new file mode 100644 index 000000000..6f8151d30 --- /dev/null +++ b/playbooks/common/openshift-master/scaleup.yml @@ -0,0 +1,55 @@ +--- +- include: ../openshift-cluster/evaluate_groups.yml + +- name: Gather facts + hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config + roles: + - openshift_facts + +- name: Update master count + hosts: oo_masters:!oo_masters_to_config + serial: 1 + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" + master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" + - name: Update master count + modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.masterCount' + yaml_value: "{{ openshift.master.master_count }}" + notify: + - restart master api + - restart master controllers + handlers: + - name: restart master api + service: name={{ openshift.common.service_type }}-master-controllers state=restarted + notify: verify api server + - name: restart master controllers + service: name={{ openshift.common.service_type }}-master-controllers state=restarted + - name: verify api server + command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false + +- name: Configure docker hosts + hosts: oo_masters_to-config:oo_nodes_to_config + vars: + docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" + docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" + docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" + roles: + - openshift_facts + - openshift_docker + +- include: ../openshift-master/config.yml + +- include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-cluster/scaleup.yml 
b/playbooks/common/openshift-node/scaleup.yml index c4340902b..d36f7acea 100644 --- a/playbooks/common/openshift-cluster/scaleup.yml +++ b/playbooks/common/openshift-node/scaleup.yml @@ -1,5 +1,5 @@ --- -- include: evaluate_groups.yml +- include: ../openshift-cluster/evaluate_groups.yml - name: Configure docker hosts hosts: oo_nodes_to_config @@ -12,5 +12,3 @@ - openshift_docker - include: ../openshift-node/config.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml index 5cc300bbf..b989e15fa 100644 --- a/playbooks/gce/openshift-cluster/cluster_hosts.yml +++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml @@ -10,8 +10,12 @@ g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | defau g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" +g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" + g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}" +g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}" g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml index 5cc300bbf..b989e15fa 100644 --- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml +++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml @@ -10,8 +10,12 @@ g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | defau g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" +g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" + g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}" +g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}" g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}" diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml index ca87bc655..9a3361919 100644 --- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml +++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml @@ -10,8 +10,12 @@ g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | defau g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" +g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}" + g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}" -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}" +g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}" 
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index ac1612634..af774aa32 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -346,16 +346,16 @@ resources: port_range_max: 22 remote_ip_prefix: { get_param: ssh_incoming } - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress protocol: tcp port_range_min: 10250 port_range_max: 10250 remote_mode: remote_group_id remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id infra-secgrp: type: OS::Neutron::SecurityGroup @@ -473,6 +473,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: master-secgrp } + - { get_resource: node-secgrp } floating_network: { get_param: external_net } net_name: str_replace: diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index e9b9cf540..0afcad72e 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -29,7 +29,7 @@ - name: Create or Update OpenStack Stack command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} - --timeout 3 --enable-rollback + --timeout 3 -P cluster_env={{ cluster_env }} -P cluster_id={{ cluster_id }} -P subnet_24_prefix={{ openstack_subnet_24_prefix }} @@ -59,7 +59,40 @@ until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] retries: 30 delay: 5 - failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] + + - name: Display the stack resources + command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack' + register: stack_resource_list_result + when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] + + - name: Display the stack status + command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' + register: stack_show_result + when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] + + - name: Delete the stack + command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack' + when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] + + - fail: + msg: | + + +--------------------------------------+ + | ^ | + | /!\ Failed to create the heat stack | + | /___\ | + +--------------------------------------+ + + Here is the list of stack resources and their status: + {{ stack_resource_list_result.stdout }} + + Here is the status of the stack: + {{ stack_show_result.stdout }} + + ^ Failed to create the heat stack + /!\ + /___\ Please check the `stack_status_reason` line in the above array to know why. 
+ when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - name: Read OpenStack Stack outputs command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 1e97b047b..e72509c4d 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -8,7 +8,7 @@ when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4" - name: Install etcd - action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present" + action: "{{ ansible_pkg_mgr }} name=etcd state=present" when: not openshift.common.is_containerized | bool - name: Pull etcd container diff --git a/roles/lib_dyn/library/dyn_record.py b/roles/lib_dyn/library/dyn_record.py index 7b80064f4..42d970060 100644 --- a/roles/lib_dyn/library/dyn_record.py +++ b/roles/lib_dyn/library/dyn_record.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=too-many-branches '''Ansible module to manage records in the Dyn Managed DNS service''' DOCUMENTATION = ''' --- @@ -84,9 +85,18 @@ options: description: - 'Record's "Time to live". Number of seconds the record remains cached' - 'in DNS servers or c(0) to use the default TTL for the zone.' + - 'This option is mutually exclusive with use_zone_ttl' required: false default: 0 + use_zone_ttl: + description: + - 'Use the DYN Zone's Default TTL' + - 'This option is mutually exclusive with record_ttl' + required: false + default: false + mutually exclusive with: record_ttl + notes: - The module makes a broad assumption that there will be only one record per "node" (FQDN). - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. @@ -96,18 +106,28 @@ author: "Russell Harrison" ''' EXAMPLES = ''' +# Attempting to cname www.example.com to web1.example.com +- name: Update CNAME record + dyn_record: + state: present + record_fqdn: www.example.com + zone: example.com + record_type: CNAME + record_value: web1.example.com + record_ttl: 7200 + +# Use the zones default TTL - name: Update CNAME record - local_action: - module: dyn_record + dyn_record: state: present record_fqdn: www.example.com zone: example.com record_type: CNAME record_value: web1.example.com + use_zone_ttl: true - name: Update A record - local_action: - module: dyn_record + dyn_record: state: present record_fqdn: web1.example.com zone: example.com @@ -144,7 +164,10 @@ def get_record_type(record_key): return record_key.replace('_records', '').upper() def get_record_key(record_type): - '''Get the key to look up records in the dictionary returned from get_any_records.''' + '''Get the key to look up records in the dictionary returned from get_any_records. 
+ example: + 'cname_records' + ''' return record_type.lower() + '_records' def get_any_records(module, node): @@ -166,14 +189,41 @@ def get_any_records(module, node): def get_record_values(records): '''Get the record values for each record returned by get_any_records.''' - # This simply returns the values from a dictionary of record objects + # This simply returns the values from a record ret_dict = {} for key in records.keys(): record_type = get_record_type(key) - record_value_param = RECORD_PARAMS[record_type]['value_param'] - ret_dict[key] = [getattr(elem, record_value_param) for elem in records[key]] + params = [RECORD_PARAMS[record_type]['value_param'], 'ttl', 'zone', 'fqdn'] + ret_dict[key] = [] + properties = {} + for elem in records[key]: + for param in params: + properties[param] = getattr(elem, param) + ret_dict[key].append(properties) + return ret_dict +def compare_record_values(record_type_key, user_record_value, dyn_values): + ''' Verify the user record_value exists in dyn''' + rtype = get_record_type(record_type_key) + for record in dyn_values[record_type_key]: + if user_record_value in record[RECORD_PARAMS[rtype]['value_param']]: + return True + + return False + +def compare_record_ttl(record_type_key, user_record_value, dyn_values, user_param_ttl): + ''' Verify the ttls match for the record''' + rtype = get_record_type(record_type_key) + for record in dyn_values[record_type_key]: + # find the right record + if user_record_value in record[RECORD_PARAMS[rtype]['value_param']]: + # Compare ttls from the records + if int(record['ttl']) == user_param_ttl: + return True + + return False + def main(): '''Ansible module for managing Dyn DNS records.''' module = AnsibleModule( @@ -187,16 +237,20 @@ def main(): record_type=dict(required=False, type='str', choices=[ 'A', 'AAAA', 'CNAME', 'PTR', 'TXT']), record_value=dict(required=False, type='str'), - record_ttl=dict(required=False, default=0, type='int'), + record_ttl=dict(required=False, default=None, type='int'), + use_zone_ttl=dict(required=False, default=False), ), required_together=( ['record_fqdn', 'record_value', 'record_ttl', 'record_type'] - ) + ), + mutually_exclusive=[('record_ttl', 'use_zone_ttl')] ) if IMPORT_ERROR: - module.fail_json(msg="Unable to import dyn module: https://pypi.python.org/pypi/dyn", - error=IMPORT_ERROR) + module.fail_json(msg="Unable to import dyn module: https://pypi.python.org/pypi/dyn", error=IMPORT_ERROR) + + if module.params['record_ttl'] != None and int(module.params['record_ttl']) <= 0: + module.fail_json(msg="Invalid Value for record TTL") # Start the Dyn session try: @@ -204,22 +258,16 @@ def main(): module.params['user_name'], module.params['user_password']) except dyn.tm.errors.DynectAuthError as error: - module.fail_json(msg='Unable to authenticate with Dyn', - error=str(error)) + module.fail_json(msg='Unable to authenticate with Dyn', error=str(error)) # Retrieve zone object try: dyn_zone = Zone(module.params['zone']) except dyn.tm.errors.DynectGetError as error: if 'No such zone' in str(error): - module.fail_json( - msg="Not a valid zone for this account", - zone=module.params['zone'] - ) + module.fail_json(msg="Not a valid zone for this account", zone=module.params['zone']) else: - module.fail_json(msg="Unable to retrieve zone", - error=str(error)) - + module.fail_json(msg="Unable to retrieve zone", error=str(error)) # To retrieve the node object we need to remove the zone name from the FQDN dyn_node_name = module.params['record_fqdn'].replace('.' 
+ module.params['zone'], '') @@ -233,27 +281,46 @@ def main(): # All states will need a list of the exiting records for the zone. dyn_node_records = get_any_records(module, dyn_node) + dyn_values = get_record_values(dyn_node_records) + if module.params['state'] == 'list': - module.exit_json(changed=False, - records=get_record_values( - dyn_node_records, - )) + module.exit_json(changed=False, dyn_records=dyn_values) - if module.params['state'] == 'present': + elif module.params['state'] == 'absent': + # If there are any records present we'll want to delete the node. + if dyn_node_records: + dyn_node.delete() + + # Publish the zone since we've modified it. + dyn_zone.publish() + + module.exit_json(changed=True, msg="Removed node %s from zone %s" % (dyn_node_name, module.params['zone'])) + + module.exit_json(changed=False) + + elif module.params['state'] == 'present': + + # configure the TTL variable: + # if use_zone_ttl, use the default TTL of the account. + # if TTL == None, don't check it, set it as 0 (api default) + # if TTL > 0, ensure this TTL is set + if module.params['use_zone_ttl']: + user_param_ttl = dyn_zone.ttl + elif not module.params['record_ttl']: + user_param_ttl = 0 + else: + user_param_ttl = module.params['record_ttl'] # First get a list of existing records for the node - values = get_record_values(dyn_node_records) - value_key = get_record_key(module.params['record_type']) - param_value = module.params['record_value'] + record_type_key = get_record_key(module.params['record_type']) + user_record_value = module.params['record_value'] # Check to see if the record is already in place before doing anything. - if (dyn_node_records and - dyn_node_records[value_key][0].ttl == module.params['record_ttl'] and - (param_value in values[value_key] or - param_value + '.' in values[value_key])): - - module.exit_json(changed=False) + if dyn_node_records and compare_record_values(record_type_key, user_record_value, dyn_values): + if user_param_ttl == 0 or \ + compare_record_ttl(record_type_key, user_record_value, dyn_values, user_param_ttl): + module.exit_json(changed=False, dyn_record=dyn_values) # Working on the assumption that there is only one record per # node we will first delete the node if there are any records before @@ -262,27 +329,20 @@ def main(): dyn_node.delete() # Now lets create the correct node entry. - dyn_zone.add_record(dyn_node_name, - module.params['record_type'], - module.params['record_value'], - module.params['record_ttl'] - ) + record = dyn_zone.add_record(dyn_node_name, + module.params['record_type'], + module.params['record_value'], + user_param_ttl + ) # Now publish the zone since we've updated it. dyn_zone.publish() - module.exit_json(changed=True, - msg="Created node %s in zone %s" % (dyn_node_name, module.params['zone'])) - if module.params['state'] == 'absent': - # If there are any records present we'll want to delete the node. - if dyn_node_records: - dyn_node.delete() - # Publish the zone since we've modified it. 
- dyn_zone.publish() - module.exit_json(changed=True, - msg="Removed node %s from zone %s" % (dyn_node_name, module.params['zone'])) - else: - module.exit_json(changed=False) + rmsg = "Created node [%s] " % dyn_node_name + rmsg += "in zone: [%s]" % module.params['zone'] + module.exit_json(changed=True, msg=rmsg, dyn_record=get_record_values({record_type_key: [record]})) + + module.fail_json(msg="Unknown state: [%s]" % module.params['state']) # Ansible tends to need a wild card import so we'll use it here # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml index 61344357a..783249c3a 100644 --- a/roles/lib_zabbix/tasks/create_template.yml +++ b/roles/lib_zabbix/tasks/create_template.yml @@ -61,6 +61,20 @@ with_items: template.ztriggers when: template.ztriggers is defined +- name: Create Actions + zbx_action: + zbx_server: "{{ server }}" + zbx_user: "{{ user }}" + zbx_password: "{{ password }}" + state: "{{ item.state | default('present', True) }}" + name: "{{ item.name }}" + status: "{{ item.status | default('enabled', True) }}" + escalation_time: "{{ item.escalation_time }}" + conditions_filter: "{{ item.conditions_filter }}" + operations: "{{ item.operations }}" + with_items: template.zactions + when: template.zactions is defined + - name: Create Discoveryrules zbx_discoveryrule: zbx_server: "{{ server }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 25b9534dd..0f25881f1 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -718,7 +718,7 @@ def set_version_facts_if_unset(facts): if deployment_type == 'origin': version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0') version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1') - version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.1.2') + version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0') else: version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905') version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1') @@ -916,41 +916,79 @@ def apply_provider_facts(facts, provider_facts): facts['provider'] = provider_facts return facts - -def merge_facts(orig, new, additive_facts_to_overwrite): +# Disabling pylint too many branches. This function needs refactored +# but is a very core part of openshift_facts. +# pylint: disable=too-many-branches +def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite): """ Recursively merge facts dicts Args: orig (dict): existing facts new (dict): facts to update - additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' notation ex: ['master.master_count'] Returns: dict: the merged facts """ additive_facts = ['named_certificates'] + protected_facts = ['ha', 'master_count'] facts = dict() for key, value in orig.iteritems(): + # Key exists in both old and new facts. if key in new: + # Continue to recurse if old and new fact is a dictionary. if isinstance(value, dict) and isinstance(new[key], dict): + # Collect the subset of additive facts to overwrite if + # key matches. These will be passed to the subsequent + # merge_facts call. 
relevant_additive_facts = [] - # Keep additive_facts_to_overwrite if key matches for item in additive_facts_to_overwrite: if '.' in item and item.startswith(key + '.'): relevant_additive_facts.append(item) - facts[key] = merge_facts(value, new[key], relevant_additive_facts) + + # Collect the subset of protected facts to overwrite + # if key matches. These will be passed to the + # subsequent merge_facts call. + relevant_protected_facts = [] + for item in protected_facts_to_overwrite: + if '.' in item and item.startswith(key + '.'): + relevant_protected_facts.append(item) + facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts) + # Key matches an additive fact and we are not overwriting + # it so we will append the new value to the existing value. elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]: - # Fact is additive so we'll combine orig and new. if isinstance(value, list) and isinstance(new[key], list): new_fact = [] for item in copy.deepcopy(value) + copy.deepcopy(new[key]): if item not in new_fact: new_fact.append(item) facts[key] = new_fact + # Key matches a protected fact and we are not overwriting + # it so we will determine if it is okay to change this + # fact. + elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]: + # The master count (int) can only increase unless it + # has been passed as a protected fact to overwrite. + if key == 'master_count': + if int(value) <= int(new[key]): + facts[key] = copy.deepcopy(new[key]) + else: + module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') + # ha (bool) can not change unless it has been passed + # as a protected fact to overwrite. + if key == 'ha': + if bool(value) != bool(new[key]): + module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') + else: + facts[key] = value + # No other condition has been met. Overwrite the old fact + # with the new value. else: facts[key] = copy.deepcopy(new[key]) + # Key isn't in new so add it to facts to keep it. else: facts[key] = copy.deepcopy(value) new_keys = set(new.keys()) - set(orig.keys()) @@ -1114,6 +1152,8 @@ class OpenShiftFacts(object): local_facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' notation ex: ['master.master_count'] Raises: OpenShiftFactsUnsupportedRoleError: @@ -1122,7 +1162,10 @@ class OpenShiftFacts(object): # Disabling too-many-arguments, this should be cleaned up as a TODO item. 
# pylint: disable=too-many-arguments - def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False, openshift_env=None): + def __init__(self, role, filename, local_facts, + additive_facts_to_overwrite=None, + openshift_env=None, + protected_facts_to_overwrite=None): self.changed = False self.filename = filename if role not in self.known_roles: @@ -1131,27 +1174,41 @@ class OpenShiftFacts(object): ) self.role = role self.system_facts = ansible_facts(module) - self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite, openshift_env) - - def generate_facts(self, local_facts, additive_facts_to_overwrite, openshift_env): + self.facts = self.generate_facts(local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite) + + def generate_facts(self, + local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite): """ Generate facts Args: - local_facts (dict): local_facts for overriding generated - defaults + local_facts (dict): local_facts for overriding generated defaults additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] - + openshift_env (dict): openshift_env facts for overriding generated defaults + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' notation ex: ['master.master_count'] Returns: dict: The generated facts """ - local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite, openshift_env) + local_facts = self.init_local_facts(local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite) roles = local_facts.keys() defaults = self.get_defaults(roles) provider_facts = self.init_provider_facts() facts = apply_provider_facts(defaults, provider_facts) - facts = merge_facts(facts, local_facts, additive_facts_to_overwrite) + facts = merge_facts(facts, + local_facts, + additive_facts_to_overwrite, + protected_facts_to_overwrite) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_project_cfg_facts_if_unset(facts) @@ -1315,13 +1372,20 @@ class OpenShiftFacts(object): # Disabling too-many-branches. This should be cleaned up as a TODO item. #pylint: disable=too-many-branches - def init_local_facts(self, facts=None, additive_facts_to_overwrite=False, openshift_env=None): + def init_local_facts(self, facts=None, + additive_facts_to_overwrite=None, + openshift_env=None, + protected_facts_to_overwrite=None): """ Initialize the provider facts Args: facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] + openshift_env (dict): openshift env facts to set + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' 
notation ex: ['master.master_count'] + Returns: dict: The result of merging the provided facts with existing @@ -1347,7 +1411,10 @@ class OpenShiftFacts(object): elif key not in current_level: current_level[key] = dict() current_level = current_level[key] - facts_to_set = merge_facts(facts_to_set, oo_env_facts, []) + facts_to_set = merge_facts(orig=facts_to_set, + new=oo_env_facts, + additive_facts_to_overwrite=[], + protected_facts_to_overwrite=[]) local_facts = get_local_facts_from_file(self.filename) @@ -1356,7 +1423,10 @@ class OpenShiftFacts(object): basestring): facts_to_set[arg] = module.from_json(facts_to_set[arg]) - new_local_facts = merge_facts(local_facts, facts_to_set, additive_facts_to_overwrite) + new_local_facts = merge_facts(local_facts, + facts_to_set, + additive_facts_to_overwrite, + protected_facts_to_overwrite) for facts in new_local_facts.values(): keys_to_delete = [] if isinstance(facts, dict): @@ -1452,7 +1522,8 @@ def main(): choices=OpenShiftFacts.known_roles), local_facts=dict(default=None, type='dict', required=False), additive_facts_to_overwrite=dict(default=[], type='list', required=False), - openshift_env=dict(default={}, type='dict', required=False) + openshift_env=dict(default={}, type='dict', required=False), + protected_facts_to_overwrite=dict(default=[], type='list', required=False), ), supports_check_mode=True, add_file_common_args=True, @@ -1462,6 +1533,7 @@ def main(): local_facts = module.params['local_facts'] additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] openshift_env = module.params['openshift_env'] + protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] fact_file = '/etc/ansible/facts.d/openshift.fact' @@ -1469,7 +1541,8 @@ def main(): fact_file, local_facts, additive_facts_to_overwrite, - openshift_env) + openshift_env, + protected_facts_to_overwrite) file_params = module.params.copy() file_params['path'] = fact_file diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 06f12053a..cee1f1738 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -3,7 +3,7 @@ {{ openshift.common.client_binary }} get node {{ item | lower }} register: omd_get_node until: omd_get_node.rc == 0 - retries: 20 + retries: 50 delay: 5 changed_when: false with_items: openshift_nodes diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index 6d9be81c0..66960e73e 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -25,4 +25,4 @@ --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_master_config_dir }} --overwrite=false - when: master_certs_missing + when: master_certs_missing | bool diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 7c58e943a..72869a592 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -6,40 +6,16 @@ mode: 0700 with_items: masters_needing_certs -- set_fact: - master_certificates: - - ca.crt - - ca.key - - ca.serial.txt - - admin.crt - - admin.key - - admin.kubeconfig - - master.kubelet-client.crt - - master.kubelet-client.key - - master.server.crt - - master.server.key - - openshift-master.crt - - openshift-master.key - - openshift-master.kubeconfig - - openshift-registry.crt - - openshift-registry.key - 
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 7c58e943a..72869a592 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -6,40 +6,16 @@
     mode: 0700
   with_items: masters_needing_certs

-- set_fact:
-    master_certificates:
-    - ca.crt
-    - ca.key
-    - ca.serial.txt
-    - admin.crt
-    - admin.key
-    - admin.kubeconfig
-    - master.kubelet-client.crt
-    - master.kubelet-client.key
-    - master.server.crt
-    - master.server.key
-    - openshift-master.crt
-    - openshift-master.key
-    - openshift-master.kubeconfig
-    - openshift-registry.crt
-    - openshift-registry.key
-    - openshift-registry.kubeconfig
-    - openshift-router.crt
-    - openshift-router.key
-    - openshift-router.kubeconfig
-    - serviceaccounts.private.key
-    - serviceaccounts.public.key
-    master_31_certificates:
-    - master.proxy-client.crt
-    - master.proxy-client.key
-
 - file:
     src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
     dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
     state: hard
   with_nested:
   - masters_needing_certs
-  - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_gte_3_1_or_1_1 | bool else master_certificates }}"
+  -
+    - ca.crt
+    - ca.key
+    - ca.serial.txt

 - name: Create the master certificates if they do not already exist
   command: >
@@ -49,5 +25,5 @@
     --public-master={{ item.openshift.master.public_api_url }}
     --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
     --overwrite=false
-  when: master_certs_missing
+  when: item.master_certs_missing | bool
   with_items: masters_needing_certs
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 2455fc792..e431e978c 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -23,7 +23,7 @@
     --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pv_create_output
   when: persistent_volumes | length > 0
-  failed_when: ('already exists' not in pv_create_output.stderr if pv_create_output.stderr else False) or ('created' not in pv_create_output.stdout if pv_create_output.stdout else False)
+  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
   changed_when: ('created' in pv_create_output.stdout)

 - name: Deploy PersistentVolumeClaim definitions
@@ -40,7 +40,7 @@
     --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pvc_create_output
   when: persistent_volume_claims | length > 0
-  failed_when: ('already exists' not in pvc_create_output.stderr if pvc_create_output.stderr else False) or ('created' not in pvc_create_output.stdout if pvc_create_output.stdout else False)
+  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
   changed_when: ('created' in pvc_create_output.stdout)

 - name: Delete temp directory
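The failed_when rewrite above replaces the ternary-style expression with a plain conjunction: the pv/pvc create task only counts as failed when stderr does not report "already exists" and stdout does not report "created". A rough Python equivalent of the new predicate (function and variable names are illustrative, not from the role); note that, unlike the old ternary form, it also treats completely empty output as a failure:

```
# Rough Python equivalent of the new failed_when expression above.
def create_failed(stdout, stderr):
    return ('already exists' not in stderr) and ('created' not in stdout)

assert not create_failed('persistentvolume "pv01" created', '')
assert not create_failed('', 'Error: persistentvolumes "pv01" already exists')
assert create_failed('', 'error validating data')
```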
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 8a75639c2..6143805ca 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -46,19 +46,19 @@
   with_fileglob:
   - '*/repos/*'
   when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
-        (ansible_distribution == "Fedora")
+        (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool
   notify: refresh cache

 - name: Configure gpg keys if needed
-  copy: src={{ item }} dest=/etc/pki/rpm-gpg/
+  copy: src="{{ item }}" dest=/etc/pki/rpm-gpg/
   with_fileglob:
   - "{{ openshift_deployment_type }}/gpg_keys/*"
   notify: refresh cache
   when: not openshift.common.is_containerized | bool

 - name: Configure yum repositories RHEL/CentOS
-  copy: src={{ item }} dest=/etc/yum.repos.d/
+  copy: src="{{ item }}" dest=/etc/yum.repos.d/
   with_fileglob:
   - "{{ openshift_deployment_type }}/repos/*"
   notify: refresh cache
@@ -66,7 +66,7 @@
         and not openshift.common.is_containerized | bool

 - name: Configure yum repositories Fedora
-  copy: src={{ item }} dest=/etc/yum.repos.d/
+  copy: src="{{ item }}" dest=/etc/yum.repos.d/
   with_fileglob:
   - "fedora-{{ openshift_deployment_type }}/repos/*"
   notify: refresh cache
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 1824d7881..e36f23a2b 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -7,12 +7,6 @@ g_template_openshift_master:
       - Openshift Master
     key: openshift.master.app.create

-  - key: openshift.master.registry.healthy_pct
-    description: "Shows the percentage of healthy registries in the cluster"
-    type: int
-    applications:
-    - Openshift Master
-
   - key: openshift.master.process.count
     description: Shows number of master processes running
     type: int
@@ -278,11 +272,6 @@ g_template_openshift_master:
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
     priority: high

-  - name: 'Low number of etcd watchers on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
-    priority: avg
-
   - name: 'Etcd ping failed on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
@@ -345,20 +334,6 @@ g_template_openshift_master:
     - 'Openshift Master process not running on {HOST.NAME}'
     priority: avg

-  - name: 'One or more Docker Registries is unhealthy according to {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.registry.healthy_pct.last(#2)}<100 and {Template Openshift Master:openshift.master.registry.healthy_pct.max(#2)}>50'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
-    dependencies:
-    - 'Openshift Master process not running on {HOST.NAME}'
-    priority: avg
-
-  - name: 'Multiple Docker Registries are unhealthy according to {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.registry.healthy_pct.last(#2)}<51'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
-    dependencies:
-    - 'Openshift Master process not running on {HOST.NAME}'
-    priority: high
-
   - name: 'SkyDNS port not listening on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
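The registry-health items and triggers are dropped above, while the retained etcd ping trigger keeps the usual Zabbix pattern of checking both last(#1) and last(#2), i.e. it only fires once the two most recent samples fail. A small illustrative Python sketch of that debouncing idea (this is not Zabbix code, just the underlying logic):

```
# Illustration only: fire when the two most recent samples of a check are
# both failures, mirroring last(#1)=0 and last(#2)=0 in the trigger above.
def should_alert(samples):
    """samples: check results, 1 = ok, 0 = failed, most recent last."""
    return len(samples) >= 2 and samples[-1] == 0 and samples[-2] == 0

assert not should_alert([1, 0])   # a single blip: no alert yet
assert should_alert([1, 0, 0])    # two consecutive failures: alert
```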
diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml
index c36c593df..e6daee8e4 100644
--- a/roles/os_zabbix/vars/template_openshift_node.yml
+++ b/roles/os_zabbix/vars/template_openshift_node.yml
@@ -69,4 +69,33 @@ g_template_openshift_node:
     url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
     priority: high

-
+  zactions:
+  - name: '[HEAL] OVS may not be running on {HOST.NAME}'
+    status: disabled
+    escalation_time: 60
+    conditions_filter:
+      calculation_type: "and/or"
+      conditions:
+      - conditiontype: maintenance status
+        operator: not in
+      - conditiontype: trigger name
+        operator: like
+        value: "[HEAL] OVS may not be running on"
+      - conditiontype: trigger value
+        operator: "="
+        value: PROBLEM
+    operations:
+    - esc_step_from: 1
+      esc_step_to: 1
+      esc_period: 0
+      operationtype: remote command
+      opcommand:
+        command: 'ssh -i /etc/openshift_tools/scriptrunner_id_rsa {{ ozb_scriptrunner_user }}@{{ ozb_scriptrunner_bastion_host }} remote-healer --host \"{HOST.NAME}\" --trigger \"{TRIGGER.NAME}\" --trigger-val \"{TRIGGER.VALUE}\"'
+        execute_on: "zabbix server"
+        type: 'custom script'
+      target_hosts:
+      - target_type: 'zabbix server'
+      opconditions:
+      - conditiontype: 'event acknowledged'
+        operator: '='
+        value: 'not acknowledged'
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
index d85d8b94e..e17092202 100644
--- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
@@ -43,13 +43,13 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }}
            -e ZAGG_URL={{ osohm_zagg_web_url }} \
            -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
            -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
-           -e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \
+           -e ZAGG_CLIENT_HOSTNAME={{ oo_name }} \
            -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
            -e OSO_CLUSTER_GROUP={{ cluster_group }} \
            -e OSO_CLUSTER_ID={{ oo_clusterid }} \
            -e OSO_ENVIRONMENT={{ oo_environment }} \
-           -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \
-           -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \
+           -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['oo_hosttype'] }} \
+           -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['oo_subhosttype'] }} \
            -e OSO_MASTER_HA={{ osohm_master_ha }} \
            -v /etc/localtime:/etc/localtime \
            -v /sys:/sys:ro \
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index a62bfe134..ace834323 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -528,18 +528,30 @@ Add new nodes here

 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []
+
+    # count nativeha lb as an installed host
+    try:
+        first_master = next(host for host in hosts if host.master)
+        lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '')
+        lb_host = \
+            next(host for host in hosts if host.ip == callback_facts[lb_hostname]['common']['ip'])
+
+        installed_hosts.append(lb_host)
+    except (KeyError, StopIteration):
+        pass
+
     for host in hosts:
-        if host.connect_to in callback_facts.keys() and (
-            ('common' in callback_facts[host.connect_to].keys() and
-             callback_facts[host.connect_to]['common'].get('version', '') and
-             callback_facts[host.connect_to]['common'].get('version', '') != 'None') \
-            or
-            ('master' in callback_facts[host.connect_to].keys() and
-             callback_facts[host.connect_to]['master'].get('cluster_hostname', '') == host.connect_to)
-            ):
+        if host.connect_to in callback_facts.keys() and is_installed_host(host, callback_facts):
             installed_hosts.append(host)
     return installed_hosts

+def is_installed_host(host, callback_facts):
+    version_found = 'common' in callback_facts[host.connect_to].keys() and \
+                    callback_facts[host.connect_to]['common'].get('version', '') and \
+                    callback_facts[host.connect_to]['common'].get('version', '') != 'None'
+
+    return version_found or host.master_lb or host.preconfigured
+
 # pylint: disable=too-many-branches
 # This pylint error will be corrected shortly in separate PR.
 def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
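The cli_installer.py hunk factors the inline version check out into is_installed_host() and counts a native-HA load balancer (or a preconfigured host) as installed even though it reports no OpenShift version. A self-contained sketch of that logic follows; the Host type and the sample facts are stand-ins for illustration, not the installer's real objects:

```
# Stand-in sketch of the refactored check; the real installer uses its own
# Host objects and callback facts gathered by Ansible.
from collections import namedtuple

Host = namedtuple('Host', ['connect_to', 'master_lb', 'preconfigured'])

def is_installed_host(host, callback_facts):
    facts = callback_facts.get(host.connect_to, {})
    version = facts.get('common', {}).get('version', '')
    version_found = bool(version) and version != 'None'
    # A native-HA lb or preconfigured host counts as installed too.
    return version_found or host.master_lb or host.preconfigured

facts = {
    'master1.example.com': {'common': {'version': '3.1.1'}},
    'lb.example.com': {},
}

assert is_installed_host(Host('master1.example.com', False, False), facts)
assert is_installed_host(Host('lb.example.com', True, False), facts)
assert not is_installed_host(Host('node1.example.com', False, False), facts)
```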
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 64875dbe9..f2c7289fa 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -206,7 +206,7 @@ def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
     inventory_file = generate_inventory(hosts_to_run_on)
     if len(hosts_to_run_on) != len(hosts):
         main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
-                                          'playbooks/byo/openshift-cluster/scaleup.yml')
+                                          'playbooks/byo/openshift-node/scaleup.yml')
     else:
         main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
                                           'playbooks/byo/openshift-cluster/config.yml')
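With this change, a partial run (acting only on newly added hosts) goes through playbooks/byo/openshift-node/scaleup.yml instead of the cluster-level scaleup playbook, while a run over the full host set still uses the cluster config playbook. A tiny sketch of the selection rule; the function name and example directory are illustrative, only the two playbook paths come from the hunk:

```
import os

def pick_main_playbook(playbook_dir, hosts, hosts_to_run_on):
    if len(hosts_to_run_on) != len(hosts):
        # Scale-up run: only a subset of hosts is being acted on.
        return os.path.join(playbook_dir, 'playbooks/byo/openshift-node/scaleup.yml')
    # Full install: configure the whole cluster.
    return os.path.join(playbook_dir, 'playbooks/byo/openshift-cluster/config.yml')

print(pick_main_playbook('openshift-ansible', ['m1', 'n1', 'n2'], ['n2']))
# openshift-ansible/playbooks/byo/openshift-node/scaleup.yml
```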