-rw-r--r--  .tito/packages/openshift-ansible                                                 |   2
-rwxr-xr-x  bin/cluster                                                                      |  25
-rw-r--r--  filter_plugins/openshift_master.py                                               | 469
-rw-r--r--  inventory/byo/hosts.example                                                      |  10
-rw-r--r--  openshift-ansible.spec                                                           |  36
-rw-r--r--  playbooks/aws/openshift-cluster/addNodes.yml                                     |  39
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml                                      |  34
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml                       |  16
-rw-r--r--  playbooks/byo/openshift-cluster/scaleup.yml                                      |  10
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml                           |  13
-rw-r--r--  playbooks/common/openshift-cluster/scaleup.yml                                   |   8
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py |   4
-rw-r--r--  roles/lib_zabbix/library/zbx_action.py                                           |   7
-rw-r--r--  roles/lib_zabbix/library/zbx_graph.py                                            | 331
-rw-r--r--  roles/lib_zabbix/library/zbx_graphprototype.py                                   | 331
-rw-r--r--  roles/lib_zabbix/library/zbx_httptest.py                                         |  10
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml                                       |  24
-rw-r--r--  roles/openshift_common/tasks/main.yml                                            |  13
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py                                 |   2
-rw-r--r--  roles/openshift_master/tasks/main.yml                                            |  16
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2                               |  19
-rw-r--r--  roles/openshift_master/templates/v1_partials/oauthConfig.j2                      |  93
-rw-r--r--  roles/openshift_node/tasks/main.yml                                              |   1
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2                                   |   2
-rw-r--r--  roles/os_zabbix/tasks/main.yml                                                   |  40
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml                               |  34
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml                                       |  12
-rw-r--r--  utils/src/ooinstall/cli_installer.py                                             | 122
-rw-r--r--  utils/src/ooinstall/oo_config.py                                                 |   9
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py                                         |  86
-rw-r--r--  utils/test/cli_installer_tests.py                                                | 260
31 files changed, 1913 insertions(+), 165 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 71c04689a..ce566784c 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.15-1 ./
+3.0.16-1 ./
diff --git a/bin/cluster b/bin/cluster
index a3d4b629c..9b02b4347 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -67,6 +67,21 @@ class Cluster(object):
self.action(args, inventory, env, playbook)
+ def addNodes(self, args):
+ """
+ Add nodes to an existing cluster for given provider
+ :param args: command line arguments provided by user
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{0}/openshift-cluster/addNodes.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ env['num_nodes'] = args.nodes
+ env['num_infra'] = args.infra
+
+ self.action(args, inventory, env, playbook)
+
def terminate(self, args):
"""
Destroy OpenShift cluster
@@ -292,6 +307,16 @@ if __name__ == '__main__':
help='number of external etcd hosts to create in cluster')
create_parser.set_defaults(func=cluster.create)
+
+ create_parser = action_parser.add_parser('addNodes', help='Add nodes to a cluster',
+ parents=[meta_parser])
+ create_parser.add_argument('-n', '--nodes', default=1, type=int,
+ help='number of nodes to add to the cluster')
+ create_parser.add_argument('-i', '--infra', default=1, type=int,
+ help='number of infra nodes to add to the cluster')
+ create_parser.set_defaults(func=cluster.addNodes)
+
+
config_parser = action_parser.add_parser('config',
help='Configure or reconfigure a cluster',
parents=[meta_parser])
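The new addNodes action reuses the shared meta_parser; assuming it supplies the same provider and cluster-id positional arguments as the other cluster actions, a typical invocation would look like:

  bin/cluster addNodes aws mycluster -n 2 -i 1

which sets num_nodes=2 and num_infra=1 in the playbook environment and runs playbooks/aws/openshift-cluster/addNodes.yml (added below).
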
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
new file mode 100644
index 000000000..76fe610a0
--- /dev/null
+++ b/filter_plugins/openshift_master.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-master
+'''
+import copy
+import sys
+import yaml
+
+from ansible import errors
+from ansible.runner.filter_plugins.core import bool as ansible_bool
+
+
+class IdentityProviderBase(object):
+ """ IdentityProviderBase
+
+ Attributes:
+ name (str): Identity provider Name
+ login (bool): Is this identity provider a login provider?
+ challenge (bool): Is this identity provider a challenge provider?
+ provider (dict): Provider specific config
+ _idp (dict): internal copy of the IDP dict passed in
+ _required (list): List of lists of strings for required attributes
+ _optional (list): List of lists of strings for optional attributes
+ _allow_additional (bool): Does this provider support attributes
+ not in _required and _optional
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ # disabling this check since the number of instance attributes are
+ # necessary for this class
+ # pylint: disable=too-many-instance-attributes
+ def __init__(self, api_version, idp):
+ if api_version not in ['v1']:
+ raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
+
+ self._idp = copy.deepcopy(idp)
+
+ if 'name' not in self._idp:
+ raise errors.AnsibleFilterError("|failed identity provider missing a name")
+
+ if 'kind' not in self._idp:
+ raise errors.AnsibleFilterError("|failed identity provider missing a kind")
+
+ self.name = self._idp.pop('name')
+ self.login = ansible_bool(self._idp.pop('login', False))
+ self.challenge = ansible_bool(self._idp.pop('challenge', False))
+ self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
+
+ self._required = [['mappingMethod', 'mapping_method']]
+ self._optional = []
+ self._allow_additional = True
+
+ @staticmethod
+ def validate_idp_list(idp_list):
+ ''' validates a list of idps '''
+ login_providers = [x.name for x in idp_list if x.login]
+ if len(login_providers) > 1:
+ raise errors.AnsibleFilterError("|failed multiple providers are "
+ "not allowed for login. login "
+ "providers: {0}".format(', '.join(login_providers)))
+
+ names = [x.name for x in idp_list]
+ if len(set(names)) != len(names):
+ raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
+
+ for idp in idp_list:
+ idp.validate()
+
+ def validate(self):
+ ''' validate an instance of this idp class '''
+ valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
+ if self.provider['mappingMethod'] not in valid_mapping_methods:
+ raise errors.AnsibleFilterError("|failed unkown mapping method "
+ "for provider {0}".format(self.__class__.__name__))
+
+ @staticmethod
+ def get_default(key):
+ ''' get a default value for a given key '''
+ if key == 'mappingMethod':
+ return 'claim'
+ else:
+ return None
+
+ def set_provider_item(self, items, required=False):
+ ''' set a provider item based on the list of item names provided. '''
+ for item in items:
+ provider_key = items[0]
+ if item in self._idp:
+ self.provider[provider_key] = self._idp.pop(item)
+ break
+ else:
+ default = self.get_default(provider_key)
+ if default is not None:
+ self.provider[provider_key] = default
+ elif required:
+ raise errors.AnsibleFilterError("|failed provider {0} missing "
+ "required key {1}".format(self.__class__.__name__, provider_key))
+
+ def set_provider_items(self):
+ ''' set the provider items for this idp '''
+ for items in self._required:
+ self.set_provider_item(items, True)
+ for items in self._optional:
+ self.set_provider_item(items)
+ if self._allow_additional:
+ for key in self._idp.keys():
+ self.set_provider_item([key])
+ else:
+ if len(self._idp) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} "
+ "contains unknown keys "
+ "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
+
+ def to_dict(self):
+ ''' translate this idp to a dictionary '''
+ return dict(name=self.name, challenge=self.challenge,
+ login=self.login, provider=self.provider)
+
+
+class LDAPPasswordIdentityProvider(IdentityProviderBase):
+ """ LDAPPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['attributes'], ['url'], ['insecure']]
+ self._optional += [['ca'],
+ ['bindDN', 'bind_dn'],
+ ['bindPassword', 'bind_password']]
+
+ self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
+
+ if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:
+ pref_user = self._idp['attributes'].pop('preferred_username')
+ self._idp['attributes']['preferredUsername'] = pref_user
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderBase.validate(self)
+ if not isinstance(self.provider['attributes'], dict):
+ raise errors.AnsibleFilterError("|failed attributes for provider "
+ "{0} must be a dictionary".format(self.__class__.__name__))
+
+ attrs = ['id', 'email', 'name', 'preferredUsername']
+ for attr in attrs:
+ if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):
+ raise errors.AnsibleFilterError("|failed {0} attribute for "
+ "provider {1} must be a list".format(attr, self.__class__.__name__))
+
+ unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)
+ if len(unknown_attrs) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+ "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs)))
+
+
+class KeystonePasswordIdentityProvider(IdentityProviderBase):
+ """ KeystoneIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['url'], ['domainName', 'domain_name']]
+ self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
+
+
+class RequestHeaderIdentityProvider(IdentityProviderBase):
+ """ RequestHeaderIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['headers']]
+ self._optional += [['challengeURL', 'challenge_url'],
+ ['loginURL', 'login_url'],
+ ['clientCA', 'client_ca']]
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderBase.validate(self)
+ if not isinstance(self.provider['headers'], list):
+ raise errors.AnsibleFilterError("|failed headers for provider {0} "
+ "must be a list".format(self.__class__.__name__))
+
+
+class AllowAllPasswordIdentityProvider(IdentityProviderBase):
+ """ AllowAllPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+
+
+class DenyAllPasswordIdentityProvider(IdentityProviderBase):
+ """ DenyAllPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+
+
+class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
+ """ HTPasswdPasswordIdentity
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['file', 'filename', 'fileName', 'file_name']]
+
+ @staticmethod
+ def get_default(key):
+ if key == 'file':
+ return '/etc/origin/htpasswd'
+ else:
+ return IdentityProviderBase.get_default(key)
+
+
+class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
+ """ BasicAuthPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
+ self._optional += [['key']]
+
+
+class IdentityProviderOauthBase(IdentityProviderBase):
+ """ IdentityProviderOauthBase
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderBase.validate(self)
+ if self.challenge:
+ raise errors.AnsibleFilterError("|failed provider {0} does not "
+ "allow challenge authentication".format(self.__class__.__name__))
+
+
+class OpenIDIdentityProvider(IdentityProviderOauthBase):
+ """ OpenIDIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderOauthBase.__init__(self, api_version, idp)
+ self._required += [['claims'], ['urls']]
+ self._optional += [['ca'],
+ ['extraScopes'],
+ ['extraAuthorizeParameters']]
+ if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
+ pref_user = self._idp['claims'].pop('preferred_username')
+ self._idp['claims']['preferredUsername'] = pref_user
+ if 'urls' in self._idp and 'user_info' in self._idp['urls']:
+ user_info = self._idp['urls'].pop('user_info')
+ self._idp['urls']['userInfo'] = user_info
+ if 'extra_scopes' in self._idp:
+ self._idp['extraScopes'] = self._idp.pop('extra_scopes')
+ if 'extra_authorize_parameters' in self._idp:
+ self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
+
+ if 'extraAuthorizeParameters' in self._idp:
+ if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
+ val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
+ self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
+
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderOauthBase.validate(self)
+ if not isinstance(self.provider['claims'], dict):
+ raise errors.AnsibleFilterError("|failed claims for provider {0} "
+ "must be a dictionary".format(self.__class__.__name__))
+
+ if 'extraScopes' in self.provider and not isinstance(self.provider['extraScopes'], list):
+ raise errors.AnsibleFilterError("|failed extraScopes for provider "
+ "{0} must be a list".format(self.__class__.__name__))
+ if ('extraAuthorizeParameters' in self.provider
+ and not isinstance(self.provider['extraAuthorizeParameters'], dict)):
+ raise errors.AnsibleFilterError("|failed extraAuthorizeParameters "
+ "for provider {0} must be a dictionary".format(self.__class__.__name__))
+
+ required_claims = ['id']
+ optional_claims = ['email', 'name', 'preferredUsername']
+ all_claims = required_claims + optional_claims
+
+ for claim in required_claims:
+ if claim not in self.provider['claims']:
+ raise errors.AnsibleFilterError("|failed {0} claim missing "
+ "for provider {1}".format(claim, self.__class__.__name__))
+
+ for claim in all_claims:
+ if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
+ raise errors.AnsibleFilterError("|failed {0} claims for "
+ "provider {1} must be a list".format(claim, self.__class__.__name__))
+
+ unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
+ if len(unknown_claims) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+ "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
+
+ if not isinstance(self.provider['urls'], dict):
+ raise errors.AnsibleFilterError("|failed urls for provider {0} "
+ "must be a dictionary".format(self.__class__.__name__))
+
+ required_urls = ['authorize', 'token']
+ optional_urls = ['userInfo']
+ all_urls = required_urls + optional_urls
+
+ for url in required_urls:
+ if url not in self.provider['urls']:
+ raise errors.AnsibleFilterError("|failed {0} url missing for "
+ "provider {1}".format(url, self.__class__.__name__))
+
+ unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
+ if len(unknown_urls) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+ "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
+
+
+class GoogleIdentityProvider(IdentityProviderOauthBase):
+ """ GoogleIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderOauthBase.__init__(self, api_version, idp)
+ self._optional += [['hostedDomain', 'hosted_domain']]
+
+
+class GitHubIdentityProvider(IdentityProviderOauthBase):
+ """ GitHubIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ pass
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by the openshift_master role'''
+
+ @staticmethod
+ def translate_idps(idps, api_version):
+ ''' Translates a list of dictionaries into a valid identityProviders config '''
+ idp_list = []
+
+ if not isinstance(idps, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
+ for idp in idps:
+ if not isinstance(idp, dict):
+ raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
+
+ cur_module = sys.modules[__name__]
+ idp_class = getattr(cur_module, idp['kind'], None)
+ idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
+ idp_inst.set_provider_items()
+ idp_list.append(idp_inst)
+
+
+ IdentityProviderBase.validate_idp_list(idp_list)
+ return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {"translate_idps": self.translate_idps}
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 423581281..ef0736b63 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -144,6 +144,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# configure how often node iptables rules are refreshed
#openshift_node_iptables_sync_period=5s
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index ea9cc91bd..09569761f 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.15
+Version: 3.0.16
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -258,6 +258,40 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Nov 24 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.16-1
+- Silencing pylint branch errors for now for the atomic-openshift-installer
+ harness (bleanhar@redhat.com)
+- Properly setting scheduleability for HA Master scenarios
+ (bleanhar@redhat.com)
+- added graphs (mwoodson@redhat.com)
+- Rework setting of hostname (jdetiber@redhat.com)
+- Fixed a bug in the actions. It now supports changing opconditions
+ (kwoodson@redhat.com)
+- Conditionally set the nodeIP (jdetiber@redhat.com)
+- Bug 1284991 - "atomic-openshift-installer uninstall" error when configuration
+ file is missing. (bleanhar@redhat.com)
+- Avoid printing the master and node totals in the add-a-node scenario
+ (bleanhar@redhat.com)
+- Fixing tests for quick_ha (bleanhar@redhat.com)
+- Removing a debug line (bleanhar@redhat.com)
+- atomic-openshift-installer: Fix lint issue (smunilla@redhat.com)
+- Handling preconfigured load balancers (bleanhar@redhat.com)
+- atomic-openshift-installer: Rename ha_proxy (smunilla@redhat.com)
+- atomic-openshift-installer: Reverse version and host collection
+ (smunilla@redhat.com)
+- cli_installer_tests: Add test for unattended quick HA (smunilla@redhat.com)
+- Breakup inventory writing (smunilla@redhat.com)
+- Enforce 1 or 3 masters (smunilla@redhat.com)
+- Add interactive test (smunilla@redhat.com)
+- atomic-openshift-installer: HA for quick installer (smunilla@redhat.com)
+- Adding zbx_graph support (kwoodson@redhat.com)
+- Modified step params to be in order when passed as a list
+ (kwoodson@redhat.com)
+- Add serviceAccountConfig.masterCA during 3.1 upgrade (jdetiber@redhat.com)
+- Use the identity_providers from openshift_facts instead of always using the
+ inventory variable (jdetiber@redhat.com)
+- Refactor master identity provider configuration (jdetiber@redhat.com)
+
* Fri Nov 20 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.15-1
- Fixing clone group functionality. Also separating extra_vars from
extra_groups (kwoodson@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/addNodes.yml b/playbooks/aws/openshift-cluster/addNodes.yml
new file mode 100644
index 000000000..fff3e401b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/addNodes.yml
@@ -0,0 +1,39 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
+ vars:
+ oo_extend_env: True
+ tasks:
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+- include: scaleup.yml
+- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..4415700a3
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -0,0 +1,34 @@
+---
+
+- hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: "{{ groups.nodes_to_add }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: ../../common/openshift-cluster/scaleup.yml
+ vars:
+ g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
+ g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+ g_new_nodes_group: 'nodes_to_add'
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 22c617fea..15e775770 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -190,6 +190,22 @@
- instances
- ec2.instances
+- name: Add new instances to nodes_to_add group if needed
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: nodes_to_add
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ openshift_node_labels: "{{ node_label }}"
+ logrotate_scripts: "{{ logrotate }}"
+ with_together:
+ - instances
+ - ec2.instances
+ when: oo_extend_env is defined and oo_extend_env | bool
+
- name: Wait for ssh
wait_for: "port=22 host={{ item.dns_name }}"
with_items: ec2.instances
diff --git a/playbooks/byo/openshift-cluster/scaleup.yml b/playbooks/byo/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..70644d427
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/scaleup.yml
@@ -0,0 +1,10 @@
+---
+- include: ../../common/openshift-cluster/scaleup.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_new_nodes_group: "{{ 'new_nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 2bb69614f..34da372a4 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -12,8 +12,8 @@
when: g_masters_group is not defined
- fail:
- msg: This playbook requires g_nodes_group to be set
- when: g_nodes_group is not defined
+ msg: This playbook requires g_nodes_group or g_new_nodes_group to be set
+ when: g_nodes_group is not defined and g_new_nodes_group is not defined
- fail:
msg: This playbook requires g_lb_group to be set
@@ -35,14 +35,19 @@
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_masters_group] | default([])
+ # Use g_new_nodes_group if it exists otherwise g_nodes_group
+ - set_fact:
+ g_nodes_to_config: "{{ g_new_nodes_group | default(g_nodes_group | default([])) }}"
+
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
+ with_items: groups[g_nodes_to_config] | default([])
+ # Skip adding the master to oo_nodes_to_config when g_new_nodes_group is defined
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
@@ -50,7 +55,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
+ when: g_nodeonmaster | default(false) == true and g_new_nodes_group is not defined
- name: Evaluate oo_first_etcd
add_host:
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
index 6d2777732..e1778e41e 100644
--- a/playbooks/common/openshift-cluster/scaleup.yml
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -1,13 +1,5 @@
---
- include: evaluate_groups.yml
- vars:
- g_etcd_group: "{{ 'etcd' }}"
- g_masters_group: "{{ 'masters' }}"
- g_nodes_group: "{{ 'nodes' }}"
- g_lb_group: "{{ 'lb' }}"
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: 2
- openshift_deployment_type: "{{ deployment_type }}"
- include: ../openshift-node/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index a6721bb92..9a065fd1c 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -78,6 +78,10 @@ def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
config['kubernetesMasterConfig'].pop('apiLevels')
changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
+ # Add masterCA to serviceAccountConfig
+ if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
+ config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
+
# Add proxyClientInfo to master-config
if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig']['proxyClientInfo'] = {
diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py
index d64cebae1..24693e5db 100644
--- a/roles/lib_zabbix/library/zbx_action.py
+++ b/roles/lib_zabbix/library/zbx_action.py
@@ -89,6 +89,9 @@ def operation_differences(zabbix_ops, user_ops):
for zab, user in zip(zabbix_ops, user_ops):
for key, val in user.items():
if key == 'opconditions':
+ if len(zab[key]) != len(val):
+ rval[key] = val
+ break
for z_cond, u_cond in zip(zab[key], user[key]):
if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
['conditiontype', 'operator', 'value']]):
@@ -330,9 +333,9 @@ def get_action_operations(zapi, inc_operations):
condition['operator'] = 0
if condition['value'] == 'acknowledged':
- condition['operator'] = 1
+ condition['value'] = 1
else:
- condition['operator'] = 0
+ condition['value'] = 0
return inc_operations
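In the Zabbix action API, an operation condition with conditiontype 14 means "event acknowledged", and the acknowledged state is carried by the value field while operator stays 0 ('='); that is the mix-up this hunk corrects. As background (not taken from this diff), the fixed branch now yields conditions shaped like:

  # Opcondition shape produced after the fix; semantics per the Zabbix
  # action API, stated here as background.
  condition = {'conditiontype': 14,  # event acknowledged
               'operator': 0,        # '=' comparison
               'value': 1}           # 1 = acknowledged, 0 = not acknowledged
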
diff --git a/roles/lib_zabbix/library/zbx_graph.py b/roles/lib_zabbix/library/zbx_graph.py
new file mode 100644
index 000000000..121ec3dee
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_graph.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix graphs
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix graphs ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#---
+#- hosts: localhost
+# gather_facts: no
+# tasks:
+# - zbx_graph:
+# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
+# zbx_user: Admin
+# zbx_password: zabbix
+# name: Test Graph
+# height: 300
+# width: 500
+# graph_items:
+# - item_name: openshift.master.etcd.create.fail
+# color: red
+# line_style: bold
+# - item_name: openshift.master.etcd.create.success
+# color: red
+# line_style: bold
+#
+#
+
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_graph_type(graphtype):
+ '''
+ Possible values:
+ 0 - normal;
+ 1 - stacked;
+ 2 - pie;
+ 3 - exploded;
+ '''
+ gtype = 0
+ if 'stacked' in graphtype:
+ gtype = 1
+ elif 'pie' in graphtype:
+ gtype = 2
+ elif 'exploded' in graphtype:
+ gtype = 3
+
+ return gtype
+
+def get_show_legend(show_legend):
+ '''Get the value for show_legend
+ 0 - hide
+ 1 - (default) show
+ '''
+ rval = 1
+ if 'hide' == show_legend:
+ rval = 0
+
+ return rval
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'filter': {'host': template_name},})
+
+ if content.has_key('result'):
+ return content['result'][0]['templateid']
+
+ return None
+
+def get_color(color_in):
+ ''' Receive a color and translate it to a hex representation of the color
+
+ A few common colors are set up by default
+ '''
+ colors = {'black': '000000',
+ 'red': 'FF0000',
+ 'pink': 'FFC0CB',
+ 'purple': '800080',
+ 'orange': 'FFA500',
+ 'gold': 'FFD700',
+ 'yellow': 'FFFF00',
+ 'green': '008000',
+ 'cyan': '00FFFF',
+ 'aqua': '00FFFF',
+ 'blue': '0000FF',
+ 'brown': 'A52A2A',
+ 'gray': '808080',
+ 'grey': '808080',
+ 'silver': 'C0C0C0',
+ }
+ if colors.has_key(color_in):
+ return colors[color_in]
+
+ return color_in
+
+def get_line_style(style):
+ '''determine the line style
+ '''
+ line_style = {'line': 0,
+ 'filled': 1,
+ 'bold': 2,
+ 'dot': 3,
+ 'dashed': 4,
+ 'gradient': 5,
+ }
+
+ if line_style.has_key(style):
+ return line_style[style]
+
+ return 0
+
+def get_calc_function(func):
+ '''Determine the calculation function'''
+ rval = 2 # default to avg
+ if 'min' in func:
+ rval = 1
+ elif 'max' in func:
+ rval = 4
+ elif 'all' in func:
+ rval = 7
+ elif 'last' in func:
+ rval = 9
+
+ return rval
+
+def get_graph_item_type(gtype):
+ '''Determine the graph item type
+ '''
+ rval = 0 # simple graph type
+ if 'sum' in gtype:
+ rval = 2
+
+ return rval
+
+def get_graph_items(zapi, gitems):
+ '''Get graph items by id'''
+
+ r_items = []
+ for item in gitems:
+ content = zapi.get_content('item',
+ 'get',
+ {'filter': {'name': item['item_name']}})
+ _ = item.pop('item_name')
+ color = get_color(item.pop('color'))
+ drawtype = get_line_style(item.get('line_style', 'line'))
+ func = get_calc_function(item.get('calc_func', 'avg'))
+ g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
+
+ if content.has_key('result'):
+ tmp = {'itemid': content['result'][0]['itemid'],
+ 'color': color,
+ 'drawtype': drawtype,
+ 'calc_fnc': func,
+ 'type': g_type,
+ }
+ r_items.append(tmp)
+
+ return r_items
+
+def compare_gitems(zabbix_items, user_items):
+ '''Compare zabbix results with the user's supplied items
+ return True if user_items are equal
+ return False if any of the values differ
+ '''
+ if len(zabbix_items) != len(user_items):
+ return False
+
+ for u_item in user_items:
+ for z_item in zabbix_items:
+ if u_item['itemid'] == z_item['itemid']:
+ if not all([str(value) == z_item[key] for key, value in u_item.items()]):
+ return False
+
+ return True
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_graphs
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ height=dict(default=None, type='int'),
+ width=dict(default=None, type='int'),
+ graph_type=dict(default='normal', type='str'),
+ show_legend=dict(default='show', type='str'),
+ state=dict(default='present', type='str'),
+ graph_items=dict(default=None, type='list'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'graph'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'name': module.params['name']},
+ #'templateids': templateid,
+ 'selectGraphItems': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ params = {'name': module.params['name'],
+ 'height': module.params['height'],
+ 'width': module.params['width'],
+ 'graphtype': get_graph_type(module.params['graph_type']),
+ 'show_legend': get_show_legend(module.params['show_legend']),
+ 'gitems': get_graph_items(zapi, module.params['graph_items']),
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'gitems':
+ if not compare_gitems(zab_results[key], value):
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences['graphid'] = zab_results['graphid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_graphprototype.py b/roles/lib_zabbix/library/zbx_graphprototype.py
new file mode 100644
index 000000000..8287c1e2d
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_graphprototype.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix graphprototypes
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix graphprototypes ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#---
+#- hosts: localhost
+# gather_facts: no
+# tasks:
+# - zbx_graphprototype:
+# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
+# zbx_user: Admin
+# zbx_password: zabbix
+# name: Test Graph
+# height: 300
+# width: 500
+# graph_items:
+# - item_name: Bytes per second IN on network interface {#OSO_NET_INTERFACE}
+# color: red
+# line_style: bold
+# item_type: prototype
+# - item_name: Template OS Linux: Bytes per second OUT on network interface {#OSO_NET_INTERFACE}
+# item_type: prototype
+#
+#
+
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_graph_type(graphtype):
+ '''
+ Possible values:
+ 0 - normal;
+ 1 - stacked;
+ 2 - pie;
+ 3 - exploded;
+ '''
+ gtype = 0
+ if 'stacked' in graphtype:
+ gtype = 1
+ elif 'pie' in graphtype:
+ gtype = 2
+ elif 'exploded' in graphtype:
+ gtype = 3
+
+ return gtype
+
+def get_show_legend(show_legend):
+ '''Get the value for show_legend
+ 0 - hide
+ 1 - (default) show
+ '''
+ rval = 1
+ if 'hide' == show_legend:
+ rval = 0
+
+ return rval
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'filter': {'host': template_name},})
+
+ if content.has_key('result'):
+ return content['result'][0]['templateid']
+
+ return None
+
+def get_color(color_in='black'):
+ ''' Receive a color and translate it to a hex representation of the color
+
+ A few common colors are set up by default
+ '''
+ colors = {'black': '000000',
+ 'red': 'FF0000',
+ 'pink': 'FFC0CB',
+ 'purple': '800080',
+ 'orange': 'FFA500',
+ 'gold': 'FFD700',
+ 'yellow': 'FFFF00',
+ 'green': '008000',
+ 'cyan': '00FFFF',
+ 'aqua': '00FFFF',
+ 'blue': '0000FF',
+ 'brown': 'A52A2A',
+ 'gray': '808080',
+ 'grey': '808080',
+ 'silver': 'C0C0C0',
+ }
+ if colors.has_key(color_in):
+ return colors[color_in]
+
+ return color_in
+
+def get_line_style(style):
+ '''determine the line style
+ '''
+ line_style = {'line': 0,
+ 'filled': 1,
+ 'bold': 2,
+ 'dot': 3,
+ 'dashed': 4,
+ 'gradient': 5,
+ }
+
+ if line_style.has_key(style):
+ return line_style[style]
+
+ return 0
+
+def get_calc_function(func):
+ '''Determine the calculation function'''
+ rval = 2 # default to avg
+ if 'min' in func:
+ rval = 1
+ elif 'max' in func:
+ rval = 4
+ elif 'all' in func:
+ rval = 7
+ elif 'last' in func:
+ rval = 9
+
+ return rval
+
+def get_graph_item_type(gtype):
+ '''Determine the graph item type
+ '''
+ rval = 0 # simple graph type
+ if 'sum' in gtype:
+ rval = 2
+
+ return rval
+
+def get_graph_items(zapi, gitems):
+ '''Get graph items by id'''
+
+ r_items = []
+ for item in gitems:
+ content = zapi.get_content('item%s' % item.get('item_type', ''),
+ 'get',
+ {'filter': {'name': item['item_name']}})
+ _ = item.pop('item_name')
+ color = get_color(item.pop('color', 'black'))
+ drawtype = get_line_style(item.get('line_style', 'line'))
+ func = get_calc_function(item.get('calc_func', 'avg'))
+ g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
+
+ if content.has_key('result'):
+ tmp = {'itemid': content['result'][0]['itemid'],
+ 'color': color,
+ 'drawtype': drawtype,
+ 'calc_fnc': func,
+ 'type': g_type,
+ }
+ r_items.append(tmp)
+
+ return r_items
+
+def compare_gitems(zabbix_items, user_items):
+ '''Compare zabbix results with the user's supplied items
+ return True if user_items are equal
+ return False if any of the values differ
+ '''
+ if len(zabbix_items) != len(user_items):
+ return False
+
+ for u_item in user_items:
+ for z_item in zabbix_items:
+ if u_item['itemid'] == z_item['itemid']:
+ if not all([str(value) == z_item[key] for key, value in u_item.items()]):
+ return False
+
+ return True
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_graphprototypes
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ height=dict(default=None, type='int'),
+ width=dict(default=None, type='int'),
+ graph_type=dict(default='normal', type='str'),
+ show_legend=dict(default='show', type='str'),
+ state=dict(default='present', type='str'),
+ graph_items=dict(default=None, type='list'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'graphprototype'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'name': module.params['name']},
+ #'templateids': templateid,
+ 'selectGraphItems': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ params = {'name': module.params['name'],
+ 'height': module.params['height'],
+ 'width': module.params['width'],
+ 'graphtype': get_graph_type(module.params['graph_type']),
+ 'show_legend': get_show_legend(module.params['show_legend']),
+ 'gitems': get_graph_items(zapi, module.params['graph_items']),
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'gitems':
+ if not compare_gitems(zab_results[key], value):
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences['graphid'] = zab_results['graphid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_httptest.py b/roles/lib_zabbix/library/zbx_httptest.py
index 96733b3d1..6b28117ad 100644
--- a/roles/lib_zabbix/library/zbx_httptest.py
+++ b/roles/lib_zabbix/library/zbx_httptest.py
@@ -131,6 +131,14 @@ def steps_equal(zab_steps, user_steps):
return True
+def process_steps(steps):
+ '''Preprocess the step parameters'''
+ for idx, step in enumerate(steps):
+ if not step.has_key('no'):
+ step['no'] = idx + 1
+
+ return steps
+
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
@@ -218,7 +226,7 @@ def main():
'hostid': hostid,
'agent': module.params['agent'],
'retries': module.params['retries'],
- 'steps': module.params['steps'],
+ 'steps': process_steps(module.params['steps']),
'applicationid': get_app_id(zapi, module.params['application']),
'delay': module.params['interval'],
'verify_host': get_verify_host(module.params['verify_host']),
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 2992505bf..47749389e 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -105,3 +105,27 @@
description: "{{ item.description | default('', True) }}"
with_items: template.ztriggerprototypes
when: template.ztriggerprototypes is defined
+
+- name: Create Graphs
+ zbx_graph:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ height: "{{ item.height }}"
+ width: "{{ item.width }}"
+ graph_items: "{{ item.graph_items }}"
+ with_items: template.zgraphs
+ when: template.zgraphs is defined
+
+- name: Create Graph Prototypes
+ zbx_graphprototype:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ height: "{{ item.height }}"
+ width: "{{ item.width }}"
+ graph_items: "{{ item.graph_items }}"
+ with_items: template.zgraphprototypes
+ when: template.zgraphprototypes is defined
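These two tasks expect each template vars file to define zgraphs / zgraphprototypes lists whose entries carry the module parameters used above; a hypothetical entry, modeled on the example in the zbx_graph doc comment:

  # Hypothetical shape of a template.zgraphs entry consumed by the
  # Create Graphs task; keys mirror the zbx_graph argument_spec.
  zgraphs_entry = {
      'name': 'Test Graph',
      'height': 300,
      'width': 500,
      'graph_items': [
          {'item_name': 'openshift.master.etcd.create.fail',
           'color': 'red',
           'line_style': 'bold'},
      ],
  }

The os_zabbix vars changes listed in the diffstat (template_openshift_master.yml, template_os_linux.yml) presumably add entries of this shape.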
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index e9df4e364..55065b3de 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -3,6 +3,10 @@
msg: Flannel can not be used with openshift sdn
when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
+- fail:
+ msg: openshift_hostname must be 64 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 64
+
- name: Set common Cluster facts
openshift_facts:
role: common
@@ -18,3 +22,12 @@
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
+
+ # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
+ # hostname by default.
+- set_fact:
+ set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
+
+- name: Set hostname
+ hostname: name={{ openshift.common.hostname }}
+ when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 6006bfa9d..b60e42c71 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1074,7 +1074,7 @@ class OpenShiftFacts(object):
if 'node' in roles:
node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
- iptables_sync_period='5s')
+ iptables_sync_period='5s', set_node_ip=False)
defaults['node'] = node
return defaults
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 185bfb8f3..2cf2a53c4 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,13 +1,16 @@
---
-# TODO: add validation for openshift_master_identity_providers
# TODO: add ability to configure certificates given either a local file to
# point to or certificate contents, set in default cert locations.
-- assert:
- that:
- - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods
- when: openshift_master_oauth_grant_method is defined
+# Authentication Variable Validation
+# TODO: validate the different identity provider kinds as well
+- fail:
+ msg: >
+ Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
+ when: openshift_master_oauth_grant_method is defined and openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
+
+# HA Variable Validation
- fail:
msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
@@ -172,6 +175,9 @@
- restart master
- restart master api
+- set_fact:
+ translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
template:
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 2a37c06d9..9f4a17f0a 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -107,7 +107,24 @@ networkConfig:
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
-{% include 'v1_partials/oauthConfig.j2' %}
+oauthConfig:
+ assetPublicURL: {{ openshift.master.public_console_url }}/
+ grantConfig:
+ method: {{ openshift.master.oauth_grant_method }}
+ identityProviders:
+{% for line in translated_identity_providers.splitlines() %}
+ {{ line }}
+{% endfor %}
+ masterCA: ca.crt
+ masterPublicURL: {{ openshift.master.public_api_url }}
+ masterURL: {{ openshift.master.api_url }}
+ sessionConfig:
+ sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
+ sessionName: {{ openshift.master.session_name }}
+ sessionSecretsFile: {{ openshift.master.session_secrets_file }}
+ tokenConfig:
+ accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
+ authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
pauseControllers: false
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
deleted file mode 100644
index 8a4f5a746..000000000
--- a/roles/openshift_master/templates/v1_partials/oauthConfig.j2
+++ /dev/null
@@ -1,93 +0,0 @@
-{% macro identity_provider_config(identity_provider) %}
- apiVersion: v1
- kind: {{ identity_provider.kind }}
-{% if identity_provider.kind == 'HTPasswdPasswordIdentityProvider' %}
- file: {{ identity_provider.filename }}
-{% elif identity_provider.kind == 'BasicAuthPasswordIdentityProvider' %}
- url: {{ identity_provider.url }}
-{% for key in ('ca', 'certFile', 'keyFile') %}
-{% if key in identity_provider %}
- {{ key }}: "{{ identity_provider[key] }}"
-{% endif %}
-{% endfor %}
-{% elif identity_provider.kind == 'LDAPPasswordIdentityProvider' %}
- attributes:
-{% for attribute_key in identity_provider.attributes %}
- {{ attribute_key }}:
-{% for attribute_value in identity_provider.attributes[attribute_key] %}
- - {{ attribute_value }}
-{% endfor %}
-{% endfor %}
-{% for key in ('bindDN', 'bindPassword', 'ca') %}
- {{ key }}: "{{ identity_provider[key] }}"
-{% endfor %}
-{% for key in ('insecure', 'url') %}
- {{ key }}: {{ identity_provider[key] }}
-{% endfor %}
-{% elif identity_provider.kind == 'RequestHeaderIdentityProvider' %}
- headers: {{ identity_provider.headers }}
-{% if 'clientCA' in identity_provider %}
- clientCA: {{ identity_provider.clientCA }}
-{% endif %}
-{% elif identity_provider.kind == 'GitHubIdentityProvider' %}
- clientID: {{ identity_provider.clientID }}
- clientSecret: {{ identity_provider.clientSecret }}
-{% elif identity_provider.kind == 'GoogleIdentityProvider' %}
- clientID: {{ identity_provider.clientID }}
- clientSecret: {{ identity_provider.clientSecret }}
-{% if 'hostedDomain' in identity_provider %}
- hostedDomain: {{ identity_provider.hostedDomain }}
-{% endif %}
-{% elif identity_provider.kind == 'OpenIDIdentityProvider' %}
- clientID: {{ identity_provider.clientID }}
- clientSecret: {{ identity_provider.clientSecret }}
- claims:
- id: identity_provider.claims.id
-{% for claim_key in ('preferredUsername', 'name', 'email') %}
-{% if claim_key in identity_provider.claims %}
- {{ claim_key }}: {{ identity_provider.claims[claim_key] }}
-{% endif %}
-{% endfor %}
- urls:
- authorize: {{ identity_provider.urls.authorize }}
- token: {{ identity_provider.urls.token }}
-{% if 'userInfo' in identity_provider.urls %}
- userInfo: {{ identity_provider.userInfo }}
-{% endif %}
-{% if 'extraScopes' in identity_provider %}
- extraScopes:
-{% for scope in identity_provider.extraScopes %}
- - {{ scope }}
-{% endfor %}
-{% endif %}
-{% if 'extraAuthorizeParameters' in identity_provider %}
- extraAuthorizeParameters:
-{% for param_key, param_value in identity_provider.extraAuthorizeParameters.iteritems() %}
- {{ param_key }}: {{ param_value }}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endmacro %}
-oauthConfig:
- assetPublicURL: {{ openshift.master.public_console_url }}/
- grantConfig:
- method: {{ openshift.master.oauth_grant_method }}
- identityProviders:
-{% for identity_provider in openshift.master.identity_providers %}
- - name: {{ identity_provider.name }}
- challenge: {{ identity_provider.challenge }}
- login: {{ identity_provider.login }}
- provider:
-{{ identity_provider_config(identity_provider) }}
-{%- endfor %}
- masterCA: ca.crt
- masterPublicURL: {{ openshift.master.public_api_url }}
- masterURL: {{ openshift.master.api_url }}
- sessionConfig:
- sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
- sessionName: {{ openshift.master.session_name }}
- sessionSecretsFile: {{ openshift.master.session_secrets_file }}
- tokenConfig:
- accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
- authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
-{# Comment to preserve newline after authorizeTokenMaxAgeSeconds #}
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d11bc5123..42d984a09 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -34,6 +34,7 @@
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
+ set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly.
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7d2f506e3..41a303dee 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -23,7 +23,9 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
+{% endif %}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
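The new `set_node_ip` fact gates whether `nodeIP` is emitted at all, so hosts whose detected IP would be wrong (multi-homed boxes, NAT) can omit it. A sketch of the conditional; note that `bool` below is a stand-in for Ansible's filter, which plain Jinja2 does not ship:

    # Approximates the node.yaml.v1.j2 conditional outside of Ansible.
    from jinja2 import Environment

    env = Environment()
    # Rough stand-in for Ansible's `bool` filter (not part of core Jinja2).
    env.filters["bool"] = lambda v: str(v).strip().lower() in ("1", "true", "yes", "y")
    tmpl = env.from_string(
        "{% if set_node_ip | bool %}nodeIP: {{ ip }}\n{% endif %}"
        "nodeName: {{ hostname | lower }}\n"
    )
    print(tmpl.render(set_node_ip=True, ip="10.0.0.2", hostname="Node1.Example.Com"))
    print(tmpl.render(set_node_ip=False, ip="10.0.0.2", hostname="Node1.Example.Com"))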
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index 59c89bb02..d0b307a3d 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -8,15 +8,35 @@
register: templates
- include_vars: template_heartbeat.yml
+ tags:
+ - heartbeat
- include_vars: template_os_linux.yml
+ tags:
+ - linux
- include_vars: template_docker.yml
+ tags:
+ - docker
- include_vars: template_openshift_master.yml
+ tags:
+ - openshift_master
- include_vars: template_openshift_node.yml
+ tags:
+ - openshift_node
- include_vars: template_ops_tools.yml
+ tags:
+ - ops_tools
- include_vars: template_app_zabbix_server.yml
+ tags:
+ - zabbix_server
- include_vars: template_app_zabbix_agent.yml
+ tags:
+ - zabbix_agent
- include_vars: template_performance_copilot.yml
+ tags:
+ - pcp
- include_vars: template_aws.yml
+ tags:
+ - aws
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
@@ -25,6 +45,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - heartbeat
- name: Include Template os_linux
include: ../../lib_zabbix/tasks/create_template.yml
@@ -33,6 +55,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - linux
- name: Include Template docker
include: ../../lib_zabbix/tasks/create_template.yml
@@ -41,6 +65,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - docker
- name: Include Template Openshift Master
include: ../../lib_zabbix/tasks/create_template.yml
@@ -49,6 +75,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - openshift_master
- name: Include Template Openshift Node
include: ../../lib_zabbix/tasks/create_template.yml
@@ -57,6 +85,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - openshift_node
- name: Include Template Ops Tools
include: ../../lib_zabbix/tasks/create_template.yml
@@ -65,6 +95,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - ops_tools
- name: Include Template App Zabbix Server
include: ../../lib_zabbix/tasks/create_template.yml
@@ -73,6 +105,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - zabbix_server
- name: Include Template App Zabbix Agent
include: ../../lib_zabbix/tasks/create_template.yml
@@ -81,6 +115,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - zabbix_agent
- name: Include Template Performance Copilot
include: ../../lib_zabbix/tasks/create_template.yml
@@ -89,6 +125,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - pcp
- name: Include Template AWS
include: ../../lib_zabbix/tasks/create_template.yml
@@ -97,3 +135,5 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - aws
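With each include_vars and template include now tagged, a single template can be (re)applied in isolation instead of running the whole role. A plausible invocation, assuming a playbook that applies the os_zabbix role (the playbook path depends on the caller; the tag names are the ones added above):

    ansible-playbook <your-zabbix-playbook.yml> --tags docker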
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 512adad4c..8236cf135 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -244,3 +244,37 @@ g_template_openshift_master:
expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
priority: high
+
+ zgraphs:
+ - name: Openshift Master API Server Latency Pods LIST Quantiles
+ width: 900
+ height: 200
+ graph_items:
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.5
+ color: red
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.9
+ color: blue
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.99
+ color: orange
+
+ - name: Openshift Master API Server Latency Pods WATCHLIST Quantiles
+ width: 900
+ height: 200
+ graph_items:
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.5
+ color: red
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.9
+ color: blue
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.99
+ color: orange
+
+ - name: Openshift Master Scheduler End to End Latency Quantiles
+ width: 900
+ height: 200
+ graph_items:
+ - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.5
+ color: red
+ - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.9
+ color: blue
+ - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.99
+ color: orange
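Per the diffstat, these graph definitions are consumed by the new `zbx_graph.py` module through `lib_zabbix/tasks/create_template.yml`. A rough sketch of the data shape a task loop would iterate (assumes PyYAML; item list truncated to two quantiles for brevity):

    # Shape check for a zgraphs entry; prints what a task loop would see.
    import yaml

    doc = yaml.safe_load("""
    zgraphs:
    - name: Openshift Master API Server Latency Pods LIST Quantiles
      width: 900
      height: 200
      graph_items:
      - {item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.5, color: red}
      - {item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.9, color: blue}
    """)
    for graph in doc["zgraphs"]:
        print("%s (%dx%d)" % (graph["name"], graph["width"], graph["height"]))
        for item in graph["graph_items"]:
            print("  %s -> %s" % (item["color"], item["item_name"]))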
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 04665be62..79d52ef9b 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -304,3 +304,15 @@ g_template_os_linux:
description: 'CPU is less than 10% idle'
dependencies:
- 'CPU idle less than 5% on {HOST.NAME}'
+
+ zgraphprototypes:
+ - name: Network Interface Usage
+ width: 1000
+ height: 400
+ graph_items:
+ - item_name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
+ item_type: prototype
+ color: red
+ - item_name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
+ item_type: prototype
+ color: blue
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 0b3af8829..d7c06745e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -72,13 +72,14 @@ def delete_hosts(hosts):
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
return hosts, None
-def collect_hosts(master_set=False):
+def collect_hosts(version=None, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
Returns: a list of host information collected from the user
"""
+ min_masters_for_ha = 3
click.clear()
click.echo('***Host Configuration***')
message = """
@@ -102,17 +103,20 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts = []
more_hosts = True
+ num_masters = 0
while more_hosts:
host_props = {}
- hostname_or_ip = click.prompt('Enter hostname or IP address:',
- default='',
- value_proc=validate_prompt_hostname)
-
- host_props['connect_to'] = hostname_or_ip
- if not master_set:
- is_master = click.confirm('Will this host be an OpenShift Master?')
- host_props['master'] = is_master
- master_set = is_master
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
+ default='',
+ value_proc=validate_prompt_hostname)
+
+ if not masters_set:
+ if click.confirm('Will this host be an OpenShift Master?'):
+ host_props['master'] = True
+ num_masters += 1
+
+ if num_masters >= min_masters_for_ha or version == '3.0':
+ masters_set = True
host_props['node'] = True
#TODO: Reenable this option once container installs are out of tech preview
@@ -129,9 +133,51 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts.append(host)
- more_hosts = click.confirm('Do you want to add additional hosts?')
+ if print_summary:
+ click.echo('')
+ click.echo('Current Masters: {}'.format(num_masters))
+ click.echo('Current Nodes: {}'.format(len(hosts)))
+ click.echo('Additional Masters required for HA: {}'.format(max(min_masters_for_ha - num_masters, 0)))
+ click.echo('')
+
+ if num_masters <= 1 or num_masters >= min_masters_for_ha:
+ more_hosts = click.confirm('Do you want to add additional hosts?')
+
+ if num_masters > 1:
+ hosts.append(collect_master_lb())
+
return hosts
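The subtle part of the new prompt loop is the gate on 'Do you want to add additional hosts?': with exactly two masters the question is skipped entirely and the loop keeps demanding hosts until the three-master HA minimum is met. The condition, distilled as a standalone sketch:

    # Mirrors the guard around the 'add additional hosts?' prompt:
    # 0-1 masters (plain install) or >=3 masters (HA met) may stop;
    # exactly 2 masters must keep adding hosts.
    MIN_MASTERS_FOR_HA = 3

    def may_stop_adding_hosts(num_masters):
        return num_masters <= 1 or num_masters >= MIN_MASTERS_FOR_HA

    assert may_stop_adding_hosts(1)
    assert not may_stop_adding_hosts(2)
    assert may_stop_adding_hosts(3)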
+def collect_master_lb():
+ """
+ Get an HA proxy from the user
+ """
+ message = """
+Setting up High Availability Masters requires a load balancing solution.
+Please provide a host that will be configured as a proxy. This can either be
+an existing load balancer configured to balance all masters on port 8443 or a
+new host that will have HAProxy installed on it.
+
+If the host provided is not yet configured, a reference haproxy load
+balancer will be installed. It's important to note that while the rest of
+the environment will be fault tolerant, this reference load balancer will
+not be. It can be replaced post-installation with a load balancer with the
+same hostname.
+"""
+ click.echo(message)
+ host_props = {}
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
+ default='',
+ value_proc=validate_prompt_hostname)
+ install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
+ host_props['preconfigured'] = not install_haproxy
+ host_props['master'] = False
+ host_props['node'] = False
+ host_props['master_lb'] = True
+ master_lb = Host(**host_props)
+
+ return master_lb
+
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
click.clear()
@@ -169,6 +215,8 @@ Notes:
default_facts_lines = []
default_facts = {}
for h in hosts:
+ if h.preconfigured:
+ continue
default_facts[h.connect_to] = {}
h.ip = callback_facts[h.connect_to]["common"]["ip"]
h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
@@ -199,7 +247,41 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended
sys.exit(0)
return default_facts
-def get_variant_and_version():
+
+
+def check_hosts_config(oo_cfg):
+ click.clear()
+ masters = [host for host in oo_cfg.hosts if host.master]
+ if len(masters) > 1:
+ master_lb = [host for host in oo_cfg.hosts if host.master_lb]
+ if len(master_lb) > 1:
+ click.echo('More than one Master load balancer specified. Only one is allowed.')
+ sys.exit(0)
+ elif len(master_lb) == 1:
+ if master_lb[0].master or master_lb[0].node:
+ click.echo('The Master load balancer is configured as a master or node. Please correct this.')
+ sys.exit(0)
+ else:
+ message = """
+No HAProxy given in config. Either specify one or provide a load balancing solution
+of your choice to balance the master API (port 8443) on all master hosts.
+
+https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
+"""
+ confirm_continue(message)
+
+ nodes = [host for host in oo_cfg.hosts if host.node]
+ if len(masters) == len(nodes):
+ message = """
+No dedicated Nodes specified. By default, colocated Masters have their Nodes
+set to unschedulable. Continuing at this point will label all Nodes as
+schedulable.
+"""
+ confirm_continue(message)
+
+ return
+
+def get_variant_and_version(multi_master=False):
message = "\nWhich variant would you like to install?\n\n"
i = 1
@@ -211,6 +293,8 @@ def get_variant_and_version():
message = "%s\n" % message
click.echo(message)
+ if multi_master:
+ click.echo('NOTE: 3.0 installations are not supported with multiple masters.')
response = click.prompt("Choose a variant from above: ", default=1)
product, version = combos[response - 1]
@@ -292,16 +376,16 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
- if not oo_cfg.hosts:
- oo_cfg.hosts = collect_hosts()
- click.clear()
-
if oo_cfg.settings.get('variant', '') == '':
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
click.clear()
+ if not oo_cfg.hosts:
+ oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version'])
+ click.clear()
+
return oo_cfg
@@ -312,7 +396,7 @@ def collect_new_nodes():
Add new nodes here
"""
click.echo(message)
- return collect_hosts(True)
+ return collect_hosts(masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
@@ -487,7 +571,7 @@ def uninstall(ctx):
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
- click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
click.echo("OpenShift will be uninstalled from the following hosts:\n")
@@ -555,6 +639,8 @@ def install(ctx, force):
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
+ check_hosts_config(oo_cfg)
+
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 9c97e6e93..b6f0cdce3 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -36,19 +36,24 @@ class Host(object):
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
+ self.preconfigured = kwargs.get('preconfigured', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
# Should this host run as an OpenShift node:
self.node = kwargs.get('node', False)
+
+ # Should this host run as an HAProxy:
+ self.master_lb = kwargs.get('master_lb', False)
+
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
raise OOConfigInvalidHostError("You must specify either an 'ip' " \
"or 'hostname' to connect to.")
- if self.master is False and self.node is False:
+ if self.master is False and self.node is False and self.master_lb is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
@@ -62,7 +67,7 @@ class Host(object):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'containerized', 'connect_to']:
+ 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
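Since to_dict() exports only truthy properties, a pure load balancer entry serializes compactly. A sketch (hostname illustrative):

    from ooinstall.oo_config import Host

    lb = Host(connect_to="proxy.example.com", master_lb=True, preconfigured=True)
    # master/node default to False and are skipped, so only truthy props appear:
    print(lb.to_dict())  # {'connect_to': 'proxy.example.com', 'master_lb': True, 'preconfigured': True}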
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 372f27bda..9afc9a644 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -17,14 +17,17 @@ def set_config(cfg):
def generate_inventory(hosts):
global CFG
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ proxy = determine_proxy_configuration(hosts)
+ multiple_masters = len(masters) > 1
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
- base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
- base_inventory.write('\n[OSEv3:vars]\n')
- base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
- if CFG.settings['ansible_ssh_user'] != 'root':
- base_inventory.write('ansible_become=true\n')
+
+ write_inventory_children(base_inventory, multiple_masters, proxy)
+
+ write_inventory_vars(base_inventory, multiple_masters, proxy)
# Find the correct deployment type for ansible:
ver = find_variant(CFG.settings['variant'],
@@ -45,22 +48,69 @@ def generate_inventory(hosts):
"'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
base_inventory.write('\n[masters]\n')
- masters = (host for host in hosts if host.master)
for master in masters:
write_host(master, base_inventory)
+
+ if len(masters) > 1:
+ base_inventory.write('\n[etcd]\n')
+ for master in masters:
+ write_host(master, base_inventory)
+
base_inventory.write('\n[nodes]\n')
- nodes = (host for host in hosts if host.node)
- for node in nodes:
- # TODO: Until the Master can run the SDN itself we have to configure the Masters
- # as Nodes too.
- scheduleable = True
- # If there's only one Node and it's also a Master we want it to be scheduleable:
- if node in masters and len(masters) != 1:
- scheduleable = False
- write_host(node, base_inventory, scheduleable)
+
+ # TODO: It would be much better to calculate the scheduleability elsewhere
+ # and store it on the Node object.
+ if set(nodes) == set(masters):
+ for node in nodes:
+ write_host(node, base_inventory)
+ else:
+ for node in nodes:
+ # TODO: Until the Master can run the SDN itself we have to configure the Masters
+ # as Nodes too.
+ scheduleable = True
+ if node in masters:
+ scheduleable = False
+ write_host(node, base_inventory, scheduleable)
+
+ if not getattr(proxy, 'preconfigured', True):
+ base_inventory.write('\n[lb]\n')
+ write_host(proxy, base_inventory)
+
base_inventory.close()
return base_inventory_path
+def determine_proxy_configuration(hosts):
+ proxy = next((host for host in hosts if host.master_lb), None)
+ if proxy:
+ if proxy.hostname is None:
+ proxy.hostname = proxy.connect_to
+ proxy.public_hostname = proxy.connect_to
+ return proxy
+
+ return None
+
+def write_inventory_children(base_inventory, multiple_masters, proxy):
+ global CFG
+
+ base_inventory.write('\n[OSEv3:children]\n')
+ base_inventory.write('masters\n')
+ base_inventory.write('nodes\n')
+ if multiple_masters:
+ base_inventory.write('etcd\n')
+ if not getattr(proxy, 'preconfigured', True):
+ base_inventory.write('lb\n')
+
+def write_inventory_vars(base_inventory, multiple_masters, proxy):
+ global CFG
+ base_inventory.write('\n[OSEv3:vars]\n')
+ base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+ if CFG.settings['ansible_ssh_user'] != 'root':
+ base_inventory.write('ansible_become=true\n')
+ if multiple_masters:
+ base_inventory.write('openshift_master_cluster_method=native\n')
+ base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
+ base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
+
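Taken together, the children and vars writers produce an inventory of roughly this shape for a multi-master layout with a freshly installed (non-preconfigured) HAProxy host; hostnames below are illustrative:

    [OSEv3:children]
    masters
    nodes
    etcd
    lb

    [OSEv3:vars]
    ansible_ssh_user=root
    openshift_master_cluster_method=native
    openshift_master_cluster_hostname=proxy.example.com
    openshift_master_cluster_public_hostname=proxy.example.com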
def write_host(host, inventory, scheduleable=True):
global CFG
@@ -118,6 +168,7 @@ def default_facts(hosts, verbose=False):
facts_env = os.environ.copy()
facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+ facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
if 'ansible_log_path' in CFG.settings:
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
@@ -130,10 +181,10 @@ def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
inventory_file = generate_inventory(hosts_to_run_on)
if len(hosts_to_run_on) != len(hosts):
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
- 'playbooks/common/openshift-cluster/scaleup.yml')
+ 'playbooks/byo/openshift-cluster/scaleup.yml')
else:
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
- 'playbooks/byo/config.yml')
+ 'playbooks/byo/openshift-cluster/config.yml')
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
@@ -176,4 +227,3 @@ def run_upgrade_playbook(verbose=False):
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
-
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 40a2f844d..c951b6580 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -41,6 +41,41 @@ MOCK_FACTS = {
},
}
+MOCK_FACTS_QUICKHA = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+ '10.0.0.4': {
+ 'common': {
+ 'ip': '10.0.0.4',
+ 'public_ip': '10.0.0.4',
+ 'hostname': 'proxy-private.example.com',
+ 'public_hostname': 'proxy.example.com'
+ }
+ },
+}
+
# Substitute in a product name before use:
SAMPLE_CONFIG = """
variant: %s
@@ -91,6 +126,38 @@ hosts:
node: true
"""
+QUICKHA_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: proxy.example.com
+ master_lb: true
+"""
+
class OOCliFixture(OOInstallFixture):
def setUp(self):
@@ -145,11 +212,12 @@ class OOCliFixture(OOInstallFixture):
print written_config['hosts']
self.assertEquals(host_count, len(written_config['hosts']))
for h in written_config['hosts']:
- self.assertTrue(h['node'])
- self.assertTrue('ip' in h)
self.assertTrue('hostname' in h)
- self.assertTrue('public_ip' in h)
self.assertTrue('public_hostname' in h)
+ if 'preconfigured' not in h:
+ self.assertTrue(h['node'])
+ self.assertTrue('ip' in h)
+ self.assertTrue('public_ip' in h)
#pylint: disable=too-many-arguments
def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
@@ -504,6 +572,40 @@ class UnattendedCliTests(OOCliFixture):
assert result.exit_code == 1
assert result.output == "You must specify either an 'ip' or 'hostname' to connect to.\n"
+ #unattended with two masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+ # If user running test has rpm installed, this might be set to default:
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(4, len(hosts))
+ self.assertEquals(4, len(hosts_to_run_on))
+
class AttendedCliTests(OOCliFixture):
def setUp(self):
@@ -512,9 +614,10 @@ class AttendedCliTests(OOCliFixture):
self.config_file = os.path.join(self.work_dir, 'config.yml')
self.cli_args.extend(["-c", self.config_file])
- #pylint: disable=too-many-arguments
+ #pylint: disable=too-many-arguments,too-many-branches
def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
- add_nodes=None, confirm_facts=None):
+ add_nodes=None, confirm_facts=None, scheduleable_masters_ok=None,
+ master_lb=None):
"""
Builds a CLI input string with newline characters to simulate
the full run.
@@ -527,28 +630,40 @@ class AttendedCliTests(OOCliFixture):
if ssh_user:
inputs.append(ssh_user)
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ num_masters = 0
if hosts:
i = 0
+ min_masters_for_ha = 3
for (host, is_master) in hosts:
inputs.append(host)
- inputs.append('y' if is_master else 'n')
+ if is_master:
+ inputs.append('y')
+ num_masters += 1
+ else:
+ inputs.append('n')
#inputs.append('rpm')
if i < len(hosts) - 1:
- inputs.append('y') # Add more hosts
+ if num_masters <= 1 or num_masters >= min_masters_for_ha:
+ inputs.append('y') # Add more hosts
else:
inputs.append('n') # Done adding hosts
i += 1
- if variant_num:
- inputs.append(str(variant_num)) # Choose variant + version
+ if master_lb:
+ inputs.append(master_lb[0])
+ inputs.append('y' if master_lb[1] else 'n')
# TODO: support option 2, fresh install
if add_nodes:
+ if scheduleable_masters_ok:
+ inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
for (host, is_master) in add_nodes:
inputs.append(host)
- inputs.append('y' if is_master else 'n')
#inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
@@ -556,6 +671,13 @@ class AttendedCliTests(OOCliFixture):
inputs.append('n') # Done adding hosts
i += 1
+ if add_nodes is None:
+ total_hosts = hosts
+ else:
+ total_hosts = hosts + add_nodes
+ if total_hosts is not None and num_masters == len(total_hosts):
+ inputs.append('y')
+
inputs.extend([
confirm_facts,
'y', # lets do this
@@ -587,6 +709,15 @@ class AttendedCliTests(OOCliFixture):
written_config = self._read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.1 openshift_scheduleable'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.2'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.3'))
+
# interactive with config file and some installed some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
@@ -613,6 +744,7 @@ class AttendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli,
self.cli_args,
input=cli_input)
+ print result
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -658,6 +790,7 @@ class AttendedCliTests(OOCliFixture):
add_nodes=[('10.0.0.2', False)],
ssh_user='root',
variant_num=1,
+ scheduleable_masters_ok=True,
confirm_facts='y')
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
@@ -667,6 +800,113 @@ class AttendedCliTests(OOCliFixture):
exp_hosts_to_run_on_len=2,
force=False)
+ #interactive multimaster: one more node than master
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha1(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', False),
+ ('10.0.0.4', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 5, 5)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 5)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.1 openshift_scheduleable'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.2 openshift_scheduleable'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.3'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.4 openshift_scheduleable'))
+
+ return
+
+ #interactive multimaster: equal number masters and nodes
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha2(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 4, 4)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 4)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.1'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.2'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.3'))
+
+ return
+
+ #interactive all-in-one
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 1, 1)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 1)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.1'))
+
+ return
+
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
# TODO: test with config file, attended new node already in config file, plus manually added nodes