Diffstat (limited to 'utils')
-rw-r--r-- utils/src/ooinstall/cli_installer.py     | 122
-rw-r--r-- utils/src/ooinstall/oo_config.py         |   9
-rw-r--r-- utils/src/ooinstall/openshift_ansible.py |  86
-rw-r--r-- utils/test/cli_installer_tests.py        | 260
4 files changed, 429 insertions(+), 48 deletions(-)
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 0b3af8829..d7c06745e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -72,13 +72,14 @@ def delete_hosts(hosts):
             click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
     return hosts, None
 
-def collect_hosts(master_set=False):
+def collect_hosts(version=None, masters_set=False, print_summary=True):
     """
         Collect host information from user. This will later be filled in using
         ansible.
 
         Returns: a list of host information collected from the user
     """
+    min_masters_for_ha = 3
     click.clear()
     click.echo('***Host Configuration***')
     message = """
@@ -102,17 +103,20 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
     hosts = []
     more_hosts = True
+    num_masters = 0
     while more_hosts:
         host_props = {}
-        hostname_or_ip = click.prompt('Enter hostname or IP address:',
-                                      default='',
-                                      value_proc=validate_prompt_hostname)
-
-        host_props['connect_to'] = hostname_or_ip
-        if not master_set:
-            is_master = click.confirm('Will this host be an OpenShift Master?')
-            host_props['master'] = is_master
-            master_set = is_master
+        host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
+                                                default='',
+                                                value_proc=validate_prompt_hostname)
+
+        if not masters_set:
+            if click.confirm('Will this host be an OpenShift Master?'):
+                host_props['master'] = True
+                num_masters += 1
+
+            if num_masters >= min_masters_for_ha or version == '3.0':
+                masters_set = True
         host_props['node'] = True
 
         #TODO: Reenable this option once container installs are out of tech preview
@@ -129,9 +133,51 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
 
         hosts.append(host)
 
-        more_hosts = click.confirm('Do you want to add additional hosts?')
+        if print_summary:
+            click.echo('')
+            click.echo('Current Masters: {}'.format(num_masters))
+            click.echo('Current Nodes: {}'.format(len(hosts)))
+            click.echo('Additional Masters required for HA: {}'.format(max(min_masters_for_ha - num_masters, 0)))
+            click.echo('')
+
+        if num_masters <= 1 or num_masters >= min_masters_for_ha:
+            more_hosts = click.confirm('Do you want to add additional hosts?')
+
+    if num_masters > 1:
+        hosts.append(collect_master_lb())
+
     return hosts
 
+def collect_master_lb():
+    """
+    Get an HA proxy from the user
+    """
+    message = """
+Setting up High Availability Masters requires a load balancing solution.
+Please provide a host that will be configured as a proxy. This can either be
+an existing load balancer configured to balance all masters on port 8443 or a
+new host that will have HAProxy installed on it.
+
+If the host provided is not yet configured, a reference haproxy load
+balancer will be installed. It's important to note that while the rest of the
+environment will be fault tolerant, this reference load balancer will not be.
+It can be replaced post-installation with a load balancer with the same
+hostname.
+"""
+    click.echo(message)
+    host_props = {}
+    host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
+                                            default='',
+                                            value_proc=validate_prompt_hostname)
+    install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
+    host_props['preconfigured'] = not install_haproxy
+    host_props['master'] = False
+    host_props['node'] = False
+    host_props['master_lb'] = True
+    master_lb = Host(**host_props)
+
+    return master_lb
+
 def confirm_hosts_facts(oo_cfg, callback_facts):
     hosts = oo_cfg.hosts
     click.clear()
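Reviewer's note: the min_masters_for_ha = 3 floor above comes from quorum arithmetic, since this patch colocates etcd with the masters (see the new [etcd] group in openshift_ansible.py below). A standalone sketch of the reasoning, with an illustrative helper name:

# An N-member etcd cluster stays writable only while a majority (N // 2 + 1)
# of members is up, so two masters tolerate zero failures -- no better than one.
def surviving_failures(members):
    quorum = members // 2 + 1
    return members - quorum

for members in (1, 2, 3, 5):
    print('%d master(s) -> tolerates %d failure(s)' % (members, surviving_failures(members)))
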
+""" + click.echo(message) + host_props = {} + host_props['connect_to'] = click.prompt('Enter hostname or IP address:', + default='', + value_proc=validate_prompt_hostname) + install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?') + host_props['preconfigured'] = not install_haproxy + host_props['master'] = False + host_props['node'] = False + host_props['master_lb'] = True + master_lb = Host(**host_props) + + return master_lb + def confirm_hosts_facts(oo_cfg, callback_facts): hosts = oo_cfg.hosts click.clear() @@ -169,6 +215,8 @@ Notes: default_facts_lines = [] default_facts = {} for h in hosts: + if h.preconfigured == True: + continue default_facts[h.connect_to] = {} h.ip = callback_facts[h.connect_to]["common"]["ip"] h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"] @@ -199,7 +247,41 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended sys.exit(0) return default_facts -def get_variant_and_version(): + + +def check_hosts_config(oo_cfg): + click.clear() + masters = [host for host in oo_cfg.hosts if host.master] + if len(masters) > 1: + master_lb = [host for host in oo_cfg.hosts if host.master_lb] + if len(master_lb) > 1: + click.echo('More than one Master load balancer specified. Only one is allowed.') + sys.exit(0) + elif len(master_lb) == 1: + if master_lb[0].master or master_lb[0].node: + click.echo('The Master load balancer is configured as a master or node. Please correct this.') + sys.exit(0) + else: + message = """ +No HAProxy given in config. Either specify one or provide a load balancing solution +of your choice to balance the master API (port 8443) on all master hosts. + +https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters +""" + confirm_continue(message) + + nodes = [host for host in oo_cfg.hosts if host.node] + if len(masters) == len(nodes): + message = """ +No dedicated Nodes specified. By default, colocated Masters have their Nodes +set to unscheduleable. Continuing at this point will label all nodes as +scheduleable. 
+""" + confirm_continue(message) + + return + +def get_variant_and_version(multi_master=False): message = "\nWhich variant would you like to install?\n\n" i = 1 @@ -211,6 +293,8 @@ def get_variant_and_version(): message = "%s\n" % message click.echo(message) + if multi_master: + click.echo('NOTE: 3.0 installations are not') response = click.prompt("Choose a variant from above: ", default=1) product, version = combos[response - 1] @@ -292,16 +376,16 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user() click.clear() - if not oo_cfg.hosts: - oo_cfg.hosts = collect_hosts() - click.clear() - if oo_cfg.settings.get('variant', '') == '': variant, version = get_variant_and_version() oo_cfg.settings['variant'] = variant.name oo_cfg.settings['variant_version'] = version.name click.clear() + if not oo_cfg.hosts: + oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version']) + click.clear() + return oo_cfg @@ -312,7 +396,7 @@ def collect_new_nodes(): Add new nodes here """ click.echo(message) - return collect_hosts(True) + return collect_hosts(masters_set=True, print_summary=False) def get_installed_hosts(hosts, callback_facts): installed_hosts = [] @@ -487,7 +571,7 @@ def uninstall(ctx): verbose = ctx.obj['verbose'] if len(oo_cfg.hosts) == 0: - click.echo("No hosts defined in: %s" % oo_cfg['configuration']) + click.echo("No hosts defined in: %s" % oo_cfg.config_path) sys.exit(1) click.echo("OpenShift will be uninstalled from the following hosts:\n") @@ -555,6 +639,8 @@ def install(ctx, force): else: oo_cfg = get_missing_info_from_user(oo_cfg) + check_hosts_config(oo_cfg) + click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose) diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 9c97e6e93..b6f0cdce3 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -36,19 +36,24 @@ class Host(object): self.public_ip = kwargs.get('public_ip', None) self.public_hostname = kwargs.get('public_hostname', None) self.connect_to = kwargs.get('connect_to', None) + self.preconfigured = kwargs.get('preconfigured', None) # Should this host run as an OpenShift master: self.master = kwargs.get('master', False) # Should this host run as an OpenShift node: self.node = kwargs.get('node', False) + + # Should this host run as an HAProxy: + self.master_lb = kwargs.get('master_lb', False) + self.containerized = kwargs.get('containerized', False) if self.connect_to is None: raise OOConfigInvalidHostError("You must specify either and 'ip' " \ "or 'hostname' to connect to.") - if self.master is False and self.node is False: + if self.master is False and self.node is False and self.master_lb is False: raise OOConfigInvalidHostError( "You must specify each host as either a master or a node.") @@ -62,7 +67,7 @@ class Host(object): """ Used when exporting to yaml. 
""" d = {} for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', - 'master', 'node', 'containerized', 'connect_to']: + 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']: # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 372f27bda..9afc9a644 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -17,14 +17,17 @@ def set_config(cfg): def generate_inventory(hosts): global CFG + masters = [host for host in hosts if host.master] + nodes = [host for host in hosts if host.node] + proxy = determine_proxy_configuration(hosts) + multiple_masters = len(masters) > 1 base_inventory_path = CFG.settings['ansible_inventory_path'] base_inventory = open(base_inventory_path, 'w') - base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') - base_inventory.write('\n[OSEv3:vars]\n') - base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user'])) - if CFG.settings['ansible_ssh_user'] != 'root': - base_inventory.write('ansible_become=true\n') + + write_inventory_children(base_inventory, multiple_masters, proxy) + + write_inventory_vars(base_inventory, multiple_masters, proxy) # Find the correct deployment type for ansible: ver = find_variant(CFG.settings['variant'], @@ -45,22 +48,69 @@ def generate_inventory(hosts): "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO'])) base_inventory.write('\n[masters]\n') - masters = (host for host in hosts if host.master) for master in masters: write_host(master, base_inventory) + + if len(masters) > 1: + base_inventory.write('\n[etcd]\n') + for master in masters: + write_host(master, base_inventory) + base_inventory.write('\n[nodes]\n') - nodes = (host for host in hosts if host.node) - for node in nodes: - # TODO: Until the Master can run the SDN itself we have to configure the Masters - # as Nodes too. - scheduleable = True - # If there's only one Node and it's also a Master we want it to be scheduleable: - if node in masters and len(masters) != 1: - scheduleable = False - write_host(node, base_inventory, scheduleable) + + # TODO: It would be much better to calculate the scheduleability elsewhere + # and store it on the Node object. + if set(nodes) == set(masters): + for node in nodes: + write_host(node, base_inventory) + else: + for node in nodes: + # TODO: Until the Master can run the SDN itself we have to configure the Masters + # as Nodes too. 
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 40a2f844d..c951b6580 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -41,6 +41,41 @@ MOCK_FACTS = {
     },
 }
 
+MOCK_FACTS_QUICKHA = {
+    '10.0.0.1': {
+        'common': {
+            'ip': '10.0.0.1',
+            'public_ip': '10.0.0.1',
+            'hostname': 'master-private.example.com',
+            'public_hostname': 'master.example.com'
+        }
+    },
+    '10.0.0.2': {
+        'common': {
+            'ip': '10.0.0.2',
+            'public_ip': '10.0.0.2',
+            'hostname': 'node1-private.example.com',
+            'public_hostname': 'node1.example.com'
+        }
+    },
+    '10.0.0.3': {
+        'common': {
+            'ip': '10.0.0.3',
+            'public_ip': '10.0.0.3',
+            'hostname': 'node2-private.example.com',
+            'public_hostname': 'node2.example.com'
+        }
+    },
+    '10.0.0.4': {
+        'common': {
+            'ip': '10.0.0.4',
+            'public_ip': '10.0.0.4',
+            'hostname': 'proxy-private.example.com',
+            'public_hostname': 'proxy.example.com'
+        }
+    },
+}
+
 # Substitute in a product name before use:
 SAMPLE_CONFIG = """
 variant: %s
@@ -91,6 +126,38 @@ hosts:
     node: true
 """
 
+QUICKHA_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+  - connect_to: 10.0.0.1
+    ip: 10.0.0.1
+    hostname: master-private.example.com
+    public_ip: 24.222.0.1
+    public_hostname: master.example.com
+    master: true
+    node: true
+  - connect_to: 10.0.0.2
+    ip: 10.0.0.2
+    hostname: node1-private.example.com
+    public_ip: 24.222.0.2
+    public_hostname: node1.example.com
+    master: true
+    node: true
+  - connect_to: 10.0.0.3
+    ip: 10.0.0.3
+    hostname: node2-private.example.com
+    public_ip: 24.222.0.3
+    public_hostname: node2.example.com
+    node: true
+  - connect_to: 10.0.0.4
+    ip: 10.0.0.4
+    hostname: proxy-private.example.com
+    public_ip: 24.222.0.4
+    public_hostname: proxy.example.com
+    master_lb: true
+"""
+
 class OOCliFixture(OOInstallFixture):
 
     def setUp(self):
@@ -145,11 +212,12 @@ class OOCliFixture(OOInstallFixture):
         print written_config['hosts']
         self.assertEquals(host_count, len(written_config['hosts']))
         for h in written_config['hosts']:
-            self.assertTrue(h['node'])
-            self.assertTrue('ip' in h)
             self.assertTrue('hostname' in h)
-            self.assertTrue('public_ip' in h)
             self.assertTrue('public_hostname' in h)
+            if 'preconfigured' not in h:
+                self.assertTrue(h['node'])
+                self.assertTrue('ip' in h)
+                self.assertTrue('public_ip' in h)
 
     #pylint: disable=too-many-arguments
     def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
@@ -504,6 +572,40 @@ class UnattendedCliTests(OOCliFixture):
         assert result.exit_code == 1
         assert result.output == "You must specify either and 'ip' or 'hostname' to connect to.\n"
 
+    #unattended with two masters, one node, and haproxy
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+        self.assert_result(result, 0)
+
+        load_facts_args = load_facts_mock.call_args[0]
+        self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+            load_facts_args[0])
+        self.assertEquals(os.path.join(self.work_dir,
+            "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+        env_vars = load_facts_args[2]
+        self.assertEquals(os.path.join(self.work_dir,
+            '.ansible/callback_facts.yaml'),
+            env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+        self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+        # If user running test has rpm installed, this might be set to default:
+        self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+            env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+        # Make sure we ran on the expected masters and nodes:
+        hosts = run_playbook_mock.call_args[0][0]
+        hosts_to_run_on = run_playbook_mock.call_args[0][1]
+        self.assertEquals(4, len(hosts))
+        self.assertEquals(4, len(hosts_to_run_on))
+
 class AttendedCliTests(OOCliFixture):
 
     def setUp(self):
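Reviewer's note: test_quick_ha_full_run() follows the suite's standard pattern: patch away the two functions that would invoke ansible-playbook, then assert on their recorded call_args. The same pattern in miniature, self-contained (unittest.mock here; the suite itself uses the mock package's @patch decorator):

from unittest import mock

def load_system_facts(inventory, playbook, env_vars, verbose=False):
    raise AssertionError('tests must never invoke ansible for real')

with mock.patch(__name__ + '.load_system_facts') as fake_facts:
    fake_facts.return_value = ({'10.0.0.1': {'common': {}}}, 0)
    facts, returncode = load_system_facts('hosts', 'openshift_facts.yml', {})
    assert returncode == 0
    assert fake_facts.call_args[0][1] == 'openshift_facts.yml'
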
@@ -512,9 +614,10 @@ class AttendedCliTests(OOCliFixture):
         self.config_file = os.path.join(self.work_dir, 'config.yml')
         self.cli_args.extend(["-c", self.config_file])
 
-    #pylint: disable=too-many-arguments
+    #pylint: disable=too-many-arguments,too-many-branches
     def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
-                     add_nodes=None, confirm_facts=None):
+                     add_nodes=None, confirm_facts=None, scheduleable_masters_ok=None,
+                     master_lb=None):
         """
         Builds a CLI input string with newline characters to simulate
         the full run.
@@ -527,28 +630,40 @@
         if ssh_user:
             inputs.append(ssh_user)
 
+        if variant_num:
+            inputs.append(str(variant_num))  # Choose variant + version
+
+        num_masters = 0
         if hosts:
             i = 0
+            min_masters_for_ha = 3
             for (host, is_master) in hosts:
                 inputs.append(host)
-                inputs.append('y' if is_master else 'n')
+                if is_master:
+                    inputs.append('y')
+                    num_masters += 1
+                else:
+                    inputs.append('n')
                 #inputs.append('rpm')
                 if i < len(hosts) - 1:
-                    inputs.append('y')  # Add more hosts
+                    if num_masters <= 1 or num_masters >= min_masters_for_ha:
+                        inputs.append('y')  # Add more hosts
                 else:
                     inputs.append('n')  # Done adding hosts
                 i += 1
 
-        if variant_num:
-            inputs.append(str(variant_num))  # Choose variant + version
+        if master_lb:
+            inputs.append(master_lb[0])
+            inputs.append('y' if master_lb[1] else 'n')
 
         # TODO: support option 2, fresh install
         if add_nodes:
+            if scheduleable_masters_ok:
+                inputs.append('y')
             inputs.append('1')  # Add more nodes
             i = 0
             for (host, is_master) in add_nodes:
                 inputs.append(host)
-                inputs.append('y' if is_master else 'n')
                 #inputs.append('rpm')
                 if i < len(add_nodes) - 1:
                     inputs.append('y')  # Add more hosts
@@ -556,6 +671,13 @@
                 else:
                     inputs.append('n')  # Done adding hosts
                 i += 1
 
+        if add_nodes is None:
+            total_hosts = hosts
+        else:
+            total_hosts = hosts + add_nodes
+        if total_hosts is not None and num_masters == len(total_hosts):
+            inputs.append('y')
+
         inputs.extend([
             confirm_facts,
             'y',  # lets do this
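Reviewer's note: _build_input() works because click's CliRunner feeds one newline-separated answer per prompt, in order, so the helper has to mirror the interactive flow exactly -- hence the num_masters bookkeeping above, which replicates when collect_hosts() stops asking "Do you want to add additional hosts?". The mechanism in isolation, with a hypothetical mini-command:

import click
from click.testing import CliRunner

@click.command()
def add_host():
    host = click.prompt('Enter hostname or IP address')
    is_master = click.confirm('Will this host be an OpenShift Master?')
    click.echo('%s master=%s' % (host, is_master))

result = CliRunner().invoke(add_host, input='10.0.0.1\ny\n')
print(result.output)  # ends with: 10.0.0.1 master=True
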
@@ -587,6 +709,15 @@
         written_config = self._read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 3)
 
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.1 openshift_scheduleable'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.2'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.3'))
+
     # interactive with config file and some installed some uninstalled hosts
     @patch('ooinstall.openshift_ansible.run_main_playbook')
     @patch('ooinstall.openshift_ansible.load_system_facts')
@@ -613,6 +744,7 @@
 
         result = self.runner.invoke(cli.cli, self.cli_args,
                                     input=cli_input)
+        print result
         self.assert_result(result, 0)
 
         self._verify_load_facts(load_facts_mock)
@@ -658,6 +790,7 @@
             add_nodes=[('10.0.0.2', False)],
             ssh_user='root',
             variant_num=1,
+            scheduleable_masters_ok=True,
             confirm_facts='y')
 
         self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
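Reviewer's note: the inventory assertions added above exploit the fact that an ansible INI inventory parses with ConfigParser once allow_no_value=True: a bare hostname becomes an option with value None, and a "host key=value" line comes back under the option name "host key". Self-contained (Python 2, matching the suite):

import ConfigParser
import StringIO

INVENTORY = """
[nodes]
10.0.0.1 openshift_scheduleable=False
10.0.0.3
"""

parser = ConfigParser.ConfigParser(allow_no_value=True)
parser.readfp(StringIO.StringIO(INVENTORY))
print parser.get('nodes', '10.0.0.1 openshift_scheduleable')  # 'False'
print parser.get('nodes', '10.0.0.3')                         # None
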
@@ -667,6 +800,113 @@
             exp_hosts_to_run_on_len=2,
             force=False)
 
+    #interactive multimaster: one more node than master
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha1(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = self._build_input(hosts=[
+            ('10.0.0.1', True),
+            ('10.0.0.2', True),
+            ('10.0.0.3', False),
+            ('10.0.0.4', True)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y',
+            master_lb=('10.0.0.5', False))
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+                                    input=cli_input)
+        self.assert_result(result, 0)
+
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, 5, 5)
+
+        written_config = self._read_yaml(self.config_file)
+        self._verify_config_hosts(written_config, 5)
+
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.1 openshift_scheduleable'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.2 openshift_scheduleable'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.3'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.4 openshift_scheduleable'))
+
+        return
+
+    #interactive multimaster: equal number masters and nodes
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha2(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = self._build_input(hosts=[
+            ('10.0.0.1', True),
+            ('10.0.0.2', True),
+            ('10.0.0.3', True)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y',
+            master_lb=('10.0.0.5', False))
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+                                    input=cli_input)
+        self.assert_result(result, 0)
+
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, 4, 4)
+
+        written_config = self._read_yaml(self.config_file)
+        self._verify_config_hosts(written_config, 4)
+
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.1'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.2'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.3'))
+
+        return
+
+    #interactive all-in-one
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = self._build_input(hosts=[
+            ('10.0.0.1', True)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y')
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+                                    input=cli_input)
+        self.assert_result(result, 0)
+
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, 1, 1)
+
+        written_config = self._read_yaml(self.config_file)
+        self._verify_config_hosts(written_config, 1)
+
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.1'))
+
+        return
+
 # TODO: test with config file, attended add node
 # TODO: test with config file, attended new node already in config file
 # TODO: test with config file, attended new node already in config file, plus manually added nodes