-rw-r--r--   utils/Makefile                                           10
-rw-r--r--   utils/src/ooinstall/ansible_plugins/facts_callback.py     9
-rw-r--r--   utils/src/ooinstall/cli_installer.py                    160
-rw-r--r--   utils/src/ooinstall/oo_config.py                          9
-rw-r--r--   utils/src/ooinstall/openshift_ansible.py                 42
-rw-r--r--   utils/src/ooinstall/variants.py                          24
6 files changed, 134 insertions, 120 deletions
diff --git a/utils/Makefile b/utils/Makefile
index b1a3874ae..dd0b5cdd0 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -75,15 +75,7 @@ ci-pep8:
 	@echo "#############################################"
 	@echo "# Running PEP8 Compliance Tests in virtualenv"
 	@echo "#############################################"
-	@echo "Skipping PEP8 tests until we clean them up"
-#	. $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/
-
-ci-pep8-real:
-	@echo "#############################################"
-	@echo "# Running PEP8 Compliance Tests in virtualenv"
-	@echo "#############################################"
 	. $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/

-ci: clean virtualenv ci-list-deps ci-pylint ci-pep8 ci-unittests ci-pyflakes
+ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests
 	:
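The re-enabled ci-pep8 target runs pep8 with E501 (line length), E121 and E124 ignored, so the Python hunks below mostly normalize the remaining continuation-line and blank-line rules. A minimal sketch of the continuation style those hunks converge on; the function and host names here are illustrative, not taken from the installer:

def describe_host(hostname, ip,
                  public_hostname):
    # Continuation lines line up with the opening delimiter, and implicit
    # string concatenation inside the parentheses replaces the trailing
    # backslash continuations removed throughout this change.
    return ('host {} ({}) is also reachable '
            'as {}'.format(hostname, ip, public_hostname))


if __name__ == '__main__':
    print(describe_host('master1', '10.0.0.1', 'master1.example.com'))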
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index 2537a099f..e51890a22 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -6,6 +6,7 @@ import os
 import yaml

 from ansible.plugins.callback import CallbackBase

+
 # pylint: disable=super-init-not-called
 class CallbackModule(CallbackBase):
@@ -19,9 +20,9 @@ class CallbackModule(CallbackBase):
             self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
         except KeyError:
             raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
-                'variable must be set.')
+                             'variable must be set.')
         self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
-            os.O_WRONLY)
+                                  os.O_WRONLY)

     def v2_on_any(self, *args, **kwargs):
         pass
@@ -72,9 +73,9 @@ class CallbackModule(CallbackBase):
     def v2_playbook_on_task_start(self, name, is_conditional):
         pass

-    #pylint: disable=too-many-arguments
+    # pylint: disable=too-many-arguments
     def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
-        encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+                                   encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
         pass

     def v2_playbook_on_setup(self):
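The callback plugin above requires OO_INSTALL_CALLBACK_FACTS_YAML to name the file it writes gathered facts to, and raises a ValueError when the variable is missing. A standalone sketch of that guard outside Ansible; callback_facts_path is a hypothetical helper name, the plugin itself does this in __init__:

import os


def callback_facts_path():
    # Fail early with a clear error when the variable naming the facts file
    # is missing, mirroring the ValueError raised by facts_callback.py.
    try:
        return os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
    except KeyError:
        raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
                         'variable must be set.')


if __name__ == '__main__':
    os.environ.setdefault('OO_INSTALL_CALLBACK_FACTS_YAML', '/tmp/callback_facts.yaml')
    print(callback_facts_path())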
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 230891e7f..67f3659ff 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -28,25 +28,26 @@ DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
 DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'

 UPGRADE_MAPPINGS = {
-    '3.0':{
-        'minor_version' :'3.0',
-        'minor_playbook':'v3_0_minor/upgrade.yml',
-        'major_version' :'3.1',
-        'major_playbook':'v3_0_to_v3_1/upgrade.yml',
-    },
-    '3.1':{
-        'minor_version' :'3.1',
-        'minor_playbook':'v3_1_minor/upgrade.yml',
-        'major_playbook':'v3_1_to_v3_2/upgrade.yml',
-        'major_version' :'3.2',
-    },
-    '3.2':{
-        'minor_version' :'3.2',
-        'minor_playbook':'v3_2/upgrade.yml',
-        'major_playbook':'v3_2/upgrade.yml',
-        'major_version' :'3.3',
-    }
-    }
+    '3.0': {
+        'minor_version': '3.0',
+        'minor_playbook': 'v3_0_minor/upgrade.yml',
+        'major_version': '3.1',
+        'major_playbook': 'v3_0_to_v3_1/upgrade.yml',
+    },
+    '3.1': {
+        'minor_version': '3.1',
+        'minor_playbook': 'v3_1_minor/upgrade.yml',
+        'major_playbook': 'v3_1_to_v3_2/upgrade.yml',
+        'major_version': '3.2',
+    },
+    '3.2': {
+        'minor_version': '3.2',
+        'minor_playbook': 'v3_2/upgrade.yml',
+        'major_playbook': 'v3_2/upgrade.yml',
+        'major_version': '3.3',
+    }
+}
+

 def validate_ansible_dir(path):
     if not path:
@@ -55,6 +56,7 @@ def validate_ansible_dir(path):
 #     if not os.path.exists(path)):
 #         raise click.BadParameter("Path \"{}\" doesn't exist".format(path))

+
 def is_valid_hostname(hostname):
     if not hostname or len(hostname) > 255:
         return False
@@ -63,11 +65,13 @@ def is_valid_hostname(hostname):
     allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
     return all(allowed.match(x) for x in hostname.split("."))

+
 def validate_prompt_hostname(hostname):
     if hostname == '' or is_valid_hostname(hostname):
         return hostname
     raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')

+
 def get_ansible_ssh_user():
     click.clear()
     message = """
@@ -78,6 +82,7 @@ passwordless sudo access.
     click.echo(message)
     return click.prompt('User for ssh access', default='root')

+
 def get_master_routingconfig_subdomain():
     click.clear()
     message = """
@@ -86,15 +91,17 @@ You might want to override the default subdomain used for exposed routes. If you
     click.echo(message)
     return click.prompt('New default subdomain (ENTER for none)', default='')

+
 def list_hosts(hosts):
     hosts_idx = range(len(hosts))
     for idx in hosts_idx:
         click.echo(' {}: {}'.format(idx, hosts[idx]))

+
 def delete_hosts(hosts):
     while True:
         list_hosts(hosts)
-        del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
+        del_idx = click.prompt('Select host to delete, y/Y to confirm, '
                                'or n/N to add more hosts', default='n')
         try:
             del_idx = int(del_idx)
@@ -111,6 +118,7 @@ def delete_hosts(hosts):
             click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
     return hosts, None

+
 def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
     """ Collect host information from user. This will later be filled in using
@@ -186,7 +194,6 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
         hosts.append(host)

-
     if print_summary:
         print_installation_summary(hosts, oo_cfg.settings['variant_version'])
@@ -329,6 +336,7 @@ hostname.
         master_lb = Host(**host_props)
         hosts.append(master_lb)

+
 def collect_storage_host(hosts):
     """ Get a valid host for storage from the user and append it to the list of
@@ -346,8 +354,8 @@ Note: Containerized storage hosts are not currently supported.
     first_master = next(host for host in hosts if host.is_master())

     hostname_or_ip = click.prompt('Enter hostname or IP address',
-                        value_proc=validate_prompt_hostname,
-                        default=first_master.connect_to)
+                                  value_proc=validate_prompt_hostname,
+                                  default=first_master.connect_to)
     existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
     if existing and existing_host.is_node():
         existing_host.roles.append('storage')
@@ -358,6 +366,7 @@ Note: Containerized storage hosts are not currently supported.
     storage = Host(**host_props)
     hosts.append(storage)

+
 def is_host_already_node_or_master(hostname, hosts):
     is_existing = False
     existing_host = None
@@ -369,6 +378,7 @@ def is_host_already_node_or_master(hostname, hosts):

     return is_existing, existing_host

+
 def confirm_hosts_facts(oo_cfg, callback_facts):
     hosts = oo_cfg.deployment.hosts
     click.clear()
@@ -443,7 +453,6 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended

     return default_facts

-
 def check_hosts_config(oo_cfg, unattended):
     click.clear()
     masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]
@@ -460,7 +469,7 @@ def check_hosts_config(oo_cfg, unattended):
             sys.exit(1)
     elif len(master_lb) == 1:
         if master_lb[0].is_master() or master_lb[0].is_node():
-            click.echo('ERROR: The master load balancer is configured as a master or node. ' \
+            click.echo('ERROR: The master load balancer is configured as a master or node. '
                        'Please correct this.')
             sys.exit(1)
     else:
@@ -473,8 +482,8 @@ https://docs.openshift.org/latest/install_config/install/advanced_install.html#m
         click.echo(message)
         sys.exit(1)

-    dedicated_nodes = [host for host in oo_cfg.deployment.hosts \
-        if host.is_node() and not host.is_master()]
+    dedicated_nodes = [host for host in oo_cfg.deployment.hosts
+                       if host.is_node() and not host.is_master()]
     if len(dedicated_nodes) == 0:
         message = """
 WARNING: No dedicated nodes specified. By default, colocated masters have
@@ -488,6 +497,7 @@ as schedulable.

     return

+
 def get_variant_and_version(multi_master=False):
     message = "\nWhich variant would you like to install?\n\n"
@@ -495,7 +505,7 @@ def get_variant_and_version(multi_master=False):
     combos = get_variant_version_combos()
     for (variant, version) in combos:
         message = "%s\n(%s) %s %s" % (message, i, variant.description,
-            version.name)
+                                      version.name)
         i = i + 1
     message = "%s\n" % message
@@ -507,12 +517,14 @@
     return product, version

+
 def confirm_continue(message):
     if message:
         click.echo(message)
     click.confirm("Are you ready to continue?", default=False, abort=True)
     return

+
 def error_if_missing_info(oo_cfg):
     missing_info = False
     if not oo_cfg.deployment.hosts:
@@ -552,6 +564,7 @@
     if missing_info:
         sys.exit(1)

+
 def get_host_roles_set(oo_cfg):
     roles_set = set()
     for host in oo_cfg.deployment.hosts:
@@ -560,6 +573,7 @@
     return roles_set

+
 def get_proxy_hostnames_and_excludes():
     message = """
 If a proxy is needed to reach HTTP and HTTPS traffic, please enter the name below.
@@ -588,6 +602,7 @@ Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
     return http_proxy_hostname, https_proxy_hostname, proxy_excludes

+
 def get_missing_info_from_user(oo_cfg):
     """ Prompts the user for any information missing from the given configuration.
     """
     click.clear()
@@ -633,9 +648,8 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
             oo_cfg.deployment.roles[role] = Role(name=role, variables={})
     click.clear()

-    if not 'master_routingconfig_subdomain' in oo_cfg.deployment.variables:
-        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
-            get_master_routingconfig_subdomain()
+    if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
+        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
     click.clear()

     if not oo_cfg.settings.get('openshift_http_proxy', None) and \
@@ -657,10 +671,12 @@ def get_role_variable(oo_cfg, role_name, variable_name):
     except (StopIteration, KeyError):
         return None

+
 def set_role_variable(oo_cfg, role_name, variable_name, variable_value):
     target_role = next(role for role in oo_cfg.deployment.roles if role.name is role_name)
     target_role[variable_name] = variable_value

+
 def collect_new_nodes(oo_cfg):
     click.clear()
     click.echo('*** New Node Configuration ***')
@@ -671,6 +687,7 @@ Add new nodes here
     new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
     return new_nodes

+
 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []
     uninstalled_hosts = []
@@ -682,13 +699,15 @@ def get_installed_hosts(hosts, callback_facts):
             uninstalled_hosts.append(host)
     return installed_hosts, uninstalled_hosts

+
 def is_installed_host(host, callback_facts):
     version_found = 'common' in callback_facts[host.connect_to].keys() and \
-        callback_facts[host.connect_to]['common'].get('version', '') and \
-        callback_facts[host.connect_to]['common'].get('version', '') != 'None'
+                    callback_facts[host.connect_to]['common'].get('version', '') and \
+                    callback_facts[host.connect_to]['common'].get('version', '') != 'None'

     return version_found

+
 # pylint: disable=too-many-branches
 # This pylint error will be corrected shortly in separate PR.
 def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
@@ -703,10 +722,10 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
     # This check has to happen before we start removing hosts later in this method
     if not force:
         if not unattended:
-            click.echo('By default the installer only adds new nodes ' \
+            click.echo('By default the installer only adds new nodes '
                        'to an installed environment.')
-            response = click.prompt('Do you want to (1) only add additional nodes or ' \
-                '(2) reinstall the existing hosts ' \
+            response = click.prompt('Do you want to (1) only add additional nodes or '
+                                    '(2) reinstall the existing hosts '
                                     'potentially erasing any custom changes?', type=int)
             # TODO: this should be reworked with error handling.
@@ -735,16 +754,16 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
             for uninstalled_host in uninstalled_hosts:
                 click.echo("{} is currently uninstalled".format(uninstalled_host))
             # Fall through
-            click.echo('\nUninstalled hosts have been detected in your environment. ' \
-                       'Please make sure your environment was installed successfully ' \
-                       'before adding new nodes. If you want a fresh install, use ' \
+            click.echo('\nUninstalled hosts have been detected in your environment. '
+                       'Please make sure your environment was installed successfully '
+                       'before adding new nodes. If you want a fresh install, use '
                        '`atomic-openshift-installer install --force`')
             sys.exit(1)
         else:
             if unattended:
                 if not force:
-                    click.echo('Installed environment detected and no additional ' \
-                        'nodes specified: aborting. If you want a fresh install, use ' \
+                    click.echo('Installed environment detected and no additional '
+                               'nodes specified: aborting. If you want a fresh install, use '
                                '`atomic-openshift-installer install --force`')
                     sys.exit(1)
                 else:
@@ -758,14 +777,15 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
             click.echo('Gathering information from hosts...')
             callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
             if error or callback_facts is None:
-                click.echo("There was a problem fetching the required information. See " \
+                click.echo("There was a problem fetching the required information. See "
                            "{} for details.".format(oo_cfg.settings['ansible_log_path']))
                 sys.exit(1)
     else:
-        pass # proceeding as normal should do a clean install
+        pass  # proceeding as normal should do a clean install

     return hosts_to_run_on, callback_facts

+
 def set_infra_nodes(hosts):
     if all(host.is_master() for host in hosts):
         infra_list = hosts
@@ -781,11 +801,11 @@ def set_infra_nodes(hosts):
 @click.pass_context
 @click.option('--unattended', '-u', is_flag=True, default=False)
 @click.option('--configuration', '-c',
-    type=click.Path(file_okay=True,
-        dir_okay=False,
-        writable=True,
-        readable=True),
-    default=None)
+              type=click.Path(file_okay=True,
+                              dir_okay=False,
+                              writable=True,
+                              readable=True),
+              default=None)
 @click.option('--ansible-playbook-directory', '-a',
               type=click.Path(exists=True,
@@ -796,25 +816,25 @@ def set_infra_nodes(hosts):
               default=DEFAULT_PLAYBOOK_DIR,
               envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
 @click.option('--ansible-config',
-    type=click.Path(file_okay=True,
-        dir_okay=False,
-        writable=True,
-        readable=True),
-    default=None)
+              type=click.Path(file_okay=True,
+                              dir_okay=False,
+                              writable=True,
+                              readable=True),
+              default=None)
 @click.option('--ansible-log-path',
-    type=click.Path(file_okay=True,
-        dir_okay=False,
-        writable=True,
-        readable=True),
-    default="/tmp/ansible.log")
+              type=click.Path(file_okay=True,
+                              dir_okay=False,
+                              writable=True,
+                              readable=True),
+              default="/tmp/ansible.log")
 @click.option('-v', '--verbose',
-    is_flag=True, default=False)
+              is_flag=True, default=False)
 @click.option('-d', '--debug',
-    help="Enable installer debugging (/tmp/installer.log)",
-    is_flag=True, default=False)
+              help="Enable installer debugging (/tmp/installer.log)",
+              is_flag=True, default=False)
 @click.help_option('--help', '-h')
-#pylint: disable=too-many-arguments
-#pylint: disable=line-too-long
+# pylint: disable=too-many-arguments
+# pylint: disable=line-too-long
 # Main CLI entrypoint, not much we can do about too many arguments.
 def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose, debug):
     """
@@ -859,7 +879,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
     if ctx.obj['ansible_config']:
         oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
     elif 'ansible_config' not in oo_cfg.settings and \
-        os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+            os.path.exists(DEFAULT_ANSIBLE_CONFIG):
         # If we're installed by RPM this file should exist and we can use it as our default:
         oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
@@ -900,7 +920,7 @@ def uninstall(ctx):
 @click.option('--latest-minor', '-l', is_flag=True, default=False)
 @click.option('--next-major', '-n', is_flag=True, default=False)
 @click.pass_context
-#pylint: disable=bad-builtin,too-many-statements
+# pylint: disable=bad-builtin,too-many-statements
 def upgrade(ctx, latest_minor, next_major):
     oo_cfg = ctx.obj['oo_cfg']
@@ -943,7 +963,7 @@ def upgrade(ctx, latest_minor, next_major):
     if next_major:
         if 'major_playbook' not in mapping:
-            click.echo("No major upgrade supported for %s %s with this version "\
+            click.echo("No major upgrade supported for %s %s with this version "
                        "of atomic-openshift-utils." % (variant, old_version))
             sys.exit(0)
         playbook = mapping['major_playbook']
@@ -956,7 +976,7 @@ def upgrade(ctx, latest_minor, next_major):
     if latest_minor:
         if 'minor_playbook' not in mapping:
-            click.echo("No minor upgrade supported for %s %s with this version "\
+            click.echo("No minor upgrade supported for %s %s with this version "
                        "of atomic-openshift-utils." % (variant, old_version))
             sys.exit(0)
         playbook = mapping['minor_playbook']
@@ -978,7 +998,7 @@ def upgrade(ctx, latest_minor, next_major):
                                    ctx.obj['verbose'])
     if retcode > 0:
         click.echo("Errors encountered during upgrade, please check %s." %
-            oo_cfg.settings['ansible_log_path'])
+                   oo_cfg.settings['ansible_log_path'])
     else:
         oo_cfg.save_to_disk()
         click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@@ -1003,10 +1023,10 @@ def install(ctx, force, gen_inventory):
     print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None))
     click.echo('Gathering information from hosts...')
     callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
-        verbose)
+                                                            verbose)

     if error or callback_facts is None:
-        click.echo("There was a problem fetching the required information. " \
+        click.echo("There was a problem fetching the required information. "
                    "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
         sys.exit(1)
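The upgrade command above picks its playbook out of UPGRADE_MAPPINGS based on the installed version and whether a major or minor upgrade was requested. A self-contained sketch of that lookup; resolve_upgrade_playbook is a hypothetical helper, the real decision is made inline in upgrade():

UPGRADE_MAPPINGS = {
    '3.0': {'minor_version': '3.0', 'minor_playbook': 'v3_0_minor/upgrade.yml',
            'major_version': '3.1', 'major_playbook': 'v3_0_to_v3_1/upgrade.yml'},
    '3.1': {'minor_version': '3.1', 'minor_playbook': 'v3_1_minor/upgrade.yml',
            'major_version': '3.2', 'major_playbook': 'v3_1_to_v3_2/upgrade.yml'},
    '3.2': {'minor_version': '3.2', 'minor_playbook': 'v3_2/upgrade.yml',
            'major_version': '3.3', 'major_playbook': 'v3_2/upgrade.yml'},
}


def resolve_upgrade_playbook(old_version, next_major=False):
    # Choose the major- or minor-upgrade playbook for the installed version,
    # the same check upgrade() performs after loading the saved configuration.
    mapping = UPGRADE_MAPPINGS[old_version]
    key = 'major_playbook' if next_major else 'minor_playbook'
    if key not in mapping:
        raise KeyError('no such upgrade path for {}'.format(old_version))
    return mapping[key]


if __name__ == '__main__':
    print(resolve_upgrade_playbook('3.1', next_major=True))   # v3_1_to_v3_2/upgrade.yml
    print(resolve_upgrade_playbook('3.2'))                    # v3_2/upgrade.yml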
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 0e855f437..b9f0cc65c 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -18,7 +18,7 @@ CONFIG_PERSIST_SETTINGS = [
     'version',
     'variant',
     'variant_version',
-    ]
+]

 DEPLOYMENT_VARIABLES_BLACKLIST = [
     'hosts',
@@ -28,6 +28,7 @@ DEPLOYMENT_VARIABLES_BLACKLIST = [
 DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
 PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']

+
 def print_read_config_error(error, path='the configuration file'):
     message = """
 Error loading config. {}.
@@ -103,7 +104,6 @@ class Host(object):
     def is_storage(self):
         return 'storage' in self.roles

-
     def is_etcd_member(self, all_hosts):
         """ Will this host be a member of a standalone etcd cluster.
         """
         if not self.is_master():
@@ -189,7 +189,7 @@ class OOConfig(object):
             with open(self.config_path, 'r') as cfgfile:
                 loaded_config = yaml.safe_load(cfgfile.read())

-            if not 'version' in loaded_config:
+            if 'version' not in loaded_config:
                 print_read_config_error('Legacy configuration file found', self.config_path)
                 sys.exit(0)
@@ -242,7 +242,6 @@ class OOConfig(object):
                     for name, variables in role_list.iteritems():
                         self.deployment.roles.update({name: Role(name, variables)})

-
         except IOError, ferr:
             raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
                                                                               ferr.strerror))
@@ -251,7 +250,6 @@ class OOConfig(object):
                 'Config file "{}" is not a valid YAML document'.format(self.config_path))
         installer_log.debug("Parsed the config file")

-
     def _upgrade_v1_config(self, config):
         new_config_data = {}
         new_config_data['deployment'] = {}
@@ -396,7 +394,6 @@ class OOConfig(object):
             print "Error persisting settings: {}".format(e)
             sys.exit(0)

-
         return p_settings

     def yaml(self):
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 570b48dda..001c58d73 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -22,16 +22,18 @@ ROLES_TO_GROUPS_MAP = {
 VARIABLES_MAP = {
     'ansible_ssh_user': 'ansible_ssh_user',
     'deployment_type': 'deployment_type',
-    'master_routingconfig_subdomain':'openshift_master_default_subdomain',
-    'proxy_http':'openshift_http_proxy',
+    'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
+    'proxy_http': 'openshift_http_proxy',
     'proxy_https': 'openshift_https_proxy',
     'proxy_exclude_hosts': 'openshift_no_proxy',
 }

+
 def set_config(cfg):
     global CFG
     CFG = cfg

+
 def generate_inventory(hosts):
     global CFG
@@ -50,8 +52,7 @@ def generate_inventory(hosts):

     write_inventory_vars(base_inventory, multiple_masters, lb)

-
-    #write_inventory_hosts
+    # write_inventory_hosts
     for role in CFG.deployment.roles:
         # write group block
         group = ROLES_TO_GROUPS_MAP.get(role, role)
@@ -70,15 +71,17 @@ def generate_inventory(hosts):
     base_inventory.close()
     return base_inventory_path

+
 def determine_lb_configuration(hosts):
     lb = next((host for host in hosts if host.is_master_lb()), None)
     if lb:
-        if lb.hostname == None:
+        if lb.hostname is None:
             lb.hostname = lb.connect_to
             lb.public_hostname = lb.connect_to

     return lb

+
 def write_inventory_children(base_inventory, scaleup):
     global CFG
@@ -116,28 +119,28 @@ def write_inventory_vars(base_inventory, multiple_masters, lb):
             "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))

     if CFG.settings.get('variant_version', None) == '3.1':
-        #base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
+        # base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
         base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))

     write_proxy_settings(base_inventory)

     # Find the correct deployment type for ansible:
     ver = find_variant(CFG.settings['variant'],
-        version=CFG.settings.get('variant_version', None))[1]
+                       version=CFG.settings.get('variant_version', None))[1]
     base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))

     if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
-        base_inventory.write('openshift_docker_additional_registries={}\n'
-            .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+        base_inventory.write('openshift_docker_additional_registries={}\n'.format(
+            os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
     if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
-        base_inventory.write('openshift_docker_insecure_registries={}\n'
-            .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+        base_inventory.write('openshift_docker_insecure_registries={}\n'.format(
+            os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
     if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
         # We have to double the '{' here for literals
         base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
-            "'name': 'ose-devel', "
-            "'baseurl': '{}', "
-            "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
+                             "'name': 'ose-devel', "
+                             "'baseurl': '{}', "
+                             "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))

     for name, role_obj in CFG.deployment.roles.iteritems():
         if role_obj.variables:
@@ -153,17 +156,17 @@ def write_inventory_vars(base_inventory, multiple_masters, lb):

 def write_proxy_settings(base_inventory):
     try:
         base_inventory.write("openshift_http_proxy={}\n".format(
-                             CFG.settings['openshift_http_proxy']))
+            CFG.settings['openshift_http_proxy']))
     except KeyError:
         pass
     try:
         base_inventory.write("openshift_https_proxy={}\n".format(
-                             CFG.settings['openshift_https_proxy']))
+            CFG.settings['openshift_https_proxy']))
     except KeyError:
         pass
     try:
         base_inventory.write("openshift_no_proxy={}\n".format(
-                             CFG.settings['openshift_no_proxy']))
+            CFG.settings['openshift_no_proxy']))
     except KeyError:
         pass
@@ -193,7 +196,6 @@ def write_host(host, role, inventory, schedulable=None):
     if role == 'node':
         facts += ' openshift_node_labels="{}"'.format(host.node_labels)

-
     # Distinguish between three states, no schedulability specified (use default),
     # explicitly set to True, or explicitly set to False:
     if role != 'node' or schedulable is None:
@@ -290,7 +292,7 @@ def run_ansible(playbook, inventory, env_vars, verbose=False):

 def run_uninstall_playbook(hosts, verbose=False):
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
-        'playbooks/adhoc/uninstall.yml')
+                            'playbooks/adhoc/uninstall.yml')
     inventory_file = generate_inventory(hosts)
     facts_env = os.environ.copy()
     if 'ansible_log_path' in CFG.settings:
@@ -302,7 +304,7 @@ def run_upgrade_playbook(hosts, playbook, verbose=False):
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
-        'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
+                            'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))

     # TODO: Upgrade inventory for upgrade?
     inventory_file = generate_inventory(hosts)
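VARIABLES_MAP above maps installer setting names onto openshift-ansible inventory variable names, and the inventory-writing code turns those settings into key=value lines. A trimmed, hypothetical sketch of how such a map can be applied; render_inventory_vars is not a function in openshift_ansible.py:

VARIABLES_MAP = {
    'ansible_ssh_user': 'ansible_ssh_user',
    'deployment_type': 'deployment_type',
    'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
    'proxy_http': 'openshift_http_proxy',
    'proxy_https': 'openshift_https_proxy',
    'proxy_exclude_hosts': 'openshift_no_proxy',
}


def render_inventory_vars(settings):
    # Emit one "ansible_key=value" line per known setting, skipping anything
    # unmapped or empty; the real code writes such lines into the inventory file.
    lines = []
    for installer_key, ansible_key in sorted(VARIABLES_MAP.items()):
        value = settings.get(installer_key)
        if value:
            lines.append('{}={}'.format(ansible_key, value))
    return '\n'.join(lines)


if __name__ == '__main__':
    print(render_inventory_vars({'ansible_ssh_user': 'root',
                                 'proxy_http': 'http://proxy.example.com:3128'}))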
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index ce4d772ee..c723c7e61 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -38,29 +38,30 @@ class Variant(object):
 # WARNING: Keep the versions ordered, most recent first:
 OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
-    [
-        Version('3.3', 'openshift-enterprise'),
-    ]
+              [
+                  Version('3.3', 'openshift-enterprise'),
+              ]
 )

 origin = Variant('origin', 'OpenShift Origin',
-    [
-        Version('1.2', 'origin'),
-    ]
+                 [
+                     Version('1.2', 'origin'),
+                 ]
 )

 LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform',
-    [
-        Version('3.2', 'openshift-enterprise'),
-        Version('3.1', 'openshift-enterprise'),
-        Version('3.0', 'openshift-enterprise'),
-    ]
+                 [
+                     Version('3.2', 'openshift-enterprise'),
+                     Version('3.1', 'openshift-enterprise'),
+                     Version('3.0', 'openshift-enterprise'),
+                 ]
 )

 # Ordered list of variants we can install, first is the default.
 SUPPORTED_VARIANTS = (OSE, origin, LEGACY)

 DISPLAY_VARIANTS = (OSE, )

+
 def find_variant(name, version=None):
     """
     Locate the variant object for the variant given in config file, and
@@ -78,6 +79,7 @@ def find_variant(name, version=None):

     return (None, None)

+
 def get_variant_version_combos():
     combos = []
     for variant in DISPLAY_VARIANTS:
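variants.py keeps SUPPORTED_VARIANTS ordered with the default first and each variant's versions ordered most-recent-first, and find_variant() resolves a (variant, version) pair from a name. A reduced stand-in using namedtuples; the real Variant and Version classes carry more behaviour, and find_variant()'s exact fallback rules are not fully visible in the hunk above, so treat this as an approximation:

from collections import namedtuple

# Reduced stand-ins for variants.Variant / variants.Version.
Version = namedtuple('Version', ['name', 'ansible_key'])
Variant = namedtuple('Variant', ['name', 'description', 'versions'])

OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
              [Version('3.3', 'openshift-enterprise')])
origin = Variant('origin', 'OpenShift Origin',
                 [Version('1.2', 'origin')])

SUPPORTED_VARIANTS = (OSE, origin)


def find_variant(name, version=None):
    # Return (variant, version), taking the newest version when none is
    # requested; (None, None) signals that nothing matched.
    for variant in SUPPORTED_VARIANTS:
        if variant.name == name:
            if version is None:
                return variant, variant.versions[0]
            for ver in variant.versions:
                if ver.name == version:
                    return variant, ver
    return None, None


if __name__ == '__main__':
    print(find_variant('origin'))
    print(find_variant('openshift-enterprise', version='3.3'))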