Diffstat (limited to 'bin')
-rw-r--r--  bin/README_SHELL_COMPLETION                |   2
-rwxr-xr-x  bin/cluster                                | 193
-rwxr-xr-x  bin/ohi                                    |  97
-rw-r--r--  bin/openshift-ansible-bin.spec             | 122
-rw-r--r--  bin/openshift_ansible.conf.example         |   2
-rw-r--r--  bin/openshift_ansible/awsutil.py           | 164
l---------  bin/openshift_ansible/multi_ec2.py         |   1
l---------  bin/openshift_ansible/multi_inventory.py   |   1
-rwxr-xr-x  bin/opscp                                  |  46
-rwxr-xr-x  bin/opssh                                  |  52
-rwxr-xr-x  bin/oscp                                   |  19
-rwxr-xr-x  bin/ossh                                   |  35
-rwxr-xr-x  bin/ossh_bash_completion                   |  20
-rw-r--r--  bin/ossh_zsh_completion                    |  10
-rw-r--r--  bin/zsh_functions/_ossh                    |   4
15 files changed, 414 insertions, 354 deletions
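
The bulk of this diff renames the multi_ec2 inventory plumbing to multi_inventory, switches host metadata from ec2_tag_* keys to oo_* keys, and reworks bin/cluster to run ansible-playbook through subprocess.check_call, raising an ActionFailed exception on a non-zero exit instead of returning the os.system status. A minimal sketch of that new run pattern follows; run_playbook and its parameters are illustrative stand-ins, not code from the patch.

import subprocess
import sys


class ActionFailed(Exception):
    """Raised when the underlying ansible-playbook run exits non-zero."""
    pass


def run_playbook(inventory, playbook, extra_vars, verbosity=0):
    # Illustrative helper only: mirrors the shape of the new Cluster.action(),
    # which shells out to ansible-playbook and raises instead of returning a status.
    verbose = '-{0}'.format('v' * verbosity) if verbosity else ''
    ansible_extra_vars = "-e '{0}'".format(
        ' '.join('{0}={1}'.format(key, value) for key, value in extra_vars.items()))
    command = 'ansible-playbook {0} {1} {2} {3}'.format(
        verbose, inventory, ansible_extra_vars, playbook)
    sys.stderr.write('RUN [{0}]\n'.format(command))
    sys.stderr.flush()
    try:
        # check_call raises CalledProcessError on a non-zero exit code,
        # so callers see an exception rather than a silently ignored status.
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as exc:
        raise ActionFailed('ACTION failed: {0}'.format(exc))

With this shape, failures propagate as exceptions that the script's __main__ block can catch and report, which is exactly what the patched bin/cluster does below.
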
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION index 5f05df7fc..49bba3acc 100644 --- a/bin/README_SHELL_COMPLETION +++ b/bin/README_SHELL_COMPLETION @@ -14,7 +14,7 @@ will populate the cache file and the completions should become available. This script will look at the cached version of your -multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache. +multi_inventory results in ~/.ansible/tmp/multi_inventory.cache. It will then parse a few {host}.{env} out of the json and return them to be completable. diff --git a/bin/cluster b/bin/cluster index a19434e21..ecb8bc58e 100755 --- a/bin/cluster +++ b/bin/cluster @@ -5,6 +5,7 @@ import argparse import ConfigParser import os import sys +import subprocess import traceback @@ -53,86 +54,109 @@ class Cluster(object): """ Create an OpenShift cluster for given provider :param args: command line arguments provided by user - :return: exit status from run command """ - env = {'cluster_id': args.cluster_id, + cluster = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) - env['num_masters'] = args.masters - env['num_nodes'] = args.nodes - env['num_infra'] = args.infra - env['num_etcd'] = args.etcd + cluster['num_masters'] = args.masters + cluster['num_nodes'] = args.nodes + cluster['num_infra'] = args.infra + cluster['num_etcd'] = args.etcd + cluster['cluster_env'] = args.env - return self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) + + def add_nodes(self, args): + """ + Add nodes to an existing cluster for given provider + :param args: command line arguments provided by user + """ + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + } + playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider) + inventory = self.setup_provider(args.provider) + + cluster['num_nodes'] = args.nodes + cluster['num_infra'] = args.infra + cluster['cluster_env'] = args.env + + self.action(args, inventory, cluster, playbook) def terminate(self, args): """ Destroy OpenShift cluster :param args: command line arguments provided by user - :return: exit status from run command """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider) + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } + playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider) inventory = self.setup_provider(args.provider) - return self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def list(self, args): """ List VMs in cluster :param args: command line arguments provided by user - :return: exit status from run command """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider) + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } + playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider) inventory = self.setup_provider(args.provider) - return self.action(args, inventory, env, 
playbook) + self.action(args, inventory, cluster, playbook) def config(self, args): """ Configure or reconfigure OpenShift across clustered VMs :param args: command line arguments provided by user - :return: exit status from run command """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider) + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } + playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider) inventory = self.setup_provider(args.provider) - return self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def update(self, args): """ Update to latest OpenShift across clustered VMs :param args: command line arguments provided by user - :return: exit status from run command """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider) + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } + + playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider) inventory = self.setup_provider(args.provider) - return self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def service(self, args): """ Make the same service call across all nodes in the cluster :param args: command line arguments provided by user - :return: exit status from run command """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args), - 'new_cluster_state': args.state} + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'new_cluster_state': args.state, + 'cluster_env': args.env, + } - playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider) inventory = self.setup_provider(args.provider) - return self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def setup_provider(self, provider): """ @@ -142,10 +166,13 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - config.readfp(open('inventory/gce/hosts/gce.ini')) + gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini') + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + if os.path.exists(gce_ini_path): + config.readfp(open(gce_ini_path)) - for key in config.options('gce'): - os.environ[key] = config.get('gce', key) + for key in config.options('gce'): + os.environ[key] = config.get('gce', key) inventory = '-i inventory/gce/hosts' elif 'aws' == provider: @@ -164,7 +191,7 @@ class Cluster(object): boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)] if len(key_missing) > 0 and len(boto_configs) == 0: - raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(key_missing)) + raise ValueError("PROVIDER aws requires {0} environment variable(s). 
See README_AWS.md".format(key_missing)) elif 'libvirt' == provider: inventory = '-i inventory/libvirt/hosts' @@ -172,53 +199,63 @@ class Cluster(object): inventory = '-i inventory/openstack/hosts' else: # this code should never be reached - raise ValueError("invalid PROVIDER {}".format(provider)) + raise ValueError("invalid PROVIDER {0}".format(provider)) return inventory - def action(self, args, inventory, env, playbook): + def action(self, args, inventory, cluster, playbook): """ Build ansible-playbook command line and execute :param args: command line arguments provided by user :param inventory: derived provider library - :param env: environment variables for kubernetes + :param cluster: cluster variables for kubernetes :param playbook: ansible playbook to execute - :return: exit status from ansible-playbook command """ verbose = '' if args.verbose > 0: - verbose = '-{}'.format('v' * args.verbose) + verbose = '-{0}'.format('v' * args.verbose) if args.option: for opt in args.option: k, v = opt.split('=', 1) - env['cli_' + k] = v + cluster['cli_' + k] = v - ansible_env = '-e \'{}\''.format( - ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()]) + ansible_extra_vars = '-e \'{0}\''.format( + ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()]) ) - command = 'ansible-playbook {} {} {} {}'.format( - verbose, inventory, ansible_env, playbook + command = 'ansible-playbook {0} {1} {2} {3}'.format( + verbose, inventory, ansible_extra_vars, playbook ) if args.profile: command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command if args.verbose > 1: - command = 'time {}'.format(command) + command = 'time {0}'.format(command) if args.verbose > 0: - sys.stderr.write('RUN [{}]\n'.format(command)) + sys.stderr.write('RUN [{0}]\n'.format(command)) sys.stderr.flush() - return os.system(command) + try: + subprocess.check_call(command, shell=True) + except subprocess.CalledProcessError as exc: + raise ActionFailed("ACTION [{0}] failed: {1}" + .format(args.action, exc)) + + +class ActionFailed(Exception): + """ + Raised when action failed. + """ + pass if __name__ == '__main__': """ - User command to invoke ansible playbooks in a "known" environment + User command to invoke ansible playbooks in a "known" configuration Reads ~/.openshift-ansible for default configuration items [DEFAULT] @@ -227,7 +264,14 @@ if __name__ == '__main__': providers = gce,aws,libvirt,openstack """ - environment = ConfigParser.SafeConfigParser({ + warning = ("================================================================================\n" + "ATTENTION: You are running a community supported utility that has not been\n" + "tested by Red Hat. 
Visit https://docs.openshift.com for supported installation\n" + "instructions.\n" + "================================================================================\n\n") + sys.stderr.write(warning) + + cluster_config = ConfigParser.SafeConfigParser({ 'cluster_ids': 'marketing,sales', 'validate_cluster_ids': 'False', 'providers': 'gce,aws,libvirt,openstack', @@ -235,36 +279,36 @@ if __name__ == '__main__': path = os.path.expanduser("~/.openshift-ansible") if os.path.isfile(path): - environment.read(path) + cluster_config.read(path) cluster = Cluster() parser = argparse.ArgumentParser( - description='Python wrapper to ensure proper environment for OpenShift ansible playbooks', + description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks', ) parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity') parser.add_argument('--version', action='version', version='%(prog)s 0.3') meta_parser = argparse.ArgumentParser(add_help=False) - providers = environment.get('DEFAULT', 'providers').split(',') + providers = cluster_config.get('DEFAULT', 'providers').split(',') meta_parser.add_argument('provider', choices=providers, help='provider') - if environment.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"): - meta_parser.add_argument('cluster_id', choices=environment.get('DEFAULT', 'cluster_ids').split(','), + if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"): + meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','), help='prefix for cluster VM names') else: meta_parser.add_argument('cluster_id', help='prefix for cluster VM names') meta_parser.add_argument('-t', '--deployment-type', - choices=['origin', 'online', 'enterprise'], + choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'], help='Deployment type. (default: origin)') - meta_parser.add_argument('-T', '--product-type', - choices=['openshift' 'atomic-enterprise'], - help='Product type. (default: openshift)') meta_parser.add_argument('-o', '--option', action='append', help='options') + meta_parser.add_argument('--env', default='dev', type=str, + help='environment for the cluster. Defaults to \'dev\'.') + meta_parser.add_argument('-p', '--profile', action='store_true', help='Enable playbook profiling') @@ -283,6 +327,16 @@ if __name__ == '__main__': help='number of external etcd hosts to create in cluster') create_parser.set_defaults(func=cluster.create) + + create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster', + parents=[meta_parser]) + create_parser.add_argument('-n', '--nodes', default=1, type=int, + help='number of nodes to add to the cluster') + create_parser.add_argument('-i', '--infra', default=1, type=int, + help='number of infra nodes to add to the cluster') + create_parser.set_defaults(func=cluster.add_nodes) + + config_parser = action_parser.add_parser('config', help='Configure or reconfigure a cluster', parents=[meta_parser]) @@ -316,26 +370,23 @@ if __name__ == '__main__': args = parser.parse_args() if 'terminate' == args.action and not args.force: - answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id)) + answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? 
[y/N] ".format(args.cluster_id)) if answer not in ['y', 'Y']: sys.stderr.write('\nACTION [terminate] aborted by user!\n') exit(1) if 'update' == args.action and not args.force: answer = raw_input( - "This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id)) + "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id)) if answer not in ['y', 'Y']: sys.stderr.write('\nACTION [update] aborted by user!\n') exit(1) - status = 1 try: - status = args.func(args) - if status != 0: - sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status)) - except Exception, e: + args.func(args) + except Exception as exc: if args.verbose: traceback.print_exc(file=sys.stderr) else: - sys.stderr.write("{}\n".format(e)) - exit(status) + print >>sys.stderr, exc + exit(1) @@ -1,14 +1,16 @@ #!/usr/bin/env python +''' +Ohi = Openshift Host Inventory + +This script provides an easy way to look at your host inventory. + +This depends on multi_inventory being setup correctly. +''' # vim: expandtab:tabstop=4:shiftwidth=4 import argparse -import traceback import sys import os -import re -import tempfile -import time -import subprocess import ConfigParser from openshift_ansible import awsutil @@ -20,6 +22,9 @@ CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases' class Ohi(object): + ''' + Class for managing openshift host inventory + ''' def __init__(self): self.host_type_aliases = {} self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__))) @@ -29,37 +34,42 @@ class Ohi(object): 'openshift_ansible', \ 'openshift_ansible.conf') + self.args = None self.parse_cli_args() self.parse_config_file() self.aws = awsutil.AwsUtil(self.host_type_aliases) def run(self): + ''' + Call into awsutil and retrieve the desired hosts and environments + ''' + if self.args.list_host_types: self.aws.print_host_types() return 0 - hosts = None - if self.args.host_type is not None and \ - self.args.env is not None: - # Both env and host-type specified - hosts = self.aws.get_host_list(host_type=self.args.host_type, \ - envs=self.args.env) - - if self.args.host_type is None and \ - self.args.env is not None: - # Only env specified - hosts = self.aws.get_host_list(envs=self.args.env) + if self.args.v3: + version = '3' + elif self.args.all_versions: + version = 'all' + else: + version = '2' - if self.args.host_type is not None and \ - self.args.env is None: - # Only host-type specified - hosts = self.aws.get_host_list(host_type=self.args.host_type) + hosts = self.aws.get_host_list(clusters=self.args.cluster, + host_type=self.args.host_type, + sub_host_type=self.args.sub_host_type, + envs=self.args.env, + version=version, + cached=self.args.cache_only) if hosts is None: # We weren't able to determine what they wanted to do raise ArgumentError("Invalid combination of arguments") + if self.args.ip: + hosts = self.aws.convert_to_ip(hosts) + for host in sorted(hosts, key=utils.normalize_dnsname): if self.args.user: print "%s@%s" % (self.args.user, host) @@ -69,6 +79,9 @@ class Ohi(object): return 0 def parse_config_file(self): + ''' + Parse the config file for ohi + ''' if os.path.isfile(self.config_path): config = ConfigParser.ConfigParser() config.read(self.config_path) @@ -85,23 +98,39 @@ class Ohi(object): parser = argparse.ArgumentParser(description='OpenShift Host Inventory') - parser.add_argument('--list-host-types', default=False, action='store_true', - help='List all of the host types') + parser.add_argument('--list-host-types', 
default=False, action='store_true', help='List all of the host types') + parser.add_argument('--list', default=False, action='store_true', help='List all hosts') - parser.add_argument('-e', '--env', action="store", - help="Which environment to use") + parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use") + parser.add_argument('-e', '--env', action="append", help="Which environment to use") - parser.add_argument('-t', '--host-type', action="store", - help="Which host type to use") + parser.add_argument('-t', '--host-type', action="store", help="Which host type to use") - parser.add_argument('-l', '--user', action='store', default=None, - help='username') + parser.add_argument('-s', '--sub-host-type', action="store", help="Which sub host type to use") + parser.add_argument('-l', '--user', action='store', default=None, help='username') - self.args = parser.parse_args() + parser.add_argument('--cache-only', action='store_true', default=False, + help='Retrieve the host inventory by cache only. Default is false.') + parser.add_argument('--v2', action='store_true', default=True, + help='Specify the openshift version. Default is 2') -if __name__ == '__main__': + parser.add_argument('--v3', action='store_true', default=False, + help='Specify the openshift version.') + + parser.add_argument('--ip', action='store_true', default=False, + help='Return ip address only.') + + parser.add_argument('--all-versions', action='store_true', default=False, + help='Specify the openshift version. Return all versions') + + self.args = parser.parse_args() + +def main(): + ''' + Ohi will do its work here + ''' if len(sys.argv) == 1: print "\nError: No options given. Use --help to see the available options\n" sys.exit(0) @@ -110,5 +139,9 @@ if __name__ == '__main__': ohi = Ohi() exitcode = ohi.run() sys.exit(exitcode) - except ArgumentError as e: - print "\nError: %s\n" % e.message + except ArgumentError as err: + print "\nError: %s\n" % err.message + +if __name__ == '__main__': + main() + diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec deleted file mode 100644 index d90810bc3..000000000 --- a/bin/openshift-ansible-bin.spec +++ /dev/null @@ -1,122 +0,0 @@ -Summary: OpenShift Ansible Scripts for working with metadata hosts -Name: openshift-ansible-bin -Version: 0.0.19 -Release: 1%{?dist} -License: ASL 2.0 -URL: https://github.com/openshift/openshift-ansible -Source0: %{name}-%{version}.tar.gz -Requires: python2, openshift-ansible-inventory -BuildRequires: python2-devel -BuildArch: noarch - -%description -Scripts to make it nicer when working with hosts that are defined only by metadata. - -%prep -%setup -q - -%build - -%install -mkdir -p %{buildroot}%{_bindir} -mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible -mkdir -p %{buildroot}/etc/bash_completion.d -mkdir -p %{buildroot}/etc/openshift_ansible - -cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir} -cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible - -# Make it so we can load multi_ec2.py as a library. 
-rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py* -ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py -ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc - -cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d - -cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf - -%files -%{_bindir}/* -%{python_sitelib}/openshift_ansible/ -/etc/bash_completion.d/* -%config(noreplace) /etc/openshift_ansible/ - -%changelog -* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.19-1 -- Updated to show private ips when doing a list (kwoodson@redhat.com) -- Updated to read config first and default to users home dir - (kwoodson@redhat.com) -- Prevent Ansible from serializing tasks (lhuard@amadeus.com) -- Infra node support (whearn@redhat.com) -- Playbook updates for clustered etcd (jdetiber@redhat.com) -- bin/cluster supports boto credentials as well as env variables - (jdetiber@redhat.com) -- Merge pull request #291 from lhuard1A/profile - (twiest@users.noreply.github.com) -- Add a generic mechanism for passing options (lhuard@amadeus.com) -- Infrastructure - Validate AWS environment before calling playbooks - (jhonce@redhat.com) -- Add a --profile option to spot which task takes more time - (lhuard@amadeus.com) -- changed Openshift to OpenShift (twiest@redhat.com) - -* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1 -- Implement OpenStack provider (lhuard@amadeus.com) -- * Update defaults and examples to track core concepts guide - (jhonce@redhat.com) -- Issue 119 - Add support for ~/.openshift-ansible (jhonce@redhat.com) -- Infrastructure - Add service action to bin/cluster (jhonce@redhat.com) - -* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1 -- fixed the openshift-ansible-bin build (twiest@redhat.com) - -* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1 -- Command line tools import multi_ec2 as lib (kwoodson@redhat.com) -- Adding cache location for multi ec2 (kwoodson@redhat.com) -* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1 -- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com) - -* Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1 -- fixed opssh and opscp to allow just environment or just host-type. - (twiest@redhat.com) - -* Mon May 04 2015 Thomas Wiest <twiest@redhat.com> 0.0.11-1 -- changed opssh to a bash script using ohi to make it easier to maintain, and - to expose all of the pssh features directly. (twiest@redhat.com) -- Added --user option to ohi to pre-pend the username in the hostlist output. - (twiest@redhat.com) -- Added utils.py that contains a normalize_dnsname function good for sorting - dns names to a human readable list. 
(twiest@redhat.com) - -* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.10-1 -- added --list-host-types option to opscp (twiest@redhat.com) - -* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.9-1 -- added opscp (twiest@redhat.com) -* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1 -- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com) - -* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1 -- added the ability to run opssh and ohi on all hosts in an environment, as - well as all hosts of the same host-type regardless of environment - (twiest@redhat.com) -- added ohi (twiest@redhat.com) -* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1 -- fixed bug where opssh would throw an exception if pssh returned a non-zero - exit code (twiest@redhat.com) - -* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1 -- fixed the opssh default output behavior to be consistent with pssh. Also - fixed a bug in how directories are named for --outdir and --errdir. - (twiest@redhat.com) -* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1 -- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com) - -* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1 -- created a python package named openshift_ansible (twiest@redhat.com) - -* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1 -- added config file support to opssh, ossh, and oscp (twiest@redhat.com) -* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1 -- new package built with tito - diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example index e891b855a..8786dfc13 100644 --- a/bin/openshift_ansible.conf.example +++ b/bin/openshift_ansible.conf.example @@ -1,5 +1,5 @@ #[main] -#inventory = /usr/share/ansible/inventory/multi_ec2.py +#inventory = /usr/share/ansible/inventory/multi_inventory.py #[host_type_aliases] #host-type-one = aliasa,aliasb diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py index 9df034f57..eba11e851 100644 --- a/bin/openshift_ansible/awsutil.py +++ b/bin/openshift_ansible/awsutil.py @@ -4,7 +4,10 @@ import os import re -from openshift_ansible import multi_ec2 + +# Buildbot does not have multi_inventory installed +#pylint: disable=no-name-in-module +from openshift_ansible import multi_inventory class ArgumentError(Exception): """This class is raised when improper arguments are passed.""" @@ -28,6 +31,7 @@ class AwsUtil(object): host_type_aliases -- a list of aliases to common host-types (e.g. ex-node) """ + self.alias_lookup = {} host_type_aliases = host_type_aliases or {} self.host_type_aliases = host_type_aliases @@ -37,25 +41,41 @@ class AwsUtil(object): def setup_host_type_alias_lookup(self): """Sets up the alias to host-type lookup table.""" - self.alias_lookup = {} for key, values in self.host_type_aliases.iteritems(): for value in values: self.alias_lookup[value] = key @staticmethod - def get_inventory(args=None): + def get_inventory(args=None, cached=False): """Calls the inventory script and returns a dictionary containing the inventory." 
Keyword arguments: args -- optional arguments to pass to the inventory script """ - mec2 = multi_ec2.MultiEc2(args) - mec2.run() - return mec2.result + minv = multi_inventory.MultiInventory(args) + if cached: + minv.get_inventory_from_cache() + else: + minv.run() + return minv.result + + def get_clusters(self): + """Searches for cluster tags in the inventory and returns all of the clusters found.""" + pattern = re.compile(r'^oo_clusterid_(.*)') + + clusters = [] + inv = self.get_inventory() + for key in inv.keys(): + matched = pattern.match(key) + if matched: + clusters.append(matched.group(1)) + + clusters.sort() + return clusters def get_environments(self): """Searches for env tags in the inventory and returns all of the envs found.""" - pattern = re.compile(r'^tag_environment_(.*)') + pattern = re.compile(r'^oo_environment_(.*)') envs = [] inv = self.get_inventory() @@ -69,7 +89,7 @@ class AwsUtil(object): def get_host_types(self): """Searches for host-type tags in the inventory and returns all host-types found.""" - pattern = re.compile(r'^tag_host-type_(.*)') + pattern = re.compile(r'^oo_hosttype_(.*)') host_types = [] inv = self.get_inventory() @@ -81,6 +101,20 @@ class AwsUtil(object): host_types.sort() return host_types + def get_sub_host_types(self): + """Searches for sub-host-type tags in the inventory and returns all sub-host-types found.""" + pattern = re.compile(r'^oo_subhosttype_(.*)') + + sub_host_types = [] + inv = self.get_inventory() + for key in inv.keys(): + matched = pattern.match(key) + if matched: + sub_host_types.append(matched.group(1)) + + sub_host_types.sort() + return sub_host_types + def get_security_groups(self): """Searches for security_groups in the inventory and returns all SGs found.""" pattern = re.compile(r'^security_group_(.*)') @@ -148,61 +182,87 @@ class AwsUtil(object): return host_type @staticmethod + def gen_version_tag(ver): + """Generate the version tag + """ + return "oo_version_%s" % ver + + @staticmethod + def gen_clusterid_tag(clu): + """Generate the clusterid tag + """ + return "oo_clusterid_%s" % clu + + @staticmethod def gen_env_tag(env): """Generate the environment tag """ - return "tag_environment_%s" % env + return "oo_environment_%s" % env - def gen_host_type_tag(self, host_type): + def gen_host_type_tag(self, host_type, version): """Generate the host type tag """ - host_type = self.resolve_host_type(host_type) - return "tag_host-type_%s" % host_type + if version == '2': + host_type = self.resolve_host_type(host_type) + return "oo_hosttype_%s" % host_type - def gen_env_host_type_tag(self, host_type, env): - """Generate the environment host type tag + @staticmethod + def gen_sub_host_type_tag(sub_host_type): + """Generate the host type tag """ - host_type = self.resolve_host_type(host_type) - return "tag_env-host-type_%s-%s" % (env, host_type) + return "oo_subhosttype_%s" % sub_host_type - def get_host_list(self, host_type=None, envs=None): + # This function uses all of these params to perform a filters on our host inventory. 
+ # pylint: disable=too-many-arguments + def get_host_list(self, clusters=None, host_type=None, sub_host_type=None, envs=None, version=None, cached=False): """Get the list of hosts from the inventory using host-type and environment """ + retval = set([]) envs = envs or [] - inv = self.get_inventory() - # We prefer to deal with a list of environments - if issubclass(type(envs), basestring): - if envs == 'all': - envs = self.get_environments() + inv = self.get_inventory(cached=cached) + + retval.update(inv.get('all_hosts', [])) + + if clusters: + cluster_hosts = set([]) + if len(clusters) > 1: + for cluster in clusters: + clu_tag = AwsUtil.gen_clusterid_tag(cluster) + cluster_hosts.update(inv.get(clu_tag, [])) + else: + cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), [])) + + retval.intersection_update(cluster_hosts) + + if envs: + env_hosts = set([]) + if len(envs) > 1: + for env in envs: + env_tag = AwsUtil.gen_env_tag(env) + env_hosts.update(inv.get(env_tag, [])) else: - envs = [envs] - - if host_type and envs: - # Both host type and environment were specified - retval = [] - for env in envs: - env_host_type_tag = self.gen_env_host_type_tag(host_type, env) - if env_host_type_tag in inv.keys(): - retval += inv[env_host_type_tag] - return set(retval) - - if envs and not host_type: - # Just environment was specified - retval = [] - for env in envs: - env_tag = AwsUtil.gen_env_tag(env) - if env_tag in inv.keys(): - retval += inv[env_tag] - return set(retval) - - if host_type and not envs: - # Just host-type was specified - retval = [] - host_type_tag = self.gen_host_type_tag(host_type) - if host_type_tag in inv.keys(): - retval = inv[host_type_tag] - return set(retval) - - # We should never reach here! - raise ArgumentError("Invalid combination of parameters") + env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), [])) + + retval.intersection_update(env_hosts) + + if host_type: + retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), [])) + + if sub_host_type: + retval.intersection_update(inv.get(self.gen_sub_host_type_tag(sub_host_type), [])) + + if version != 'all': + retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), [])) + + return list(retval) + + def convert_to_ip(self, hosts, cached=False): + """convert a list of host names to ip addresses""" + + inv = self.get_inventory(cached=cached) + ips = [] + for host in hosts: + ips.append(inv['_meta']['hostvars'][host]['oo_public_ip']) + + return ips diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py deleted file mode 120000 index 660a0418e..000000000 --- a/bin/openshift_ansible/multi_ec2.py +++ /dev/null @@ -1 +0,0 @@ -../../inventory/multi_ec2.py
\ No newline at end of file diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py new file mode 120000 index 000000000..b40feec07 --- /dev/null +++ b/bin/openshift_ansible/multi_inventory.py @@ -0,0 +1 @@ +../../inventory/multi_inventory.py
\ No newline at end of file @@ -13,7 +13,10 @@ Options: -p PAR, --par=PAR max number of parallel threads (OPTIONAL) --outdir=OUTDIR output directory for stdout files (OPTIONAL) --errdir=ERRDIR output directory for stderr files (OPTIONAL) + -c CLUSTER, --cluster CLUSTER + which cluster to use -e ENV, --env ENV which environment to use + --v3 When working with v3 environments. v2 by default -t HOST_TYPE, --host-type HOST_TYPE which host type to use --list-host-types list all of the host types @@ -61,12 +64,23 @@ while [ $# -gt 0 ] ; do shift # get past the value of the option ;; + -c) + shift # get past the option + CLUSTER=$1 + shift # get past the value of the option + ;; + -e) shift # get past the option ENV=$1 shift # get past the value of the option ;; + --v3) + OPENSHIFT_VERSION="--v3 --ip" + shift # get past the value of the option + ;; + --timeout) shift # get past the option TIMEOUT=$1 @@ -103,20 +117,26 @@ while [ $# -gt 0 ] ; do done # Get host list from ohi -if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$ENV" ] ; then - HOSTS="$(ohi -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)" +CMD="" +if [ -n "$CLUSTER" ] ; then + CMD="$CMD -c $CLUSTER" +fi + +if [ -n "$ENV" ] ; then + CMD="$CMD -e $ENV" +fi + +if [ -n "$HOST_TYPE" ] ; then + CMD="$CMD -t $HOST_TYPE" +fi + +if [ -n "$OPENSHIFT_VERSION" ] ; then + CMD="$CMD $OPENSHIFT_VERSION" +fi + +if [ -n "$CMD" ] ; then + HOSTS="$(ohi $CMD 2>/dev/null)" OHI_ECODE=$? -else - echo - echo "Error: either -e or -t must be specified" - echo - exit 10 fi if [ $OHI_ECODE -ne 0 ] ; then @@ -13,7 +13,10 @@ Options: -p PAR, --par=PAR max number of parallel threads (OPTIONAL) --outdir=OUTDIR output directory for stdout files (OPTIONAL) --errdir=ERRDIR output directory for stderr files (OPTIONAL) + -c CLUSTER, --cluster CLUSTER + which cluster to use -e ENV, --env ENV which environment to use + --v3 When working with v3 environments. v2 by default -t HOST_TYPE, --host-type HOST_TYPE which host type to use --list-host-types list all of the host types @@ -45,17 +48,17 @@ fi # See if ohi is installed if ! which ohi &>/dev/null ; then - echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path." + echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path." - exit 10 + exit 10 fi PAR=200 USER=root TIMEOUT=0 -ARGS=() ENV="" HOST_TYPE="" + while [ $# -gt 0 ] ; do case $1 in -t|--host-type) @@ -64,12 +67,23 @@ while [ $# -gt 0 ] ; do shift # get past the value of the option ;; + -c) + shift # get past the option + CLUSTER=$1 + shift # get past the value of the option + ;; + -e) shift # get past the option ENV=$1 shift # get past the value of the option ;; + --v3) + OPENSHIFT_VERSION="--v3 --ip" + shift # get past the value of the option + ;; + --timeout) shift # get past the option TIMEOUT=$1 @@ -106,20 +120,26 @@ while [ $# -gt 0 ] ; do done # Get host list from ohi -if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$ENV" ] ; then - HOSTS="$(ohi -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? 
-elif [ -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)" +CMD="" +if [ -n "$CLUSTER" ] ; then + CMD="$CMD -c $CLUSTER" +fi + +if [ -n "$ENV" ] ; then + CMD="$CMD -e $ENV" +fi + +if [ -n "$HOST_TYPE" ] ; then + CMD="$CMD -t $HOST_TYPE" +fi + +if [ -n "$OPENSHIFT_VERSION" ] ; then + CMD="$CMD $OPENSHIFT_VERSION" +fi + +if [ -n "$CMD" ] ; then + HOSTS="$(ohi $CMD 2>/dev/null)" OHI_ECODE=$? -else - echo - echo "Error: either -e or -t must be specified" - echo - exit 10 fi if [ $OHI_ECODE -ne 0 ] ; then @@ -138,7 +138,7 @@ class Oscp(object): # attempt to select the correct environment if specified if self.env: - results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results) + results = filter(lambda result: result[1]['oo_environment'] == self.env, results) if results: return results @@ -164,10 +164,8 @@ class Oscp(object): print '{0:<35} {1}'.format(key, server_info[key]) else: for host_id, server_info in results[:limit]: - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) if limit: print @@ -177,10 +175,9 @@ class Oscp(object): else: for env, host_ids in self.host_inventory.items(): for host_id, server_info in host_ids.items(): - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) + def scp(self): '''scp files to or from a specified host @@ -209,12 +206,12 @@ class Oscp(object): if len(results) > 1: print "Multiple results found for %s." % self.host for result in results: - print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1]) + print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1]) return # early exit, too many results # Assume we have one and only one. 
hostname, server_info = results[0] - dns = server_info['ec2_public_dns_name'] + dns = server_info['oo_public_ip'] host_str = "%s%s%s" % (self.user, dns, self.path) @@ -55,15 +55,15 @@ class Ossh(object): def parse_cli_args(self): parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.') parser.add_argument('-e', '--env', action="store", - help="Which environment to search for the host ") + help="Which environment to search for the host ") parser.add_argument('-d', '--debug', default=False, - action="store_true", help="debug mode") + action="store_true", help="debug mode") parser.add_argument('-v', '--verbose', default=False, - action="store_true", help="Verbose?") + action="store_true", help="Verbose?") parser.add_argument('--refresh-cache', default=False, - action="store_true", help="Force a refresh on the host cache.") + action="store_true", help="Force a refresh on the host cache.") parser.add_argument('--list', default=False, - action="store_true", help="list out hosts") + action="store_true", help="list out hosts") parser.add_argument('-c', '--command', action='store', help='Command to run on remote host') parser.add_argument('-l', '--login_name', action='store', @@ -72,6 +72,8 @@ class Ossh(object): parser.add_argument('-o', '--ssh_opts', action='store', help='options to pass to SSH.\n \ "-oForwardX11=yes,TCPKeepAlive=yes"') + parser.add_argument('-A', default=False, action="store_true", + help='Forward authentication agent') parser.add_argument('host', nargs='?', default='') self.args = parser.parse_args() @@ -127,7 +129,7 @@ class Ossh(object): # attempt to select the correct environment if specified if self.env: - results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results) + results = filter(lambda result: result[1]['oo_environment'] == self.env, results) if results: return results @@ -153,10 +155,8 @@ class Ossh(object): print '{0:<35} {1}'.format(key, server_info[key]) else: for host_id, server_info in results[:limit]: - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) if limit: print @@ -166,10 +166,8 @@ class Ossh(object): else: for env, host_ids in self.host_inventory.items(): for host_id, server_info in host_ids.items(): - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) def ssh(self): '''SSH to a specified host @@ -181,6 +179,9 @@ class Ossh(object): if self.user: ssh_args.append('-l%s' % self.user) + if self.args.A: + ssh_args.append('-A') + if self.args.verbose: ssh_args.append('-vvv') @@ -195,12 +196,12 @@ class Ossh(object): if len(results) > 1: print "Multiple results found for %s." % self.host for result in results: - print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1]) + print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1]) return # early exit, too many results # Assume we have one and only one. 
- hostname, server_info = results[0] - dns = server_info['ec2_public_dns_name'] + _, server_info = results[0] + dns = server_info['oo_public_ip'] ssh_args.append(dns) diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion index 5072161f0..77b770a43 100755 --- a/bin/ossh_bash_completion +++ b/bin/ossh_bash_completion @@ -1,12 +1,12 @@ __ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])' - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])' - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])' fi } @@ -26,13 +26,13 @@ complete -F _ossh ossh oscp __opssh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))' - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + elif [[ -f 
/dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))' - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))' fi } diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion index 44500c618..170ca889b 100644 --- a/bin/ossh_zsh_completion +++ b/bin/ossh_zsh_completion @@ -2,13 +2,13 @@ _ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])') - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])') - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])') fi diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh index 7c6cb7b0b..65979c58a 100644 --- 
a/bin/zsh_functions/_ossh +++ b/bin/zsh_functions/_ossh @@ -1,8 +1,8 @@ #compdef ossh oscp _ossh_known_hosts(){ - if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') + if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') fi } |
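
For reference, the reworked AwsUtil.get_host_list() above no longer matches combined tag_env-host-type_* groups; it starts from all_hosts and narrows the set by intersecting it with each requested oo_* group (cluster, environment, host type, sub host type, version). The sketch below restates that filtering pattern against a plain inventory dict; filter_hosts and its argument names are illustrative, not part of the patch.

def filter_hosts(inventory, clusters=None, envs=None, host_type=None,
                 sub_host_type=None, version=None):
    # Illustrative restatement of the set-intersection filtering used by the
    # new AwsUtil.get_host_list(); 'inventory' is the dict returned by the
    # inventory script (group name -> list of host names).
    hosts = set(inventory.get('all_hosts', []))

    def narrow(group_names):
        # Union the members of the requested groups, then intersect with the result so far.
        members = set()
        for name in group_names:
            members.update(inventory.get(name, []))
        hosts.intersection_update(members)

    if clusters:
        narrow('oo_clusterid_%s' % cluster for cluster in clusters)
    if envs:
        narrow('oo_environment_%s' % env for env in envs)
    if host_type:
        narrow(['oo_hosttype_%s' % host_type])
    if sub_host_type:
        narrow(['oo_subhosttype_%s' % sub_host_type])
    if version and version != 'all':
        narrow(['oo_version_%s' % version])
    return sorted(hosts)

For example, filter_hosts(inv, clusters=['mycluster'], host_type='node', version='3') would return only the v3 nodes in that cluster, which is the same narrowing ohi performs when invoked with -c, -t and --v3.
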