From 236b30d9b5816c18325c698081bb102afaae8d17 Mon Sep 17 00:00:00 2001
From: Kenjiro Nakayama <nakayamakenjiro@gmail.com>
Date: Mon, 28 Sep 2015 08:54:40 +0900
Subject: Remove image options in oadm command

---
 inventory/byo/hosts.example | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'inventory')

diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index df1bae49f..c2c5090f9 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -24,7 +24,7 @@ deployment_type=atomic-enterprise
 #use_cluster_metrics=true
 
 # Pre-release registry URL
-#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+#oreg_url=example.com/openshift3/ose-${component}:${version}
 
 # Pre-release Dev puddle repo
 #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-- 
cgit v1.2.3
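
The ${component} and ${version} placeholders in oreg_url above are kept
verbatim in the inventory and filled in later by OpenShift itself. A minimal
sketch of that expansion, modeled here with string.Template purely for
illustration (the component and version values are examples, and this is not
the actual substitution code):

    from string import Template

    OREG_URL = 'example.com/openshift3/ose-${component}:${version}'

    def expand_image(component, version, template=OREG_URL):
        '''Fill the component/version placeholders of an image URL.'''
        return Template(template).substitute(component=component,
                                             version=version)

    print(expand_image('haproxy-router', 'v3.0.2.0'))
    # example.com/openshift3/ose-haproxy-router:v3.0.2.0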


From 79297c1c1f98e67b531c5235798bdd508fd60624 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Thu, 29 Oct 2015 16:57:34 -0400
Subject: Pulling latest gce.py module from ansible

---
 inventory/gce/hosts/gce.py | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

(limited to 'inventory')

diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 6ed12e011..99746cdbf 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -66,12 +66,22 @@ Examples:
   $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
 
   Use the GCE inventory script to print out instance specific information
-  $ plugins/inventory/gce.py --host my_instance
+  $ contrib/inventory/gce.py --host my_instance
 
 Author: Eric Johnson <erjohnso@google.com>
 Version: 0.0.1
 '''
 
+__requires__ = ['pycrypto>=2.6']
+try:
+    import pkg_resources
+except ImportError:
+    # Use pkg_resources to find the correct versions of libraries and set
+    # sys.path appropriately when there are multiversion installs.  We don't
+    # fail here as there is code that better expresses the errors where the
+    # library is used.
+    pass
+
 USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
 USER_AGENT_VERSION="v1"
 
@@ -102,9 +112,9 @@ class GceInventory(object):
 
         # Just display data for specific host
         if self.args.host:
-            print self.json_format_dict(self.node_to_dict(
+            print(self.json_format_dict(self.node_to_dict(
                     self.get_instance(self.args.host)),
-                    pretty=self.args.pretty)
+                    pretty=self.args.pretty))
             sys.exit(0)
 
         # Otherwise, assume user wants all instances grouped
@@ -120,7 +130,6 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
-
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -174,7 +183,6 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
-        
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -213,8 +221,7 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            # Hosts don't always have a public IP name
-            #'gce_public_ip': inst.public_ips[0],
+            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -222,15 +229,15 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't always have a public IP name
-            #'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't have a public name, so we add an IP
+            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
         }
 
     def get_instance(self, instance_name):
         '''Gets details about a specific instance '''
         try:
             return self.driver.ex_get_node(instance_name)
-        except Exception, e:
+        except Exception as e:
             return None
 
     def group_instances(self):
@@ -250,7 +257,10 @@ class GceInventory(object):
 
             tags = node.extra['tags']
             for t in tags:
-                tag = 'tag_%s' % t
+                if t.startswith('group-'):
+                    tag = t[6:]
+                else:
+                    tag = 'tag_%s' % t
                 if groups.has_key(tag): groups[tag].append(name)
                 else: groups[tag] = [name]
 
-- 
cgit v1.2.3
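
Two behavior changes in the hunks above are worth calling out: instance tags
prefixed with "group-" now map directly to inventory groups (the prefix is
stripped), and ansible_ssh_host falls back to the private IP when an instance
has no public one. A standalone sketch of both rules; the Node class here is
a hypothetical stand-in for a libcloud node, not the real driver object:

    class Node(object):
        def __init__(self, name, tags, public_ips, private_ips):
            self.name = name
            self.tags = tags
            self.public_ips = public_ips
            self.private_ips = private_ips

    def group_for_tag(tag):
        '''"group-infra" becomes group "infra"; other tags get "tag_" prefixed.'''
        return tag[6:] if tag.startswith('group-') else 'tag_%s' % tag

    def ssh_host(node):
        '''Prefer the public IP; fall back to the private IP.'''
        return node.public_ips[0] if node.public_ips else node.private_ips[0]

    node = Node('master-1', ['group-masters', 'web'], [], ['10.0.0.5'])
    print([group_for_tag(t) for t in node.tags])  # ['masters', 'tag_web']
    print(ssh_host(node))                         # 10.0.0.5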


From 8da7c1f5bc68110469bedceb0ddad4fdfc8b7e4d Mon Sep 17 00:00:00 2001
From: Andrew Butcher <abutcher@redhat.com>
Date: Wed, 28 Oct 2015 10:39:41 -0400
Subject: Add custom certificates to serving info in master configuration.

---
 inventory/byo/hosts.example | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'inventory')

diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index ad19fe116..c6733567a 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -99,6 +99,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # set RPM version for debugging purposes
 #openshift_pkg_version=-3.0.0.0
 
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com
-- 
cgit v1.2.3


From 3a8b4f1315e28f35e16ace77560f040f08588722 Mon Sep 17 00:00:00 2001
From: Andrew Butcher <abutcher@redhat.com>
Date: Tue, 3 Nov 2015 11:26:33 -0500
Subject: Filter internal hostnames from the list of parsed names.

---
 inventory/byo/hosts.example | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'inventory')

diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index c6733567a..f60918e6d 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -101,6 +101,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 
 # Configure custom master certificates
 #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
 
 # host group for masters
 [masters]
-- 
cgit v1.2.3
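
Together with the previous commit, this gives each named certificate an
optional "names" list: when present it overrides the names parsed from the
certificate, and detected names that are internal hostnames are filtered out.
A hedged sketch of that selection rule; the helper and the
internal_hostnames set are illustrative assumptions, not the playbook's
actual implementation:

    def certificate_names(cert, detected_names, internal_hostnames):
        '''Return the hostnames a named certificate should serve.'''
        if 'names' in cert:
            return cert['names']
        return [name for name in detected_names
                if name not in internal_hostnames]

    cert = {'certfile': '/path/to/custom1.crt',
            'keyfile': '/path/to/custom1.key'}
    detected = ['public-master-host.com', 'internal-master-host.int']
    print(certificate_names(cert, detected, {'internal-master-host.int'}))
    # ['public-master-host.com']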


From 4b85e4fae04a71cf2cea17018b9240b369f145d5 Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Mon, 2 Nov 2015 16:03:33 -0500
Subject: Support for gce

---
 inventory/multi_ec2.py                 | 375 -----------------------------
 inventory/multi_ec2.yaml.example       |  32 ---
 inventory/multi_inventory.py           | 415 +++++++++++++++++++++++++++++++++
 inventory/multi_inventory.yaml.example |  51 ++++
 4 files changed, 466 insertions(+), 407 deletions(-)
 delete mode 100755 inventory/multi_ec2.py
 delete mode 100644 inventory/multi_ec2.yaml.example
 create mode 100755 inventory/multi_inventory.py
 create mode 100644 inventory/multi_inventory.yaml.example

(limited to 'inventory')

diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
deleted file mode 100755
index 98dde3f3c..000000000
--- a/inventory/multi_ec2.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python2
-'''
-    Fetch and combine multiple ec2 account settings into a single
-    json hash.
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-from time import time
-import argparse
-import yaml
-import os
-import subprocess
-import json
-import errno
-import fcntl
-import tempfile
-import copy
-
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
-
-class MultiEc2(object):
-    '''
-       MultiEc2 class:
-            Opens a yaml config file and reads aws credentials.
-            Stores a json hash of resources in result.
-    '''
-
-    def __init__(self, args=None):
-        # Allow args to be passed when called as a library
-        if not args:
-            self.args = {}
-        else:
-            self.args = args
-
-        self.cache_path = DEFAULT_CACHE_PATH
-        self.config = None
-        self.all_ec2_results = {}
-        self.result = {}
-        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
-        same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
-        etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
-
-        # Prefer a file in the same directory, fall back to a file in etc
-        if os.path.isfile(same_dir_config_file):
-            self.config_file = same_dir_config_file
-        elif os.path.isfile(etc_dir_config_file):
-            self.config_file = etc_dir_config_file
-        else:
-            self.config_file = None # expect env vars
-
-
-    def run(self):
-        '''This method checks to see if the local
-           cache is valid for the inventory.
-
-           if the cache is valid; return cache
-           else the credentials are loaded from multi_ec2.yaml or from the env
-           and we attempt to get the inventory from the provider specified.
-        '''
-        # load yaml
-        if self.config_file and os.path.isfile(self.config_file):
-            self.config = self.load_yaml_config()
-        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
-             os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
-            # Build a default config
-            self.config = {}
-            self.config['accounts'] = [
-                {
-                    'name': 'default',
-                    'cache_location': DEFAULT_CACHE_PATH,
-                    'provider': 'aws/hosts/ec2.py',
-                    'env_vars': {
-                        'AWS_ACCESS_KEY_ID':     os.environ["AWS_ACCESS_KEY_ID"],
-                        'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
-                    }
-                },
-            ]
-
-            self.config['cache_max_age'] = 300
-        else:
-            raise RuntimeError("Could not find valid ec2 credentials in the environment.")
-
-        if self.config.has_key('cache_location'):
-            self.cache_path = self.config['cache_location']
-
-        if self.args.get('refresh_cache', None):
-            self.get_inventory()
-            self.write_to_cache()
-        # if its a host query, fetch and do not cache
-        elif self.args.get('host', None):
-            self.get_inventory()
-        elif not self.is_cache_valid():
-            # go fetch the inventories and cache them if cache is expired
-            self.get_inventory()
-            self.write_to_cache()
-        else:
-            # get data from disk
-            self.get_inventory_from_cache()
-
-    def load_yaml_config(self, conf_file=None):
-        """Load a yaml config file with credentials to query the
-        respective cloud for inventory.
-        """
-        config = None
-
-        if not conf_file:
-            conf_file = self.config_file
-
-        with open(conf_file) as conf:
-            config = yaml.safe_load(conf)
-
-        return config
-
-    def get_provider_tags(self, provider, env=None):
-        """Call <provider> and query all of the tags that are usuable
-        by ansible.  If environment is empty use the default env.
-        """
-        if not env:
-            env = os.environ
-
-        # Allow relatively path'd providers in config file
-        if os.path.isfile(os.path.join(self.file_path, provider)):
-            provider = os.path.join(self.file_path, provider)
-
-        # check to see if provider exists
-        if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
-            raise RuntimeError("Problem with the provider.  Please check path " \
-                        "and that it is executable. (%s)" % provider)
-
-        cmds = [provider]
-        if self.args.get('host', None):
-            cmds.append("--host")
-            cmds.append(self.args.get('host', None))
-        else:
-            cmds.append('--list')
-
-        cmds.append('--refresh-cache')
-
-        return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
-                                stdout=subprocess.PIPE, env=env)
-
-    @staticmethod
-    def generate_config(config_data):
-        """Generate the ec2.ini file in as a secure temp file.
-           Once generated, pass it to the ec2.py as an environment variable.
-        """
-        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
-        for section, values in config_data.items():
-            os.write(fildes, "[%s]\n" % section)
-            for option, value  in values.items():
-                os.write(fildes, "%s = %s\n" % (option, value))
-        os.close(fildes)
-        return tmp_file_path
-
-    def run_provider(self):
-        '''Setup the provider call with proper variables
-           and call self.get_provider_tags.
-        '''
-        try:
-            all_results = []
-            tmp_file_paths = []
-            processes = {}
-            for account in self.config['accounts']:
-                env = account['env_vars']
-                if account.has_key('provider_config'):
-                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
-                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
-                name = account['name']
-                provider = account['provider']
-                processes[name] = self.get_provider_tags(provider, env)
-
-            # for each process collect stdout when its available
-            for name, process in processes.items():
-                out, err = process.communicate()
-                all_results.append({
-                    "name": name,
-                    "out": out.strip(),
-                    "err": err.strip(),
-                    "code": process.returncode
-                })
-
-        finally:
-            # Clean up the mkstemp file
-            for tmp_file in tmp_file_paths:
-                os.unlink(tmp_file)
-
-        return all_results
-
-    def get_inventory(self):
-        """Create the subprocess to fetch tags from a provider.
-        Host query:
-        Query to return a specific host.  If > 1 queries have
-        results then fail.
-
-        List query:
-        Query all of the different accounts for their tags.  Once completed
-        store all of their results into one merged updated hash.
-        """
-        provider_results = self.run_provider()
-
-        # process --host results
-        # For any 0 result, return it
-        if self.args.get('host', None):
-            count = 0
-            for results in provider_results:
-                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
-                    self.result = json.loads(results['out'])
-                    count += 1
-                if count > 1:
-                    raise RuntimeError("Found > 1 results for --host %s. \
-                                       This is an invalid state." % self.args.get('host', None))
-        # process --list results
-        else:
-            # For any non-zero, raise an error on it
-            for result in provider_results:
-                if result['code'] != 0:
-                    err_msg = ['\nProblem fetching account: {name}',
-                               'Error Code: {code}',
-                               'StdErr: {err}',
-                               'Stdout: {out}',
-                              ]
-                    raise RuntimeError('\n'.join(err_msg).format(**result))
-                else:
-                    self.all_ec2_results[result['name']] = json.loads(result['out'])
-
-            # Check if user wants extra vars in yaml by
-            # having hostvars and all_group defined
-            for acc_config in self.config['accounts']:
-                self.apply_account_config(acc_config)
-
-            # Build results by merging all dictionaries
-            values = self.all_ec2_results.values()
-            values.insert(0, self.result)
-            for result in  values:
-                MultiEc2.merge_destructively(self.result, result)
-
-    def apply_account_config(self, acc_config):
-        ''' Apply account config settings
-        '''
-        results = self.all_ec2_results[acc_config['name']]
-
-        # Update each hostvar with the newly desired key: value from extra_*
-        for _extra in ['extra_groups', 'extra_vars']:
-            for new_var, value in acc_config.get(_extra, {}).items():
-                # Verify the account results look sane
-                # by checking for these keys ('_meta' and 'hostvars' exist)
-                if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
-                    for data in results['_meta']['hostvars'].values():
-                        data[str(new_var)] = str(value)
-
-                # Add this group
-                if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
-                    results["%s_%s" % (new_var, value)] = \
-                     copy.copy(results[acc_config['all_group']])
-
-        # Clone groups goes here
-        for to_name, from_name in acc_config.get('clone_groups', {}).items():
-            if results.has_key(from_name):
-                results[to_name] = copy.copy(results[from_name])
-
-        # Clone vars goes here
-        for to_name, from_name in acc_config.get('clone_vars', {}).items():
-            # Verify the account results look sane
-            # by checking for these keys ('_meta' and 'hostvars' exist)
-            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
-                for data in results['_meta']['hostvars'].values():
-                    data[str(to_name)] = data.get(str(from_name), 'nil')
-
-        # store the results back into all_ec2_results
-        self.all_ec2_results[acc_config['name']] = results
-
-    @staticmethod
-    def merge_destructively(input_a, input_b):
-        "merges b into input_a"
-        for key in input_b:
-            if key in input_a:
-                if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
-                    MultiEc2.merge_destructively(input_a[key], input_b[key])
-                elif input_a[key] == input_b[key]:
-                    pass # same leaf value
-                # both lists so add each element in b to a if it does ! exist
-                elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
-                    for result in input_b[key]:
-                        if result not in input_a[key]:
-                            input_a[key].append(result)
-                # a is a list and not b
-                elif isinstance(input_a[key], list):
-                    if input_b[key] not in input_a[key]:
-                        input_a[key].append(input_b[key])
-                elif isinstance(input_b[key], list):
-                    input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
-                else:
-                    input_a[key] = [input_a[key], input_b[key]]
-            else:
-                input_a[key] = input_b[key]
-        return input_a
-
-    def is_cache_valid(self):
-        ''' Determines if the cache files have expired, or if it is still valid '''
-
-        if os.path.isfile(self.cache_path):
-            mod_time = os.path.getmtime(self.cache_path)
-            current_time = time()
-            if (mod_time + self.config['cache_max_age']) > current_time:
-                return True
-
-        return False
-
-    def parse_cli_args(self):
-        ''' Command line argument processing '''
-
-        parser = argparse.ArgumentParser(
-            description='Produce an Ansible Inventory file based on a provider')
-        parser.add_argument('--refresh-cache', action='store_true', default=False,
-                            help='Fetch cached only instances (default: False)')
-        parser.add_argument('--list', action='store_true', default=True,
-                            help='List instances (default: True)')
-        parser.add_argument('--host', action='store', default=False,
-                            help='Get all the variables about a specific instance')
-        self.args = parser.parse_args().__dict__
-
-    def write_to_cache(self):
-        ''' Writes data in JSON format to a file '''
-
-        # if it does not exist, try and create it.
-        if not os.path.isfile(self.cache_path):
-            path = os.path.dirname(self.cache_path)
-            try:
-                os.makedirs(path)
-            except OSError as exc:
-                if exc.errno != errno.EEXIST or not os.path.isdir(path):
-                    raise
-
-        json_data = MultiEc2.json_format_dict(self.result, True)
-        with open(self.cache_path, 'w') as cache:
-            try:
-                fcntl.flock(cache, fcntl.LOCK_EX)
-                cache.write(json_data)
-            finally:
-                fcntl.flock(cache, fcntl.LOCK_UN)
-
-    def get_inventory_from_cache(self):
-        ''' Reads the inventory from the cache file and returns it as a JSON
-        object '''
-
-        if not os.path.isfile(self.cache_path):
-            return None
-
-        with open(self.cache_path, 'r') as cache:
-            self.result = json.loads(cache.read())
-
-        return True
-
-    @classmethod
-    def json_format_dict(cls, data, pretty=False):
-        ''' Converts a dict to a JSON object and dumps it as a formatted
-        string '''
-
-        if pretty:
-            return json.dumps(data, sort_keys=True, indent=2)
-        else:
-            return json.dumps(data)
-
-    def result_str(self):
-        '''Return cache string stored in self.result'''
-        return self.json_format_dict(self.result, True)
-
-
-if __name__ == "__main__":
-    MEC2 = MultiEc2()
-    MEC2.parse_cli_args()
-    MEC2.run()
-    print MEC2.result_str()
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index bbd81ad20..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
-  - name: aws1
-    provider: aws/hosts/ec2.py
-    provider_config:
-      ec2:
-        regions: all
-        regions_exclude:  us-gov-west-1,cn-north-1
-        destination_variable: public_dns_name
-        route53: False
-        cache_path: ~/.ansible/tmp
-        cache_max_age: 300
-        vpc_destination_variable: ip_address
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-    all_group: ec2
-    extra_vars:
-      cloud: aws
-      account: aws1
-
-- name: aws2
-    provider: aws/hosts/ec2.py
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-      EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py
new file mode 100755
index 000000000..354a8c10c
--- /dev/null
+++ b/inventory/multi_inventory.py
@@ -0,0 +1,415 @@
+#!/usr/bin/env python2
+'''
+    Fetch and combine multiple inventory account settings into a single
+    json hash.
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+from time import time
+import argparse
+import yaml
+import os
+import subprocess
+import json
+import errno
+import fcntl
+import tempfile
+import copy
+from string import Template
+import shutil
+
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
+
+class MultiInventoryException(Exception):
+    '''Exceptions for MultiInventory class'''
+    pass
+
+class MultiInventory(object):
+    '''
+       MultiInventory class:
+            Opens a yaml config file and reads aws credentials.
+            Stores a json hash of resources in result.
+    '''
+
+    def __init__(self, args=None):
+        # Allow args to be passed when called as a library
+        if not args:
+            self.args = {}
+        else:
+            self.args = args
+
+        self.cache_path = DEFAULT_CACHE_PATH
+        self.config = None
+        self.all_inventory_results = {}
+        self.result = {}
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
+        etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
+
+        # Prefer a file in the same directory, fall back to a file in etc
+        if os.path.isfile(same_dir_config_file):
+            self.config_file = same_dir_config_file
+        elif os.path.isfile(etc_dir_config_file):
+            self.config_file = etc_dir_config_file
+        else:
+            self.config_file = None # expect env vars
+
+
+    def run(self):
+        '''This method checks to see if the local
+           cache is valid for the inventory.
+
+           If the cache is valid, return it; otherwise load the credentials
+           from multi_inventory.yaml or from the environment and attempt to
+           fetch the inventory from the specified provider.
+        '''
+        # load yaml
+        if self.config_file and os.path.isfile(self.config_file):
+            self.config = self.load_yaml_config()
+        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
+             os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+            # Build a default config
+            self.config = {}
+            self.config['accounts'] = [
+                {
+                    'name': 'default',
+                    'cache_location': DEFAULT_CACHE_PATH,
+                    'provider': 'aws/hosts/ec2.py',
+                    'env_vars': {
+                        'AWS_ACCESS_KEY_ID':     os.environ["AWS_ACCESS_KEY_ID"],
+                        'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
+                    }
+                },
+            ]
+
+            self.config['cache_max_age'] = 300
+        else:
+            raise RuntimeError("Could not find valid ec2 credentials in the environment.")
+
+        if self.config.has_key('cache_location'):
+            self.cache_path = self.config['cache_location']
+
+        if self.args.get('refresh_cache', None):
+            self.get_inventory()
+            self.write_to_cache()
+        # if it's a host query, fetch and do not cache
+        elif self.args.get('host', None):
+            self.get_inventory()
+        elif not self.is_cache_valid():
+            # go fetch the inventories and cache them if cache is expired
+            self.get_inventory()
+            self.write_to_cache()
+        else:
+            # get data from disk
+            self.get_inventory_from_cache()
+
+    def load_yaml_config(self, conf_file=None):
+        """Load a yaml config file with credentials to query the
+        respective cloud for inventory.
+        """
+        config = None
+
+        if not conf_file:
+            conf_file = self.config_file
+
+        with open(conf_file) as conf:
+            config = yaml.safe_load(conf)
+
+        # Provide a check for unique account names
+        if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+            raise MultiInventoryException('Duplicate account names in config file')
+
+        return config
+
+    def get_provider_tags(self, provider, env=None):
+        """Call <provider> and query all of the tags that are usuable
+        by ansible.  If environment is empty use the default env.
+        """
+        if not env:
+            env = os.environ
+
+        # Allow relative paths to providers in the config file
+        if os.path.isfile(os.path.join(self.file_path, provider)):
+            provider = os.path.join(self.file_path, provider)
+
+        # check to see if provider exists
+        if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
+            raise RuntimeError("Problem with the provider.  Please check path " \
+                        "and that it is executable. (%s)" % provider)
+
+        cmds = [provider]
+        if self.args.get('host', None):
+            cmds.append("--host")
+            cmds.append(self.args.get('host', None))
+        else:
+            cmds.append('--list')
+
+        if 'aws' in provider.lower():
+            cmds.append('--refresh-cache')
+
+        return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
+                                stdout=subprocess.PIPE, env=env)
+
+    @staticmethod
+    def generate_config(provider_files):
+        """Generate the provider_files in a temporary directory.
+        """
+        prefix = 'multi_inventory.'
+        tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+        for provider_file in provider_files:
+            filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+            content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+            filedes.write(content)
+            filedes.close()
+
+        return tmp_dir_path
+
+    def run_provider(self):
+        '''Setup the provider call with proper variables
+           and call self.get_provider_tags.
+        '''
+        try:
+            all_results = []
+            tmp_dir_paths = []
+            processes = {}
+            for account in self.config['accounts']:
+                tmp_dir = None
+                if account.has_key('provider_files'):
+                    tmp_dir = MultiInventory.generate_config(account['provider_files'])
+                    tmp_dir_paths.append(tmp_dir)
+
+                # Update env vars after creating provider_config_files
+                # so that we can grab the tmp_dir if it exists
+                env = account.get('env_vars', {})
+                if env and tmp_dir:
+                    for key, value in env.items():
+                        env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
+                name = account['name']
+                provider = account['provider']
+                processes[name] = self.get_provider_tags(provider, env)
+
+            # for each process, collect stdout when it's available
+            for name, process in processes.items():
+                out, err = process.communicate()
+                all_results.append({
+                    "name": name,
+                    "out": out.strip(),
+                    "err": err.strip(),
+                    "code": process.returncode
+                })
+
+        finally:
+            # Clean up the mkdtemp dirs
+            for tmp_dir in tmp_dir_paths:
+                shutil.rmtree(tmp_dir)
+
+        return all_results
+
+    def get_inventory(self):
+        """Create the subprocess to fetch tags from a provider.
+        Host query:
+        Query to return a specific host.  If > 1 queries have
+        results then fail.
+
+        List query:
+        Query all of the different accounts for their tags.  Once completed
+        store all of their results into one merged updated hash.
+        """
+        provider_results = self.run_provider()
+
+        # process --host results
+        # Return the single result with exit code 0; more than one match is an error
+        if self.args.get('host', None):
+            count = 0
+            for results in provider_results:
+                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
+                    self.result = json.loads(results['out'])
+                    count += 1
+                if count > 1:
+                    raise RuntimeError("Found > 1 results for --host %s. \
+                                       This is an invalid state." % self.args.get('host', None))
+        # process --list results
+        else:
+            # For any non-zero, raise an error on it
+            for result in provider_results:
+                if result['code'] != 0:
+                    err_msg = ['\nProblem fetching account: {name}',
+                               'Error Code: {code}',
+                               'StdErr: {err}',
+                               'Stdout: {out}',
+                              ]
+                    raise RuntimeError('\n'.join(err_msg).format(**result))
+                else:
+                    self.all_inventory_results[result['name']] = json.loads(result['out'])
+
+            # Apply each account's extra_vars, extra_groups and
+            # clone settings from the yaml config
+            for acc_config in self.config['accounts']:
+                self.apply_account_config(acc_config)
+
+            # Build results by merging all dictionaries
+            values = self.all_inventory_results.values()
+            values.insert(0, self.result)
+            for result in values:
+                MultiInventory.merge_destructively(self.result, result)
+
+    def add_entry(self, data, keys, item):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            item = c
+        '''
+        if "." in keys:
+            key, rest = keys.split(".", 1)
+            if key not in data:
+                data[key] = {}
+            self.add_entry(data[key], rest, item)
+        else:
+            data[keys] = item
+
+    def get_entry(self, data, keys):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            return c
+        '''
+        if keys and "." in keys:
+            key, rest = keys.split(".", 1)
+            return self.get_entry(data[key], rest)
+        else:
+            return data.get(keys, None)
+
+    def apply_account_config(self, acc_config):
+        ''' Apply account config settings
+        '''
+        results = self.all_inventory_results[acc_config['name']]
+        results['all_hosts'] = results['_meta']['hostvars'].keys()
+
+        # Update each hostvar with the newly desired key: value from extra_*
+        for _extra in ['extra_vars', 'extra_groups']:
+            for new_var, value in acc_config.get(_extra, {}).items():
+                for data in results['_meta']['hostvars'].values():
+                    self.add_entry(data, new_var, value)
+
+                # Add this group
+                if _extra == 'extra_groups':
+                    results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
+
+        # Clone groups goes here
+        for to_name, from_name in acc_config.get('clone_groups', {}).items():
+            if results.has_key(from_name):
+                results[to_name] = copy.copy(results[from_name])
+
+        # Clone vars goes here
+        for to_name, from_name in acc_config.get('clone_vars', {}).items():
+            for data in results['_meta']['hostvars'].values():
+                self.add_entry(data, to_name, self.get_entry(data, from_name))
+
+        # store the results back into all_inventory_results
+        self.all_inventory_results[acc_config['name']] = results
+
+    @staticmethod
+    def merge_destructively(input_a, input_b):
+        "merges b into input_a"
+        for key in input_b:
+            if key in input_a:
+                if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
+                    MultiInventory.merge_destructively(input_a[key], input_b[key])
+                elif input_a[key] == input_b[key]:
+                    pass # same leaf value
+                # both are lists, so add each element of b to a if not already present
+                elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
+                    for result in input_b[key]:
+                        if result not in input_a[key]:
+                            input_a[key].append(result)
+                # a is a list but b is not
+                elif isinstance(input_a[key], list):
+                    if input_b[key] not in input_a[key]:
+                        input_a[key].append(input_b[key])
+                elif isinstance(input_b[key], list):
+                    input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
+                else:
+                    input_a[key] = [input_a[key], input_b[key]]
+            else:
+                input_a[key] = input_b[key]
+        return input_a
+
+    def is_cache_valid(self):
+        ''' Determine whether the cache file is still valid or has expired '''
+
+        if os.path.isfile(self.cache_path):
+            mod_time = os.path.getmtime(self.cache_path)
+            current_time = time()
+            if (mod_time + self.config['cache_max_age']) > current_time:
+                return True
+
+        return False
+
+    def parse_cli_args(self):
+        ''' Command line argument processing '''
+
+        parser = argparse.ArgumentParser(
+            description='Produce an Ansible Inventory file based on a provider')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force a refresh of the cached inventory (default: False)')
+        parser.add_argument('--list', action='store_true', default=True,
+                            help='List instances (default: True)')
+        parser.add_argument('--host', action='store', default=False,
+                            help='Get all the variables about a specific instance')
+        self.args = parser.parse_args().__dict__
+
+    def write_to_cache(self):
+        ''' Writes data in JSON format to a file '''
+
+        # if it does not exist, try and create it.
+        if not os.path.isfile(self.cache_path):
+            path = os.path.dirname(self.cache_path)
+            try:
+                os.makedirs(path)
+            except OSError as exc:
+                if exc.errno != errno.EEXIST or not os.path.isdir(path):
+                    raise
+
+        json_data = MultiInventory.json_format_dict(self.result, True)
+        with open(self.cache_path, 'w') as cache:
+            try:
+                fcntl.flock(cache, fcntl.LOCK_EX)
+                cache.write(json_data)
+            finally:
+                fcntl.flock(cache, fcntl.LOCK_UN)
+
+    def get_inventory_from_cache(self):
+        ''' Read the inventory from the cache file into self.result;
+        return True, or None if no cache file exists '''
+
+        if not os.path.isfile(self.cache_path):
+            return None
+
+        with open(self.cache_path, 'r') as cache:
+            self.result = json.loads(cache.read())
+
+        return True
+
+    @classmethod
+    def json_format_dict(cls, data, pretty=False):
+        ''' Converts a dict to a JSON object and dumps it as a formatted
+        string '''
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+    def result_str(self):
+        '''Return cache string stored in self.result'''
+        return self.json_format_dict(self.result, True)
+
+
+if __name__ == "__main__":
+    MI2 = MultiInventory()
+    MI2.parse_cli_args()
+    MI2.run()
+    print MI2.result_str()
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
new file mode 100644
index 000000000..0f0788d18
--- /dev/null
+++ b/inventory/multi_inventory.yaml.example
@@ -0,0 +1,51 @@
+# multi inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+  - name: aws1
+    provider: aws/hosts/ec2.py
+    provider_files:
+    - name: ec2.ini
+      contents: |-
+        [ec2]
+        regions = all
+        regions_exclude =  us-gov-west-1,cn-north-1
+        destination_variable = public_dns_name
+        route53 = False
+        cache_path = ~/.ansible/tmp
+        cache_max_age = 300
+        vpc_destination_variable = ip_address
+    env_vars:
+      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    extra_vars:
+      cloud: aws
+      account: aws1
+
+  - name: mygce
+    extra_vars:
+      cloud: gce
+      account: gce1
+    env_vars:
+      GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    provider: gce/hosts/gce.py
+    provider_files:
+    - name: priv_key.pem
+      contents: |-
+        -----BEGIN PRIVATE KEY-----
+        yourprivatekeydatahere
+        -----END PRIVATE KEY-----
+    - name: gce.ini
+      contents: |-
+        [gce]
+        gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+        gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem
+        gce_project_id = gce-project
+        zone = us-central1-a
+        network = default
+        gce_machine_type = n1-standard-2
+        gce_machine_image = rhel7
+
+cache_max_age: 600
-- 
cgit v1.2.3
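
The new multi_inventory.py lets extra_vars and clone_vars address nested
hostvars with dotted "a.b.c" key notation via add_entry/get_entry. A
standalone copy of those helpers with a worked example (the oo.account key
is just an illustration):

    def add_entry(data, keys, item):
        '''Add item to a nested dict using "a.b.c" key notation.'''
        if '.' in keys:
            key, rest = keys.split('.', 1)
            data.setdefault(key, {})
            add_entry(data[key], rest, item)
        else:
            data[keys] = item

    def get_entry(data, keys):
        '''Fetch an item from a nested dict using "a.b.c" key notation.'''
        if keys and '.' in keys:
            key, rest = keys.split('.', 1)
            return get_entry(data[key], rest)
        return data.get(keys, None)

    hostvars = {}
    add_entry(hostvars, 'oo.account', 'aws1')  # {'oo': {'account': 'aws1'}}
    print(get_entry(hostvars, 'oo.account'))   # aws1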
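
merge_destructively is what folds the per-account results into a single
inventory: dicts merge recursively, identical leaves collapse, lists are
unioned, and differing scalars are promoted into a list. A standalone copy
of the merge with a worked example:

    def merge_destructively(a, b):
        '''Merge b into a, following the rules above.'''
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    merge_destructively(a[key], b[key])
                elif a[key] == b[key]:
                    pass  # same leaf value
                elif isinstance(a[key], list) and isinstance(b[key], list):
                    for item in b[key]:
                        if item not in a[key]:
                            a[key].append(item)
                elif isinstance(a[key], list):
                    if b[key] not in a[key]:
                        a[key].append(b[key])
                elif isinstance(b[key], list):
                    a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
                else:
                    a[key] = [a[key], b[key]]
            else:
                a[key] = b[key]
        return a

    aws = {'masters': ['m1'], 'cloud': 'aws'}
    gce = {'masters': ['m2'], 'cloud': 'gce'}
    merged = merge_destructively(aws, gce)
    print(merged['masters'])  # ['m1', 'm2']
    print(merged['cloud'])    # ['aws', 'gce']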