Diffstat (limited to 'inventory')
-rw-r--r--   inventory/byo/hosts.example                                          86
-rwxr-xr-x   inventory/gce/hosts/gce.py                                           32
-rw-r--r--   inventory/multi_ec2.yaml.example                                     32
-rwxr-xr-x   inventory/multi_inventory.py (renamed from inventory/multi_ec2.py)  154
-rw-r--r--   inventory/multi_inventory.yaml.example                               51
-rw-r--r--   inventory/openshift-ansible-inventory.spec                          108
6 files changed, 251 insertions, 212 deletions
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index df1bae49f..56bbb9612 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -5,6 +5,7 @@
 masters
 nodes
 etcd
+lb
 
 # Set variables common for all OSEv3 hosts
 [OSEv3:vars]
@@ -24,7 +25,7 @@ deployment_type=atomic-enterprise
 #use_cluster_metrics=true
 
 # Pre-release registry URL
-#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+#oreg_url=example.com/openshift3/ose-${component}:${version}
 
 # Pre-release Dev puddle repo
 #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
@@ -41,24 +42,48 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Allow all auth
 #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
 
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
 # Configure Fluentd
 #use_fluentd=true
 
-# master cluster ha variables using pacemaker or RHEL HA
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availbility cluster method with optional load balancer.
+# If no lb group is defined installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
 #openshift_master_cluster_password=openshift_cluster
 #openshift_master_cluster_vip=192.168.133.25
 #openshift_master_cluster_public_vip=192.168.133.25
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
 
-# master cluster ha variables when using a different HA solution
-# For installation the value of openshift_master_cluster_hostname must resolve
-# to the first master defined in the inventory.
-# The HA solution must be manually configured after installation and must ensure
-# that the master is running on a single master host.
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_defer_ha=True
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
 
 # default subdomain to use for exposed routes
 #osm_default_subdomain=apps.test.example.com
@@ -69,9 +94,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # default project node selector
 #osm_default_node_selector='region=primary'
 
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
 # set RPM version for debugging purposes
 #openshift_pkg_version=-3.0.0.0
 
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_encryption_secrets must be equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com
@@ -79,6 +142,9 @@ ose3-master[1:3]-ansible.test.example.com
 [etcd]
 ose3-etcd[1:3]-ansible.test.example.com
 
+[lb]
+ose3-lb-ansible.test.example.com
+
 # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
 # However, in order to ensure that your masters are not burdened with running pods you should
 # make them unschedulable by adding openshift_scheduleable=False any node that's also a master.
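Illustrative note (not part of the commit): the new session-secret comments above encode a concrete rule -- openshift_master_session_auth_secrets holds HMAC signing secrets (32 or 64 bytes recommended) and openshift_master_session_encryption_secrets must be 16, 24, or 32 characters to select AES-128, AES-192, or AES-256. A minimal Python sketch for generating a matching pair of values; the helper name and generation approach are assumptions, only the inventory variable names come from the file above:

    import base64
    import os

    def generate_session_secrets(aes_bits=256):
        """Return (auth_secret, encryption_secret) sized for the inventory variables above."""
        if aes_bits not in (128, 192, 256):
            raise ValueError("encryption secret length must select AES-128, AES-192 or AES-256")
        # 32-character signing secret for HMAC, per the comment's recommendation.
        auth_secret = base64.b64encode(os.urandom(24)).decode()
        # A 16/24/32-character encryption secret selects AES-128/192/256.
        key_len = aes_bits // 8
        encryption_secret = base64.b64encode(os.urandom(key_len)).decode()[:key_len]
        return auth_secret, encryption_secret

    if __name__ == "__main__":
        auth, enc = generate_session_secrets()
        print("openshift_master_session_auth_secrets=['%s']" % auth)
        print("openshift_master_session_encryption_secrets=['%s']" % enc)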
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 6ed12e011..99746cdbf 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -66,12 +66,22 @@ Examples:
   $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
 
 Use the GCE inventory script to print out instance specific information
-  $ plugins/inventory/gce.py --host my_instance
+  $ contrib/inventory/gce.py --host my_instance
 
 Author: Eric Johnson <erjohnso@google.com>
 Version: 0.0.1
 '''
 
+__requires__ = ['pycrypto>=2.6']
+try:
+    import pkg_resources
+except ImportError:
+    # Use pkg_resources to find the correct versions of libraries and set
+    # sys.path appropriately when there are multiversion installs. We don't
+    # fail here as there is code that better expresses the errors where the
+    # library is used.
+    pass
+
 USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
 USER_AGENT_VERSION="v1"
 
@@ -102,9 +112,9 @@ class GceInventory(object):
 
         # Just display data for specific host
         if self.args.host:
-            print self.json_format_dict(self.node_to_dict(
+            print(self.json_format_dict(self.node_to_dict(
                     self.get_instance(self.args.host)),
-                    pretty=self.args.pretty)
+                    pretty=self.args.pretty))
             sys.exit(0)
 
         # Otherwise, assume user wants all instances grouped
@@ -120,7 +130,6 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
-
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -174,7 +183,6 @@ class GceInventory(object):
             args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
             kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
-
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -213,8 +221,7 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            # Hosts don't always have a public IP name
-            #'gce_public_ip': inst.public_ips[0],
+            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -222,15 +229,15 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't always have a public IP name
-            #'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't have a public name, so we add an IP
+            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
         }
 
     def get_instance(self, instance_name):
         '''Gets details about a specific instance '''
        try:
            return self.driver.ex_get_node(instance_name)
-        except Exception, e:
+        except Exception as e:
            return None
 
    def group_instances(self):
@@ -250,7 +257,10 @@ class GceInventory(object):
 
            tags = node.extra['tags']
            for t in tags:
-                tag = 'tag_%s' % t
+                if t.startswith('group-'):
+                    tag = t[6:]
+                else:
+                    tag = 'tag_%s' % t
                if groups.has_key(tag): groups[tag].append(name)
                else: groups[tag] = [name]
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index 99f157b11..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
-  - name: aws1
-    provider: aws/hosts/ec2.py
-    provider_config:
-      ec2:
-        regions: all
-        regions_exclude: us-gov-west-1,cn-north-1
-        destination_variable: public_dns_name
-        route53: False
-        cache_path: ~/.ansible/tmp
-        cache_max_age: 300
-        vpc_destination_variable: ip_address
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-    all_group: ec2
-    hostvars:
-      cloud: aws
-      account: aws1
-
-- name: aws2
-  provider: aws/hosts/ec2.py
-  env_vars:
-    AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-    AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-    EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
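For reference, the group_instances() hunk in gce.py above changes how instance tags become Ansible groups: a tag prefixed with group- now names the group directly, while any other tag still becomes tag_<tag>. A standalone sketch of that mapping (the tag values are made up; the prefix handling mirrors the diff):

    def tag_to_group(tag):
        """Map a GCE instance tag to an Ansible inventory group name."""
        if tag.startswith('group-'):
            return tag[len('group-'):]   # 'group-infra' -> group 'infra'
        return 'tag_%s' % tag            # 'public' -> group 'tag_public'

    if __name__ == "__main__":
        for t in ['group-infra', 'group-masters', 'public']:
            print("%-15s -> %s" % (t, tag_to_group(t)))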
diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py
index 2cbf33473..354a8c10c 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_inventory.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python2
 '''
-    Fetch and combine multiple ec2 account settings into a single
+    Fetch and combine multiple inventory account settings into a single
     json hash.
 '''
 # vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
 import fcntl
 import tempfile
 import copy
+from string import Template
+import shutil
 
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
 
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+    '''Exceptions for MultiInventory class'''
+    pass
+
+class MultiInventory(object):
     '''
-       MultiEc2 class:
+       MultiInventory class:
            Opens a yaml config file and reads aws credentials.
            Stores a json hash of resources in result.
     '''
@@ -35,7 +41,7 @@ class MultiEc2(object):
         self.cache_path = DEFAULT_CACHE_PATH
         self.config = None
-        self.all_ec2_results = {}
+        self.all_inventory_results = {}
         self.result = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
@@ -56,7 +62,7 @@ class MultiEc2(object):
           cache is valid for the inventory.
 
          if the cache is valid; return cache
-          else the credentials are loaded from multi_ec2.yaml or from the env
+          else the credentials are loaded from multi_inventory.yaml or from the env
          and we attempt to get the inventory from the provider specified.
        '''
        # load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
        with open(conf_file) as conf:
            config = yaml.safe_load(conf)
 
+        # Provide a check for unique account names
+        if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+            raise MultiInventoryException('Duplicate account names in config file')
+
        return config
 
    def get_provider_tags(self, provider, env=None):
@@ -136,23 +146,25 @@ class MultiEc2(object):
        else:
            cmds.append('--list')
 
-        cmds.append('--refresh-cache')
+        if 'aws' in provider.lower():
+            cmds.append('--refresh-cache')
 
        return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                stdout=subprocess.PIPE, env=env)
 
    @staticmethod
-    def generate_config(config_data):
-        """Generate the ec2.ini file in as a secure temp file.
-           Once generated, pass it to the ec2.py as an environment variable.
+    def generate_config(provider_files):
+        """Generate the provider_files in a temporary directory.
        """
-        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
-        for section, values in config_data.items():
-            os.write(fildes, "[%s]\n" % section)
-            for option, value in values.items():
-                os.write(fildes, "%s = %s\n" % (option, value))
-        os.close(fildes)
-        return tmp_file_path
+        prefix = 'multi_inventory.'
+        tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+        for provider_file in provider_files:
+            filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+            content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+            filedes.write(content)
+            filedes.close()
+
+        return tmp_dir_path
 
    def run_provider(self):
        '''Setup the provider call with proper variables
@@ -160,13 +172,21 @@
        '''
        try:
            all_results = []
-            tmp_file_paths = []
+            tmp_dir_paths = []
            processes = {}
            for account in self.config['accounts']:
-                env = account['env_vars']
-                if account.has_key('provider_config'):
-                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
-                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
+                tmp_dir = None
+                if account.has_key('provider_files'):
+                    tmp_dir = MultiInventory.generate_config(account['provider_files'])
+                    tmp_dir_paths.append(tmp_dir)
+
+                # Update env vars after creating provider_config_files
+                # so that we can grab the tmp_dir if it exists
+                env = account.get('env_vars', {})
+                if env and tmp_dir:
+                    for key, value in env.items():
+                        env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
                name = account['name']
                provider = account['provider']
                processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@
                    })
 
        finally:
-            # Clean up the mkstemp file
-            for tmp_file in tmp_file_paths:
-                os.unlink(tmp_file)
+            # Clean up the mkdtemp dirs
+            for tmp_dir in tmp_dir_paths:
+                shutil.rmtree(tmp_dir)
 
        return all_results
 
@@ -223,7 +243,7 @@
                    ]
                    raise RuntimeError('\n'.join(err_msg).format(**result))
                else:
-                    self.all_ec2_results[result['name']] = json.loads(result['out'])
+                    self.all_inventory_results[result['name']] = json.loads(result['out'])
 
        # Check if user wants extra vars in yaml by
        # having hostvars and all_group defined
@@ -231,33 +251,65 @@
            self.apply_account_config(acc_config)
 
        # Build results by merging all dictionaries
-        values = self.all_ec2_results.values()
+        values = self.all_inventory_results.values()
        values.insert(0, self.result)
        for result in values:
-            MultiEc2.merge_destructively(self.result, result)
+            MultiInventory.merge_destructively(self.result, result)
+
+    def add_entry(self, data, keys, item):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}}
+            keys = a.b
+            item = c
+        '''
+        if "." in keys:
+            key, rest = keys.split(".", 1)
+            if key not in data:
+                data[key] = {}
+            self.add_entry(data[key], rest, item)
+        else:
+            data[keys] = item
+
+    def get_entry(self, data, keys):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}}
+            keys = a.b
+            return c
+        '''
+        if keys and "." in keys:
+            key, rest = keys.split(".", 1)
+            return self.get_entry(data[key], rest)
+        else:
+            return data.get(keys, None)
 
    def apply_account_config(self, acc_config):
        ''' Apply account config settings
        '''
-        if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
-            return
-
-        results = self.all_ec2_results[acc_config['name']]
-        # Update each hostvar with the newly desired key: value
-        for host_property, value in acc_config['hostvars'].items():
-            # Verify the account results look sane
-            # by checking for these keys ('_meta' and 'hostvars' exist)
-            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+        results = self.all_inventory_results[acc_config['name']]
+        results['all_hosts'] = results['_meta']['hostvars'].keys()
+
+        # Update each hostvar with the newly desired key: value from extra_*
+        for _extra in ['extra_vars', 'extra_groups']:
+            for new_var, value in acc_config.get(_extra, {}).items():
                for data in results['_meta']['hostvars'].values():
-                    data[str(host_property)] = str(value)
+                    self.add_entry(data, new_var, value)
+
+                # Add this group
+                if _extra == 'extra_groups':
+                    results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
+
+        # Clone groups goes here
+        for to_name, from_name in acc_config.get('clone_groups', {}).items():
+            if results.has_key(from_name):
+                results[to_name] = copy.copy(results[from_name])
 
-                # Add this group
-                if results.has_key(acc_config['all_group']):
-                    results["%s_%s" % (host_property, value)] = \
-                      copy.copy(results[acc_config['all_group']])
+        # Clone vars goes here
+        for to_name, from_name in acc_config.get('clone_vars', {}).items():
+            for data in results['_meta']['hostvars'].values():
+                self.add_entry(data, to_name, self.get_entry(data, from_name))
 
-        # store the results back into all_ec2_results
-        self.all_ec2_results[acc_config['name']] = results
+        # store the results back into all_inventory_results
+        self.all_inventory_results[acc_config['name']] = results
@@ -265,7 +317,7 @@ class MultiEc2(object):
        for key in input_b:
            if key in input_a:
                if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
-                    MultiEc2.merge_destructively(input_a[key], input_b[key])
+                    MultiInventory.merge_destructively(input_a[key], input_b[key])
                elif input_a[key] == input_b[key]:
                    pass # same leaf value
                # both lists so add each element in b to a if it does ! exist
@@ -321,7 +373,7 @@ class MultiEc2(object):
                if exc.errno != errno.EEXIST or not os.path.isdir(path):
                    raise
 
-        json_data = MultiEc2.json_format_dict(self.result, True)
+        json_data = MultiInventory.json_format_dict(self.result, True)
        with open(self.cache_path, 'w') as cache:
            try:
                fcntl.flock(cache, fcntl.LOCK_EX)
@@ -357,7 +409,7 @@ class MultiEc2(object):
 
 if __name__ == "__main__":
-    MEC2 = MultiEc2()
-    MEC2.parse_cli_args()
-    MEC2.run()
-    print MEC2.result_str()
+    MI2 = MultiInventory()
+    MI2.parse_cli_args()
+    MI2.run()
+    print MI2.result_str()
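The add_entry()/get_entry() helpers added to multi_inventory.py above read and write nested hostvars through dotted key paths, which is what lets extra_vars and clone_vars entries use keys such as a.b.c. A standalone sketch of the same behaviour as plain functions (the dotted key in the usage example is hypothetical):

    def add_entry(data, keys, item):
        """Set data['a']['b'] = item for keys == 'a.b', creating nested dicts as needed."""
        if "." in keys:
            key, rest = keys.split(".", 1)
            data.setdefault(key, {})
            add_entry(data[key], rest, item)
        else:
            data[keys] = item

    def get_entry(data, keys):
        """Return the value stored under a dotted key path, or None if absent."""
        if keys and "." in keys:
            key, rest = keys.split(".", 1)
            return get_entry(data[key], rest)
        return data.get(keys, None)

    if __name__ == "__main__":
        hostvars = {}
        add_entry(hostvars, "oo_env.region", "us-east-1")    # hypothetical extra_vars key
        print(hostvars)                                      # {'oo_env': {'region': 'us-east-1'}}
        print(get_entry(hostvars, "oo_env.region"))          # us-east-1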
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
new file mode 100644
index 000000000..0f0788d18
--- /dev/null
+++ b/inventory/multi_inventory.yaml.example
@@ -0,0 +1,51 @@
+# multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+  - name: aws1
+    provider: aws/ec2.py
+    provider_files:
+    - name: ec2.ini
+      content: |-
+        [ec2]
+        regions = all
+        regions_exclude = us-gov-west-1,cn-north-1
+        destination_variable = public_dns_name
+        route53 = False
+        cache_path = ~/.ansible/tmp
+        cache_max_age = 300
+        vpc_destination_variable = ip_address
+    env_vars:
+      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    extra_vars:
+      cloud: aws
+      account: aws1
+
+- name: mygce
+  extra_vars:
+    cloud: gce
+    account: gce1
+  env_vars:
+    GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+  provider: gce/gce.py
+  provider_files:
+  - name: priv_key.pem
+    contents: |-
+      -----BEGIN PRIVATE KEY-----
+      yourprivatekeydatahere
+      -----END PRIVATE KEY-----
+  - name: gce.ini
+    contents: |-
+      [gce]
+      gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+      gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+      gce_project_id = gce-project
+      zone = us-central1-a
+      network = default
+      gce_machine_type = n1-standard-2
+      gce_machine_image = rhel7
+
+cache_max_age: 600
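In the new multi_inventory.yaml.example above, each provider_files entry is written into a per-account temporary directory and ${tmpdir} in both the file contents and env_vars is expanded to that directory with string.Template, mirroring generate_config() and run_provider() in multi_inventory.py. A minimal sketch of that substitution (the provider file content below is made up):

    import os
    import shutil
    import tempfile
    from string import Template

    provider_files = [{'name': 'gce.ini',
                       'contents': '[gce]\ngce_service_account_pem_file_path = ${tmpdir}/priv_key.pem\n'}]
    env_vars = {'GCE_INI_PATH': '${tmpdir}/gce.ini'}

    tmp_dir = tempfile.mkdtemp(prefix='multi_inventory.')
    try:
        for pfile in provider_files:
            with open(os.path.join(tmp_dir, pfile['name']), 'w') as fd:
                fd.write(Template(pfile['contents']).substitute(tmpdir=tmp_dir))
        env = dict((key, Template(value).substitute(tmpdir=tmp_dir)) for key, value in env_vars.items())
        print(env['GCE_INI_PATH'])   # e.g. /tmp/multi_inventory.XXXXXX/gce.ini
    finally:
        shutil.rmtree(tmp_dir)       # run_provider() cleans up the same way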
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
deleted file mode 100644
index f163f865a..000000000
--- a/inventory/openshift-ansible-inventory.spec
+++ /dev/null
@@ -1,108 +0,0 @@
-Summary: OpenShift Ansible Inventories
-Name: openshift-ansible-inventory
-Version: 0.0.9
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}/usr/share/ansible/inventory
-mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
-mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
-
-cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
-cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
-
-%files
-%config(noreplace) /etc/ansible/*
-%dir /usr/share/ansible/inventory
-/usr/share/ansible/inventory/multi_ec2.py*
-/usr/share/ansible/inventory/aws/ec2.py*
-/usr/share/ansible/inventory/gce/gce.py*
-
-%changelog
-* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.9-1
-- Merge pull request #408 from sdodson/docker-buildvm (bleanhar@redhat.com)
-- Merge pull request #428 from jtslear/issue-383
-  (twiest@users.noreply.github.com)
-- Merge pull request #407 from aveshagarwal/ae-ansible-merge-auth
-  (bleanhar@redhat.com)
-- Enable htpasswd by default in the example hosts file. (avagarwa@redhat.com)
-- Add support for setting default node selector (jdetiber@redhat.com)
-- Merge pull request #429 from spinolacastro/custom_cors (bleanhar@redhat.com)
-- Updated to read config first and default to users home dir
-  (kwoodson@redhat.com)
-- Fix Custom Cors (spinolacastro@gmail.com)
-- Revert "namespace the byo inventory so the group names aren't so generic"
-  (sdodson@redhat.com)
-- Removes hardcoded python2 (jtslear@gmail.com)
-- namespace the byo inventory so the group names aren't so generic
-  (admiller@redhat.com)
-- docker-buildvm-rhose is dead (sdodson@redhat.com)
-- Add support for setting routingConfig:subdomain (jdetiber@redhat.com)
-- Initial HA master (jdetiber@redhat.com)
-- Make it clear that the byo inventory file is just an example
-  (jdetiber@redhat.com)
-- Playbook updates for clustered etcd (jdetiber@redhat.com)
-- Update for RC2 changes (sdodson@redhat.com)
-- Templatize configs and 0.5.2 changes (jdetiber@redhat.com)
-
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
-- Added more verbosity when error happens. Also fixed a bug.
-  (kwoodson@redhat.com)
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * rename openshift_registry_url oreg_url * rename option_images to
-  _{oreg|ortr}_images (jhonce@redhat.com)
-- Fix the remaining pylint warnings (lhuard@amadeus.com)
-- Fix some of the pylint warnings (lhuard@amadeus.com)
-- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com)
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
-- Making multi_ec2 into a library (kwoodson@redhat.com)
-
-* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- Added support for grouping and a bug fix. (kwoodson@redhat.com)
-
-* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
-  not dictating what the ec2.ini file should look like. (twiest@redhat.com)
-- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- fixed build problems with openshift-ansible-inventory.spec
-  (twiest@redhat.com)
-- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
-- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
-- Adding refresh-cache option and cleanup for pylint. Also updated for
-  aws/hosts/ being added. (kwoodson@redhat.com)
-
-* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added the ability to have a config file in /etc/openshift_ansible to
-  multi_ec2.py. (twiest@redhat.com)
-- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
-- gce inventory/playbook updates for node registration changes
-  (jdetiber@redhat.com)
-- Various fixes (jdetiber@redhat.com)
-
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito