path: root/playbooks
Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/aws/ansible-tower/launch.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 36
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 74
-rw-r--r--  playbooks/aws/openshift-cluster/launch_instances.yml | 63
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 302
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 132
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 29
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 20
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/vars.defaults.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 37
-rw-r--r--  playbooks/aws/openshift-master/config.yml | 27
-rw-r--r--  playbooks/aws/openshift-master/launch.yml | 8
-rw-r--r--  playbooks/aws/openshift-master/terminate.yml | 52
-rw-r--r--  playbooks/aws/openshift-master/vars.yml | 3
-rw-r--r--  playbooks/aws/openshift-node/config.yml | 110
-rw-r--r--  playbooks/aws/openshift-node/launch.yml | 10
-rw-r--r--  playbooks/aws/openshift-node/terminate.yml | 52
-rw-r--r--  playbooks/aws/openshift-node/vars.yml | 3
-rw-r--r--  playbooks/aws/terminate.yml | 64
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 20
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 90
-rw-r--r--  playbooks/byo/openshift_facts.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 4
l---------  playbooks/common/openshift-cluster/filter_plugins (renamed from playbooks/libvirt/openshift-master/filter_plugins) | 0
l---------  playbooks/common/openshift-cluster/roles (renamed from playbooks/libvirt/openshift-master/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 7
-rw-r--r--  playbooks/common/openshift-master/config.yml | 19
l---------  playbooks/common/openshift-master/filter_plugins (renamed from playbooks/libvirt/openshift-node/filter_plugins) | 0
l---------  playbooks/common/openshift-master/roles | 1
-rw-r--r--  playbooks/common/openshift-node/config.yml | 127
l---------  playbooks/common/openshift-node/filter_plugins | 1
l---------  playbooks/common/openshift-node/roles | 1
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 37
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 72
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 15
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml (renamed from playbooks/gce/openshift-cluster/launch_instances.yml) | 25
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 22
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 25
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 14
-rw-r--r--  playbooks/gce/openshift-master/config.yml | 24
-rw-r--r--  playbooks/gce/openshift-master/launch.yml | 6
-rw-r--r--  playbooks/gce/openshift-master/terminate.yml | 11
-rw-r--r--  playbooks/gce/openshift-master/vars.yml | 3
-rw-r--r--  playbooks/gce/openshift-node/config.yml | 106
-rw-r--r--  playbooks/gce/openshift-node/launch.yml | 6
-rw-r--r--  playbooks/gce/openshift-node/terminate.yml | 11
-rw-r--r--  playbooks/gce/openshift-node/vars.yml | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 38
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 81
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 50
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 27
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (renamed from playbooks/libvirt/openshift-cluster/launch_instances.yml) | 63
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml (renamed from playbooks/libvirt/templates/domain.xml) | 14
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 69
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 18
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 38
-rw-r--r--  playbooks/libvirt/openshift-master/config.yml | 21
-rw-r--r--  playbooks/libvirt/openshift-master/vars.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-node/config.yml | 102
l---------  playbooks/libvirt/openshift-node/roles | 1
-rw-r--r--  playbooks/libvirt/openshift-node/vars.yml | 1
-rw-r--r--  playbooks/libvirt/templates/meta-data | 2
-rw-r--r--  playbooks/libvirt/templates/user-data | 10
75 files changed, 1421 insertions, 967 deletions
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 4c29fa833..56235bc8a 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -6,7 +6,7 @@
vars:
inst_region: us-east-1
- rhel7_ami: ami-a24e30ca
+ rhel7_ami: ami-906240f8
user_data_file: user_data.txt
vars_files:
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
new file mode 100644
index 000000000..b8961704e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 3561c1803..3eb5496e4 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -4,59 +4,27 @@
connection: local
gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
tasks:
- - set_fact: k8s_type="master"
-
- - name: Generate master instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
-
- # These set_fact's cannot be combined
- - set_fact:
- master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- master_names: "{{ master_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - set_fact: k8s_type="node"
-
- - name: Generate node instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
-
- # These set_fact's cannot be combined
- - set_fact:
- node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- node_names: "{{ node_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
-- hosts: "tag_env_{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml
deleted file mode 100644
index 9d645fbe5..000000000
--- a/playbooks/aws/openshift-cluster/launch_instances.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- set_fact:
- machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
- machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
- machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
- machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
- created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
- security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
- env: "{{ cluster }}"
- host_type: "{{ type }}"
- env_host_type: "{{ cluster }}-openshift-{{ type }}"
-
-- name: Launch instance(s)
- ec2:
- state: present
- region: "{{ machine_region }}"
- keypair: "{{ machine_keypair }}"
- group: "{{ security_group }}"
- instance_type: "{{ machine_type }}"
- image: "{{ machine_image }}"
- count: "{{ instances | oo_len }}"
- wait: yes
- instance_tags:
- created-by: "{{ created_by }}"
- env: "{{ env }}"
- host-type: "{{ host_type }}"
- env-host-type: "{{ env_host_type }}"
- register: ec2
-
-- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
- with_together:
- - instances
- - ec2.instances
- args:
- tags:
- Name: "{{ item.0 }}"
-
-- set_fact:
- instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
-
-- name: Add new instances groups and variables
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- groups: "{{ instance_groups }}"
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- with_together:
- - instances
- - ec2.instances
-
-- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
- with_items: ec2.instances
-
-- name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: ec2.instances
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
new file mode 100644
index 000000000..29e594a65
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+ - Returns list of matching AMIs with AMI ID, along with other useful information
+ - Can search AMIs with different owners
+ - Can search by matching tag(s), by AMI name and/or other criteria
+ - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+ - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+ - See the example below for a suggestion of how to search by distro/release.
+options:
+ region:
+ description:
+ - The AWS region to use.
+ required: true
+ aliases: [ 'aws_region', 'ec2_region' ]
+ owner:
+ description:
+ - Search AMIs owned by the specified owner
+ - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+ - If not specified, all EC2 AMIs in the specified region will be searched.
+ - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+ required: false
+ default: null
+ ami_id:
+ description:
+ - An AMI ID to match.
+ default: null
+ required: false
+ ami_tags:
+ description:
+ - A hash/dictionary of tags to match for the AMI.
+ default: null
+ required: false
+ architecture:
+ description:
+ - An architecture type to match (e.g. x86_64).
+ default: null
+ required: false
+ hypervisor:
+ description:
+ - A hypervisor type to match (e.g. xen).
+ default: null
+ required: false
+ is_public:
+ description:
+ - Whether or not the image(s) are public.
+ choices: ['yes', 'no']
+ default: null
+ required: false
+ name:
+ description:
+ - An AMI name to match.
+ default: null
+ required: false
+ platform:
+ description:
+ - Platform type to match.
+ default: null
+ required: false
+ sort:
+ description:
+ - Optional attribute with which to sort the results.
+ - If specifying 'tag', the 'tag_name' parameter is required.
+ choices: ['name', 'description', 'tag']
+ default: null
+ required: false
+ sort_tag:
+ description:
+ - Tag name with which to sort results.
+ - Required when specifying 'sort=tag'.
+ default: null
+ required: false
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ state:
+ description:
+ - AMI state to match.
+ default: 'available'
+ required: false
+ virtualization_type:
+ description:
+ - Virtualization type to match (e.g. hvm).
+ default: null
+ required: false
+ no_result_action:
+ description:
+ - What to do when no results are found.
+ - "'success' reports success and returns an empty array"
+ - "'fail' causes the module to report failure"
+ choices: ['success', 'fail']
+ default: 'success'
+ required: false
+requirements:
+ - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+ owner: self
+ ami_tags:
+ project: website
+ no_result_action: fail
+ register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+ name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+ owner: 099720109477
+ sort: name
+ sort_order: descending
+ sort_end: 1
+ register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+ image: "{{ ami_find.results[0].ami_id }}"
+ instance_type: m3.medium
+ key_name: mykey
+ wait: yes
+'''
+
+try:
+ import boto.ec2
+ HAS_BOTO=True
+except ImportError:
+ HAS_BOTO=False
+
+import json
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ region = dict(required=True,
+ aliases = ['aws_region', 'ec2_region']),
+ owner = dict(required=False, default=None),
+ ami_id = dict(required=False),
+ ami_tags = dict(required=False, type='dict',
+ aliases = ['search_tags', 'image_tags']),
+ architecture = dict(required=False),
+ hypervisor = dict(required=False),
+ is_public = dict(required=False),
+ name = dict(required=False),
+ platform = dict(required=False),
+ sort = dict(required=False, default=None,
+ choices=['name', 'description', 'tag']),
+ sort_tag = dict(required=False),
+ sort_order = dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start = dict(required=False),
+ sort_end = dict(required=False),
+ state = dict(required=False, default='available'),
+ virtualization_type = dict(required=False),
+ no_result_action = dict(required=False, default='success',
+ choices = ['success', 'fail']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+ ami_id = module.params.get('ami_id')
+ ami_tags = module.params.get('ami_tags')
+ architecture = module.params.get('architecture')
+ hypervisor = module.params.get('hypervisor')
+ is_public = module.params.get('is_public')
+ name = module.params.get('name')
+ owner = module.params.get('owner')
+ platform = module.params.get('platform')
+ sort = module.params.get('sort')
+ sort_tag = module.params.get('sort_tag')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+ state = module.params.get('state')
+ virtualization_type = module.params.get('virtualization_type')
+ no_result_action = module.params.get('no_result_action')
+
+ filter = {'state': state}
+
+ if ami_id:
+ filter['image_id'] = ami_id
+ if ami_tags:
+ for tag in ami_tags:
+ filter['tag:'+tag] = ami_tags[tag]
+ if architecture:
+ filter['architecture'] = architecture
+ if hypervisor:
+ filter['hypervisor'] = hypervisor
+ if is_public:
+ filter['is_public'] = is_public
+ if name:
+ filter['name'] = name
+ if platform:
+ filter['platform'] = platform
+ if virtualization_type:
+ filter['virtualization_type'] = virtualization_type
+
+ ec2 = ec2_connect(module)
+
+ images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+ if no_result_action == 'fail' and len(images_result) == 0:
+ module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+ results = []
+ for image in images_result:
+ data = {
+ 'ami_id': image.id,
+ 'architecture': image.architecture,
+ 'description': image.description,
+ 'is_public': image.is_public,
+ 'name': image.name,
+ 'owner_id': image.owner_id,
+ 'platform': image.platform,
+ 'root_device_name': image.root_device_name,
+ 'root_device_type': image.root_device_type,
+ 'state': image.state,
+ 'tags': image.tags,
+ 'virtualization_type': image.virtualization_type,
+ }
+
+ if image.kernel_id:
+ data['kernel_id'] = image.kernel_id
+ if image.ramdisk_id:
+ data['ramdisk_id'] = image.ramdisk_id
+
+ results.append(data)
+
+ if sort == 'tag':
+ if not sort_tag:
+ module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+ results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+ elif sort:
+ results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+ try:
+ if sort and sort_start and sort_end:
+ results = results[int(sort_start):int(sort_end)]
+ elif sort and sort_start:
+ results = results[int(sort_start):]
+ elif sort and sort_end:
+ results = results[:int(sort_end)]
+ except TypeError:
+ module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+ module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
+
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 08e9e2df4..04fcdc0a1 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- set_fact: scratch_group=tag_env_{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
- when: scratch_group is not defined
- - add_host: name={{ item }} groups=oo_list_hosts
- with_items: groups[scratch_group] | difference(['localhost'])
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
hosts: oo_list_hosts
gather_facts: no
tasks:
- - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..666a8d1fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,132 @@
+---
+- set_fact:
+ created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+ docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
+ env: "{{ cluster }}"
+ env_host_type: "{{ cluster }}-openshift-{{ type }}"
+ host_type: "{{ type }}"
+
+- set_fact:
+ ec2_region: "{{ lookup('env', 'ec2_region')
+ | default(deployment_vars[deployment_type].region, true) }}"
+ when: ec2_region is not defined
+- set_fact:
+ ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+ | default(deployment_vars[deployment_type].image_name, true) }}"
+ when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+ ec2_image: "{{ lookup('env', 'ec2_image')
+ | default(deployment_vars[deployment_type].image, true) }}"
+ when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+ ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+ | default(deployment_vars[deployment_type].type, true) }}"
+ when: ec2_instance_type is not defined
+- set_fact:
+ ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+ | default(deployment_vars[deployment_type].keypair, true) }}"
+ when: ec2_keypair is not defined
+- set_fact:
+ ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+ | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+ when: ec2_vpc_subnet is not defined
+- set_fact:
+ ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+ | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+ when: ec2_assign_public_ip is not defined
+- set_fact:
+ ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+ ec2_ami_find:
+ region: "{{ ec2_region }}"
+ ami_id: "{{ ec2_image | default(omit, true) }}"
+ name: "{{ ec2_image_name | default(omit, true) }}"
+ register: ami_result
+
+- fail: msg="Could not find requested ami"
+ when: not ami_result.results
+
+- set_fact:
+ latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+ user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+ volume_defs:
+ master:
+ root:
+ volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+ node:
+ root:
+ volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
+ docker:
+ volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
+ device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
+
+- set_fact:
+ volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
+
+- name: Launch instance(s)
+ ec2:
+ state: present
+ region: "{{ ec2_region }}"
+ keypair: "{{ ec2_keypair }}"
+ group: "{{ ec2_security_groups }}"
+ instance_type: "{{ ec2_instance_type }}"
+ image: "{{ latest_ami }}"
+ count: "{{ instances | oo_len }}"
+ vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+ assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+ user_data: "{{ user_data }}"
+ wait: yes
+ instance_tags:
+ created-by: "{{ created_by }}"
+ env: "{{ env }}"
+ host-type: "{{ host_type }}"
+ env-host-type: "{{ env_host_type }}"
+ volumes: "{{ volumes }}"
+ register: ec2
+
+- name: Add Name tag to instances
+ ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+ with_together:
+ - instances
+ - ec2.instances
+ args:
+ tags:
+ Name: "{{ item.0 }}"
+
+- set_fact:
+ instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: "{{ instance_groups }}"
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ with_together:
+ - instances
+ - ec2.instances
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.dns_name }}"
+ with_items: ec2.instances
+
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_together:
+ - instances
+ - ec2.instances
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
new file mode 100644
index 000000000..7dbc8f552
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -0,0 +1,29 @@
+#cloud-config
+yum_repos:
+ jdetiber-copr:
+ name: Copr repo for origin owned by jdetiber
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
+ skip_if_unavailable: true
+ gpgcheck: true
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
+ enabled: true
+
+packages:
+- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
+- docker-storage-setup
+
+mounts:
+- [ xvdb ]
+- [ ephemeral0 ]
+
+write_files:
+- content: |
+ DEVS=/dev/xvdb
+ VG=docker_vg
+ path: /etc/sysconfig/docker-storage-setup
+ owner: root:root
+ permissions: '0644'
+
+runcmd:
+- systemctl daemon-reload
+- systemctl enable lvm2-lvmetad.service docker-storage-setup.service
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 39607633a..617d0d456 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -1,14 +1,16 @@
---
- name: Terminate instance(s)
hosts: localhost
-
+ gather_facts: no
vars_files:
- - vars.yml
-
-- include: ../openshift-node/terminate.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
-- include: ../openshift-master/terminate.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 90ecdc6ab..5e7ab4e58 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
---
-- hosts: "tag_env_{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.defaults.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ed97d539c..07e453f89 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1 +1,38 @@
---
+deployment_vars:
+ origin:
+ # fedora, since centos requires marketplace
+ image: ami-acd999c4
+ image_name:
+ region: us-east-1
+ ssh_user: fedora
+ sudo: yes
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+ online:
+ # private ami
+ image: ami-7a9e9812
+ image_name: openshift-rhel7_*
+ region: us-east-1
+ ssh_user: root
+ sudo: no
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+ enterprise:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ image_name:
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index 1c4060eee..37ab4fbe6 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,24 +1,19 @@
---
-- name: Populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_masters_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: Configure instances
- hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- # TODO: this should be removed once openshift-sdn packages are available
- openshift_use_openshift_sdn: False
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- #- openshift_sdn_master
- - pods
- - os_env_extras
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index 3d87879a0..6b3751682 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -40,7 +38,7 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
@@ -57,7 +55,7 @@
- ec2.instances
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml
index fd15cf00f..07d9961bc 100644
--- a/playbooks/aws/openshift-master/terminate.yml
+++ b/playbooks/aws/openshift-master/terminate.yml
@@ -1,52 +1,2 @@
---
-- name: Populate oo_masters_to_terminate host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_masters_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Gather facts for instances to terminate
- hosts: oo_masters_to_terminate
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- gather_facts: no
- vars:
- host_vars: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_terminate']) }}"
- tasks:
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ item.ec2_id }}"]
- region: "{{ item.ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: host_vars
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
- with_items: ec2_term.results
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: item.failed
- with_items: ec2_term.results
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: ec2_stop.results
-
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/aws/openshift-master/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index b08ed7571..fc9b397b4 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,107 +1,25 @@
---
-- name: Populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_nodes_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
- - add_host:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
groups: oo_first_master
- when: oo_host_group_exp is defined
+ ansible_ssh_user: root
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ ec2_private_ip_address }}"
- public_hostname: "{{ ec2_ip_address }}"
- # TODO: this should be removed once openshift-sdn packages are available
- use_openshift_sdn: False
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
+- include: ../../common/openshift-node/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- #- openshift_sdn_node
- - os_env_extras
- - os_env_extras_node
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index b7ef593e7..36aee14ff 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -33,7 +31,7 @@
with_items: ec2.instances
- name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
with_together:
- oo_new_inst_names
- ec2.instances
@@ -42,7 +40,7 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
@@ -59,7 +57,7 @@
- ec2.instances
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml
index 1c0c77eb7..07d9961bc 100644
--- a/playbooks/aws/openshift-node/terminate.yml
+++ b/playbooks/aws/openshift-node/terminate.yml
@@ -1,52 +1,2 @@
---
-- name: Populate oo_nodes_to_terminate host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_nodes_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Gather facts for instances to terminate
- hosts: oo_nodes_to_terminate
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- gather_facts: no
- vars:
- host_vars: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_terminate']) }}"
- tasks:
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ item.ec2_id }}"]
- region: "{{ item.ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: host_vars
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
- with_items: ec2_term.results
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: item.failed
- with_items: ec2_term.results
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: ec2_stop.results
-
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/aws/openshift-node/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/aws/terminate.yml b/playbooks/aws/terminate.yml
new file mode 100644
index 000000000..e9767b260
--- /dev/null
+++ b/playbooks/aws/terminate.yml
@@ -0,0 +1,64 @@
+---
+- name: Populate oo_hosts_to_terminate host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_hosts_to_terminate
+ add_host: name={{ item }} groups=oo_hosts_to_terminate
+ with_items: oo_host_group_exp | default([])
+
+- name: Gather dynamic inventory variables for hosts to terminate
+ hosts: oo_hosts_to_terminate
+ gather_facts: no
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars:
+ host_vars: "{{ hostvars
+ | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+ tasks:
+ - name: Remove tags from instances
+ ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+ args:
+ tags:
+ env: "{{ item['ec2_tag_env'] }}"
+ host-type: "{{ item['ec2_tag_host-type'] }}"
+ env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ item.ec2_id }}"]
+ region: "{{ item.ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+ when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ with_items: ec2_term.results
+
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: item.failed
+ with_items: ec2_term.results
+ when: "'oo_hosts_to_terminate' in groups"
+
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: ec2_stop.results
+ when: "'oo_hosts_to_terminate' in groups"
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
index 706f9285c..f61d277c6 100644
--- a/playbooks/byo/openshift-master/config.yml
+++ b/playbooks/byo/openshift-master/config.yml
@@ -1,9 +1,15 @@
---
-- name: Gather facts for node hosts
- hosts: nodes
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ with_items: groups['masters']
-- name: Configure master instances
- hosts: masters
- roles:
- - openshift_master
- - openshift_sdn_master
+- include: ../../common/openshift-master/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
index 69ad7a840..d569827b4 100644
--- a/playbooks/byo/openshift-node/config.yml
+++ b/playbooks/byo/openshift-node/config.yml
@@ -1,79 +1,21 @@
---
-- name: Gather facts for node hosts
- hosts: nodes
- roles:
- - openshift_facts
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+ hosts: localhost
+ gather_facts: no
tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: 'node'
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ with_items: groups.nodes
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups.masters[0] }}"
+ groups: oo_first_master
-- name: Register nodes
- hosts: masters[0]
+- include: ../../common/openshift-node/config.yml
vars:
- openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure node instances
- hosts: nodes
- vars:
- sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - openshift_sdn_node
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
new file mode 100644
index 000000000..cd282270f
--- /dev/null
+++ b/playbooks/byo/openshift_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: Gather OpenShift facts
+ hosts: all
+ gather_facts: no
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ register: result
+ - debug: var=result
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
new file mode 100644
index 000000000..14ffa928f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -0,0 +1,4 @@
+---
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml
diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/libvirt/openshift-master/filter_plugins
+++ b/playbooks/common/openshift-cluster/filter_plugins
diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/common/openshift-cluster/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/libvirt/openshift-master/roles
+++ b/playbooks/common/openshift-cluster/roles
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
new file mode 100644
index 000000000..118727273
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="master"
+
+- name: Generate master instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: master_names_output
+ with_sequence: start=1 end={{ num_masters }}
+
+- set_fact:
+ master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
new file mode 100644
index 000000000..162315d46
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="node"
+
+- name: Generate node instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: node_names_output
+ with_sequence: start=1 end={{ num_nodes }}
+
+- set_fact:
+ node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
new file mode 100644
index 000000000..e92c6f1ee
--- /dev/null
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -0,0 +1,7 @@
+---
+- hosts: oo_hosts_to_update
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
new file mode 100644
index 000000000..05822d118
--- /dev/null
+++ b/playbooks/common/openshift-master/config.yml
@@ -0,0 +1,19 @@
+---
+- name: Configure master instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
+ roles:
+ - openshift_master
+ - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+# Additional instance config for online deployments
+- name: Additional instance config
+ hosts: oo_masters_deployment_type_online
+ roles:
+ - pods
+ - os_env_extras
diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/common/openshift-master/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/libvirt/openshift-node/filter_plugins
+++ b/playbooks/common/openshift-master/filter_plugins
diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
new file mode 100644
index 000000000..5a6c89489
--- /dev/null
+++ b/playbooks/common/openshift-node/config.yml
@@ -0,0 +1,127 @@
+---
+- name: Gather and set facts for node hosts
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+  # TODO: update so that we only sync necessary configs/directories; currently
+  # we sync for all nodes in oo_nodes_to_config. We will need to inspect the
+  # configs on the nodes to determine whether to sync or not.
+ - name: Create the temp directory on the master
+ file:
+ path: "{{ sync_tmpdir }}"
+ owner: "{{ ansible_ssh_user }}"
+ mode: 0700
+ state: directory
+ changed_when: False
+
+ - name: Create a tarball of the node config directories
+ command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+ args:
+ chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: openshift_nodes
+ changed_when: False
+
+ - name: Retrieve the node config tarballs from the master
+ fetch:
+ src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: openshift_nodes
+ changed_when: False
+
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_node_cert_dir }}"
+ state: directory
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
+ dest: "{{ openshift_node_cert_dir }}"
+ roles:
+ - openshift_node
+ - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+- name: Delete the temporary directory on the master
+ hosts: oo_first_master
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - file: name={{ sync_tmpdir }} state=absent
+ changed_when: False
+
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - file: name={{ mktemp.stdout }} state=absent
+ changed_when: False
+
+
+# Additional config for online type deployments
+- name: Additional instance config
+ hosts: oo_nodes_deployment_type_online
+ gather_facts: no
+ roles:
+ - os_env_extras
+ - os_env_extras_node
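Note: taken together, the plays above move node certificates in three hops; a condensed outline of the flow, using the same variables defined in the plays (the paths are whatever openshift_cert_dir and openshift_node_cert_dir resolve to):

  # 1. first master:  tar the per-node config dir into {{ sync_tmpdir }}/<node-hostname>.tgz
  # 2. control host:  fetch each tarball into the local {{ sync_tmpdir }}
  # 3. each node:     unarchive its tarball into {{ openshift_node_cert_dir }}
  # The temp directories on the master and on localhost are removed afterwards.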
diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
new file mode 100644
index 000000000..8b8490246
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -0,0 +1,37 @@
+---
+# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
+# /etc/sysconfig/iptables
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
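Note: the tag_env-host-type-* groups evaluated here come from the tags applied in tasks/launch_instances.yml (created-by-*, env-*, host-type-*, env-host-type-*), which add_host prefixes with tag_; for an illustrative cluster_id of demo, a master instance would land in groups such as:

  # tag_env-demo
  # tag_host-type-master
  # tag_env-host-type-demo-openshift-master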
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 14cdd2537..771f51e91 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -4,59 +4,25 @@
connection: local
gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
tasks:
- - set_fact: k8s_type="master"
-
- - name: Generate master instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
-
- # These set_fact's cannot be combined
- - set_fact:
- master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- master_names: "{{ master_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - set_fact: k8s_type="node"
-
- - name: Generate node instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
-
- # These set_fact's cannot be combined
- - set_fact:
- node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- node_names: "{{ node_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
-- hosts: "tag_env-{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+ - fail: msg="Deployment type not supported for gce provider yet"
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 1124b0ea3..962381306 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- set_fact: scratch_group=tag_env-{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
- when: scratch_group is not defined
- - add_host: name={{ item }} groups=oo_list_hosts
- with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
- name: List Hosts
hosts: oo_list_hosts
gather_facts: no
tasks:
- - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index b4f33bd87..9a9848f05 100644
--- a/playbooks/gce/openshift-cluster/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -2,41 +2,38 @@
# TODO: when we are ready to go to ansible 1.9+ support only, we can update
# the gce task to use the disk_auto_delete parameter to avoid having to delete
# the disk as a separate step on termination
-
-- set_fact:
- machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
- machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
-
- name: Launch instance(s)
gce:
instance_names: "{{ instances }}"
- machine_type: "{{ machine_type }}"
- image: "{{ machine_image }}"
+ machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
+ image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
tags:
- - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
- - "env-{{ cluster }}"
- - "host-type-{{ type }}"
- - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+ - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+ - env-{{ cluster }}
+ - host-type-{{ type }}
+ - env-host-type-{{ cluster }}-openshift-{{ type }}
register: gce
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
-- name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
retries: 20
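Note: as a sketch of the TODO at the top of this file, the launch task could drop the separate disk-deletion step in the terminate playbooks once only ansible 1.9+ is supported, by passing disk_auto_delete (assumed to be available in the 1.9 gce module; everything else is unchanged from the task above):

- name: Launch instance(s)
  gce:
    instance_names: "{{ instances }}"
    machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
    image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
    service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
    pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
    project_id: "{{ lookup('env', 'gce_project_id') }}"
    # assumption: requires ansible >= 1.9; boot disks are then deleted with the instance
    disk_auto_delete: yes
    tags:
      - created-by-{{ lookup('env', 'LOGNAME') | default(cluster, true) }}
      - env-{{ cluster }}
      - host-type-{{ type }}
      - env-host-type-{{ cluster }}-openshift-{{ type }}
  register: gce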
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 0281ae953..abe6a4c95 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,20 +1,34 @@
---
- name: Terminate instance(s)
hosts: localhost
-
+ gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
- include: ../openshift-node/terminate.yml
vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
- include: ../openshift-master/terminate.yml
vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 973e4c3ef..9ebf39a13 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
---
-- hosts: "tag_env-{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ed97d539c..ae33083b9 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1 +1,15 @@
---
+deployment_vars:
+ origin:
+ image: centos-7
+ ssh_user:
+ sudo: yes
+ online:
+ image: libra-rhel7
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image: rhel-7
+ ssh_user:
+ sudo: yes
+
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index 857da0763..af6000bc8 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,20 +1,18 @@
---
-- name: master/config.yml, populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_masters_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: "Configure instances"
- hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- - pods
- - os_env_extras
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
index 287596002..ef10b6cf0 100644
--- a/playbooks/gce/openshift-master/launch.yml
+++ b/playbooks/gce/openshift-master/launch.yml
@@ -8,14 +8,12 @@
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -37,7 +35,7 @@
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- name: Wait for root user setup
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
index 8319774f8..452ac5199 100644
--- a/playbooks/gce/openshift-master/terminate.yml
+++ b/playbooks/gce/openshift-master/terminate.yml
@@ -3,10 +3,9 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_masters_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_terminate
+ add_host: name={{ item }} groups=oo_masters_to_terminate
+ with_items: oo_host_group_exp | default([])
- name: Terminate master instances
hosts: localhost
@@ -22,6 +21,7 @@
instance_names: "{{ groups['oo_masters_to_terminate'] }}"
disks: "{{ groups['oo_masters_to_terminate'] }}"
register: gce
+ when: "'oo_masters_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -32,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_masters_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/gce/openshift-master/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 771cc3a94..5b1601176 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,100 +1,24 @@
---
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_nodes_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
- - add_host:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
groups: oo_first_master
- when: oo_host_group_exp is defined
+ ansible_ssh_user: root
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ gce_private_ip }}"
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
+- include: ../../common/openshift-node/config.yml
vars:
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - os_env_extras
- - os_env_extras_node
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index 73d0478ab..086ba58bc 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -8,14 +8,12 @@
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -37,7 +35,7 @@
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- name: Wait for root user setup
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
index 7d71dfcab..357e0c295 100644
--- a/playbooks/gce/openshift-node/terminate.yml
+++ b/playbooks/gce/openshift-node/terminate.yml
@@ -3,10 +3,9 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_nodes_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_nodes_to_terminate
+ add_host: name={{ item }} groups=oo_nodes_to_terminate
+ with_items: oo_host_group_exp | default([])
- name: Terminate node instances
hosts: localhost
@@ -22,6 +21,7 @@
instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
disks: "{{ groups['oo_nodes_to_terminate'] }}"
register: gce
+ when: "'oo_nodes_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -32,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_nodes_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/gce/openshift-node/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
new file mode 100644
index 000000000..faf278b10
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -0,0 +1,38 @@
+---
+# TODO: need to figure out a plan for setting hostname; currently the default
+# is localhost, so no hostname (or public_hostname) value is getting
+# assigned
+
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_masters_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 6f2df33af..a7ddc1e7e 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -1,65 +1,36 @@
+---
- name: Launch instance(s)
hosts: localhost
- connection: local
gather_facts: no
-
- vars:
- libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
- libvirt_storage_pool: 'openshift'
- libvirt_uri: 'qemu:///system'
-
vars_files:
- - vars.yml
-
+ - vars.yml
+ vars:
+ os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
+ os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
+ os_libvirt_network: "{{ libvirt_network | default('default') }}"
+ image_url: "{{ deployment_vars[deployment_type].image.url }}"
+ image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
+ image_name: "{{ deployment_vars[deployment_type].image.name }}"
tasks:
- - set_fact:
- k8s_type: master
-
- - name: Generate master instance name(s)
- set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
- register: master_names_output
- with_sequence: start=1 end='{{ num_masters }}'
+ - fail: msg="Deployment type not supported for libvirt provider yet"
+ when: deployment_type in ['online', 'enterprise']
- - set_fact:
- master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+ - include: tasks/configure_libvirt.yml
- - include: launch_instances.yml
- vars:
- instances: '{{ master_names }}'
- cluster: '{{ cluster_id }}'
- type: '{{ k8s_type }}'
- group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
- - set_fact:
- k8s_type: node
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
- - name: Generate node instance name(s)
- set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
- register: node_names_output
- with_sequence: start=1 end='{{ num_nodes }}'
+- include: update.yml
- - set_fact:
- node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
-
- - include: launch_instances.yml
- vars:
- instances: '{{ node_names }}'
- cluster: '{{ cluster_id }}'
- type: '{{ k8s_type }}'
-
-- hosts: 'tag_env-{{ cluster_id }}'
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
- oo_env: '{{ cluster_id }}'
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
- oo_env: '{{ cluster_id }}'
+- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6bf07e3c6..eaedc4d0d 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -1,43 +1,23 @@
+---
- name: Generate oo_list_hosts group
hosts: localhost
- connection: local
gather_facts: no
-
- vars:
- libvirt_uri: 'qemu:///system'
-
+ vars_files:
+ - vars.yml
tasks:
- - name: List VMs
- virt:
- command: list_vms
- register: list_vms
-
- - name: Collect MAC addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
- register: scratch_mac
- with_items: '{{ list_vms.list_vms }}'
- when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
-
- - name: Collect IP addresses of the VMs
- shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
- register: scratch_ip
- with_items: '{{ scratch_mac.results }}'
- when: item.skipped is not defined
-
- - name: Add hosts
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[1].stdout }}'
- ansible_ssh_user: root
- groups: oo_list_hosts
- with_together:
- - '{{ list_vms.list_vms }}'
- - '{{ scratch_ip.results }}'
- when: item[1].skipped is not defined
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
hosts: oo_list_hosts
-
tasks:
- - debug:
- msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+ - debug:
+ msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
new file mode 100644
index 000000000..f237c1a60
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+ when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+ when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
new file mode 100644
index 000000000..3117d9edc
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+ register: net_info_result
+ changed_when: False
+ failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+ command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
+ register: mktemp
+ when: net_info_result.rc == 1
+
+- name: Create network xml file
+ template:
+ src: templates/network.xml
+ dest: "{{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+ command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+ file:
+ path: "{{ mktemp.stdout }}"
+ state: absent
+ when: net_info_result.rc == 1
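Note: when the network does not exist yet, virsh net-info exits non-zero with a message containing 'no network with matching name', which the failed_when above treats as non-fatal and the rc == 1 checks use to gate creation; an illustrative run (exact wording may vary by libvirt version):

  $ virsh -c qemu:///system net-info openshift-ansible
  error: failed to get network 'openshift-ansible'
  error: Network not found: no network with matching name 'openshift-ansible'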
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
new file mode 100644
index 000000000..8a67d713f
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -0,0 +1,23 @@
+---
+- name: Create libvirt storage directory for openshift
+ file:
+ dest: "{{ libvirt_storage_pool_path }}"
+ state: directory
+
+- acl:
+ default: yes
+ entity: kvm
+ etype: group
+ name: "{{ libvirt_storage_pool_path }}"
+ permissions: rwx
+ state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+ register: pool_info_result
+ changed_when: False
+ failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+ when: pool_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 3bbcae981..359d0b2f3 100644
--- a/playbooks/libvirt/openshift-cluster/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -1,45 +1,47 @@
-- name: Create the libvirt storage directory for openshift
- file:
- dest: '{{ libvirt_storage_pool_path }}'
- state: directory
+---
+# TODO: Add support for choosing the base image based on deployment_type and
+# the desired os (this needs support added in bin/cluster with sane defaults:
+# fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even handle the libvirt tasks to set metadata in the domain xml and be able
+# to create/query data about vms without having to use xml; the python libvirt
+# bindings look like a good candidate for this
- name: Download Base Cloud image
get_url:
- url: '{{ base_image_url }}'
- sha256sum: '{{ base_image_sha256 }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
+ url: '{{ image_url }}'
+ sha256sum: '{{ image_sha256 }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
- name: Create the cloud-init config drive path
file:
- dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: directory
- with_items: '{{ instances }}'
+ with_items: instances
- name: Create the cloud-init config drive files
template:
src: '{{ item[1] }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
with_nested:
- - '{{ instances }}'
+ - instances
- [ user-data, meta-data ]
- name: Create the cloud-init config drive
- command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
args:
- chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
- creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- with_items: '{{ instances }}'
-
-- name: Create the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
- ignore_errors: yes
+ chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ with_items: instances
- name: Refresh the libvirt storage pool for openshift
command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
- name: Create VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
- with_items: '{{ instances }}'
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+ with_items: instances
- name: Create VMs
virt:
@@ -47,19 +49,19 @@
command: define
xml: "{{ lookup('template', '../templates/domain.xml') }}"
uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
+ with_items: instances
- name: Start VMs
virt:
name: '{{ item }}'
state: running
uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
+ with_items: instances
- name: Collect MAC addresses of the VMs
shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
register: scratch_mac
- with_items: '{{ instances }}'
+ with_items: instances
- name: Wait for the VMs to get an IP
command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
@@ -72,7 +74,7 @@
- name: Collect IP addresses of the VMs
shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
register: scratch_ip
- with_items: '{{ scratch_mac.results }}'
+ with_items: scratch_mac.results
- set_fact:
ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
@@ -81,7 +83,8 @@
add_host:
hostname: '{{ item.0 }}'
ansible_ssh_host: '{{ item.1 }}'
- ansible_ssh_user: root
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
with_together:
- instances
@@ -93,10 +96,12 @@
port: 22
with_items: ips
-- name: Wait for root user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
+- name: Wait for openshift user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
register: result
until: result.rc == 0
retries: 30
delay: 1
- with_items: ips
+ with_together:
+ - instances
+ - ips
diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index da037d138..df200e374 100644
--- a/playbooks/libvirt/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -1,6 +1,13 @@
<domain type='kvm' id='8'>
<name>{{ item }}</name>
<memory unit='GiB'>1</memory>
+ <metadata xmlns:ansible="https://github.com/ansible/ansible">
+ <ansible:tags>
+ <ansible:tag>env-{{ cluster }}</ansible:tag>
+ <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+ <ansible:tag>host-type-{{ type }}</ansible:tag>
+ </ansible:tags>
+ </metadata>
<currentMemory unit='GiB'>1</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
@@ -24,18 +31,18 @@
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
<target dev='vdb' bus='virtio'/>
<readonly/>
</disk>
<controller type='usb' index='0' />
<interface type='network'>
- <source network='default'/>
+ <source network='{{ os_libvirt_network }}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
@@ -49,7 +56,6 @@
</channel>
<input type='tablet' bus='usb' />
<input type='mouse' bus='ps2'/>
- <input type='keyboard' bus='ps2'/>
<graphics type='spice' autoport='yes' />
<video>
<model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
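Note: the new ansible:tags metadata embeds the cluster and host-type grouping in the domain definition itself; a hedged example of reading it back with the same dumpxml + xmllint pattern used above for MAC addresses (the domain name is illustrative):

  virsh -c qemu:///system dumpxml demo-master-0a1f3 \
    | xmllint --xpath "//*[local-name()='tag']" -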
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
new file mode 100644
index 000000000..6b421770d
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/meta-data
@@ -0,0 +1,3 @@
+instance-id: {{ item[0] }}
+hostname: {{ item[0] }}
+local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
new file mode 100644
index 000000000..86dcd62bb
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -0,0 +1,23 @@
+<network>
+ <name>openshift-ansible</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <!-- TODO: query for first available virbr interface available -->
+ <bridge name='virbr3' stp='on' delay='0'/>
+ <!-- TODO: make overridable -->
+ <domain name='example.com'/>
+ <dns>
+ <!-- TODO: automatically add host entries -->
+ </dns>
+ <!-- TODO: query for available address space -->
+ <ip address='192.168.55.1' netmask='255.255.255.0'>
+ <dhcp>
+ <range start='192.168.55.2' end='192.168.55.254'/>
+ <!-- TODO: add static entries addresses for the hosts to be created -->
+ </dhcp>
+ </ip>
+</network>
+
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
new file mode 100644
index 000000000..77b788109
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -0,0 +1,23 @@
+#cloud-config
+disable_root: true
+
+hostname: {{ item[0] }}
+fqdn: {{ item[0] }}.example.com
+manage_etc_hosts: true
+
+users:
+ - default
+ - name: root
+ ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+bootcmd:
+ - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index c609169d3..b173a09dd 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -1,41 +1,44 @@
+---
+# TODO: does not handle a non-existent cluster gracefully
+
- name: Terminate instance(s)
hosts: localhost
- connection: local
gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: cluster_group=tag_env-{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[cluster_group] | default([])
- vars:
- libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
- libvirt_storage_pool: 'openshift'
- libvirt_uri: 'qemu:///system'
+ - name: Destroy VMs
+ virt:
+ name: '{{ item[0] }}'
+ command: '{{ item[1] }}'
+ uri: '{{ libvirt_uri }}'
+ with_nested:
+ - groups['oo_hosts_to_terminate']
+ - [ destroy, undefine ]
- tasks:
- - name: List VMs
- virt:
- command: list_vms
- register: list_vms
+ - name: Delete VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
+ with_items: groups['oo_hosts_to_terminate']
- - name: Destroy VMs
- virt:
- name: '{{ item[0] }}'
- command: '{{ item[1] }}'
- uri: '{{ libvirt_uri }}'
- with_nested:
- - '{{ list_vms.list_vms }}'
- - [ destroy, undefine ]
- when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+ - name: Delete the VM cloud-init image
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
- - name: Delete VMs config drive
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
- state: absent
- with_items: '{{ list_vms.list_vms }}'
- when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+ - name: Remove the cloud-init config directory
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
- - name: Delete VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
- with_nested:
- - '{{ list_vms.list_vms }}'
- - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
- when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
new file mode 100644
index 000000000..57e36db9e
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 4e4eecd46..65d954fee 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -1,7 +1,33 @@
-# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+---
+libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool: 'openshift-ansible'
+libvirt_network: openshift-ansible
+libvirt_uri: 'qemu:///system'
-base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
-base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
-base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+deployment_vars:
+ origin:
+ image:
+ url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
+ name: CentOS-7-x86_64-GenericCloud.qcow2
+ sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+ ssh_user: openshift
+ sudo: yes
+ online:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: openshift
+ sudo: yes
+# origin:
+# fedora:
+# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
+# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml
deleted file mode 100644
index dd95fd57f..000000000
--- a/playbooks/libvirt/openshift-master/config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-- name: master/config.yml, populate oo_masters_to_config host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host:
- name: '{{ item }}'
- groups: oo_masters_to_config
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Configure instances
- hosts: oo_masters_to_config
- vars:
- openshift_hostname: '{{ ansible_default_ipv4.address }}'
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- - pods
- - os_env_extras
diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml
deleted file mode 100644
index ad0c0fbe2..000000000
--- a/playbooks/libvirt/openshift-master/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml
deleted file mode 100644
index 3244a8046..000000000
--- a/playbooks/libvirt/openshift-node/config.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host:
- name: '{{ item }}'
- groups: oo_nodes_to_config
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
- - add_host:
- name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- when: oo_host_group_exp is defined
-
-
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ ansible_default_ipv4.address }}"
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
- vars:
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - os_env_extras
- - os_env_extras_node
diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/libvirt/openshift-node/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml
deleted file mode 100644
index ad0c0fbe2..000000000
--- a/playbooks/libvirt/openshift-node/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data
deleted file mode 100644
index 5d779519f..000000000
--- a/playbooks/libvirt/templates/meta-data
+++ /dev/null
@@ -1,2 +0,0 @@
-instance-id: {{ item[0] }}
-local-hostname: {{ item[0] }}
diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data
deleted file mode 100644
index 985badc8e..000000000
--- a/playbooks/libvirt/templates/user-data
+++ /dev/null
@@ -1,10 +0,0 @@
-#cloud-config
-
-disable_root: 0
-
-system_info:
- default_user:
- name: root
-
-ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}