From 7c7cb82fdd5583784fd5832b92886abf86934325 Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Fri, 6 Mar 2015 13:52:20 -0700 Subject: Use ansible playbook to initialize openshift cluster * Added playbooks/gce/openshift-cluster * Added bin/cluster (will replace cluster.sh) --- bin/cluster | 100 +++++++++++++++++++++ playbooks/gce/openshift-cluster/filter_plugins | 1 + playbooks/gce/openshift-cluster/launch.yml | 62 +++++++++++++ .../gce/openshift-cluster/launch_instances.yml | 37 ++++++++ playbooks/gce/openshift-cluster/terminate.yml | 25 ++++++ playbooks/gce/openshift-cluster/vars.yml | 1 + playbooks/gce/openshift-master/config.yml | 13 ++- playbooks/gce/openshift-master/terminate.yml | 2 +- playbooks/gce/openshift-node/config.yml | 12 ++- playbooks/gce/openshift-node/terminate.yml | 2 +- roles/docker/tasks/main.yml | 2 +- roles/openshift_common/tasks/main.yml | 3 + 12 files changed, 253 insertions(+), 7 deletions(-) create mode 100755 bin/cluster create mode 120000 playbooks/gce/openshift-cluster/filter_plugins create mode 100644 playbooks/gce/openshift-cluster/launch.yml create mode 100644 playbooks/gce/openshift-cluster/launch_instances.yml create mode 100644 playbooks/gce/openshift-cluster/terminate.yml create mode 100644 playbooks/gce/openshift-cluster/vars.yml diff --git a/bin/cluster b/bin/cluster new file mode 100755 index 000000000..7afdce0e5 --- /dev/null +++ b/bin/cluster @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# vim: expandtab:tabstop=4:shiftwidth=4 + +import argparse +import ConfigParser +import sys +import os + + +class Cluster(object): + """Python wrapper to ensure environment is correct for running ansible playbooks + """ + + def __init__(self, args): + self.args = args + + # setup ansible ssh environment + if 'ANSIBLE_SSH_ARGS' not in os.environ: + os.environ['ANSIBLE_SSH_ARGS'] = ( + '-o ForwardAgent=yes' + '-o StrictHostKeyChecking=no' + '-o UserKnownHostsFile=/dev/null' + '-o ControlMaster=auto' + '-o ControlPersist=600s' + ) + + def apply(self): + # setup ansible playbook environment + config = ConfigParser.ConfigParser() + if 'gce' == self.args.provider: + config.readfp(open('inventory/gce/gce.ini')) + + for key in config.options('gce'): + os.environ[key] = config.get('gce', key) + + inventory = '-i inventory/gce/gce.py' + elif 'aws' == self.args.provider: + config.readfp(open('inventory/aws/ec2.ini')) + + for key in config.options('ec2'): + os.environ[key] = config.get('ec2', key) + + inventory = '-i inventory/aws/ec2.py' + else: + assert False, "invalid PROVIDER {}".format(self.args.provider) + + env = {'cluster_id': self.args.cluster_id} + + if 'create' == self.args.action: + playbook = "playbooks/{}/openshift-cluster/launch.yml".format(self.args.provider) + env['masters'] = self.args.masters + env['nodes'] = self.args.nodes + + elif 'terminate' == self.args.action: + playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(self.args.provider) + elif 'list' == self.args.action: + # todo: implement cluster list + argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) + elif 'update' == self.args.action: + # todo: implement cluster update + argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) + else: + assert False, "invalid ACTION {}".format(self.args.action) + + verbose = '' + if self.args.verbose > 0: + verbose = '-{}'.format('v' * self.args.verbose) + + ansible_env = '-e \'{}\''.format( + ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()]) + ) + + command = 'ansible-playbook {} {} {} 
{}'.format( + verbose, inventory, ansible_env, playbook + ) + + if self.args.verbose > 1: + command = 'time {}'.format(command) + + if self.args.verbose > 0: + sys.stderr.write('RUN [{}]\n'.format(command)) + sys.stderr.flush() + + os.system(command) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Manage OpenShift Cluster') + parser.add_argument('-p', '--provider', default='gce', choices=['gce', 'aws'], + help='One of the supported cloud providers') + parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster') + parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster') + parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity') + parser.add_argument('--version', action='version', version='%(prog)s 0.1') + parser.add_argument('action', choices=['create', 'terminate', 'update', 'list']) + parser.add_argument('provider', choices=['gce', 'aws']) + parser.add_argument('cluster_id', help='prefix for cluster VM names') + args = parser.parse_args() + + Cluster(args).apply() diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/gce/openshift-cluster/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml new file mode 100644 index 000000000..ba9d58a74 --- /dev/null +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -0,0 +1,62 @@ +--- +- name: Launch instance(s) + hosts: localhost + connection: local + gather_facts: no + + vars_files: + - vars.yml + + tasks: + - set_fact: k8s_type="master" + + - name: "Generate master instance names(s)" + set_fact: scratch="{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}" + register: instance_names_output + with_sequence: start=1 end={{ masters }} + + # These set_fact's cannot be combined + - set_fact: + instance_names_string: "{% for item in instance_names_output.results %}{{item.ansible_facts.scratch}} {% endfor %}" + + - set_fact: + master_names: "{{ instance_names_string.strip().split(' ') }}" + + - include: launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + group_name: "tag_env-host-type-{{ cluster_id }}-openshift-master" + + - set_fact: k8s_type="node" + + - name: "Generate node instance names(s)" + set_fact: scratch="{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}" + register: instance_names_output + with_sequence: start=1 end={{ nodes }} + + # These set_fact's cannot be combined + - set_fact: + instance_names_string: "{% for item in instance_names_output.results %}{{item.ansible_facts.scratch}} {% endfor %}" + + - set_fact: + node_names: "{{ instance_names_string.strip().split(' ') }}" + + - include: launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + group_name: "tag_env-host-type-{{ cluster_id }}-openshift-node" + + +- include: ../openshift-master/config.yml + vars: + oo_host_group_exp: "{{ master_names }}" + oo_env: "{{ cluster_id }}" + +- include: ../openshift-node/config.yml + vars: + oo_host_group_exp: "{{ node_names }}" + oo_env: "{{ cluster_id }}" diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml 
b/playbooks/gce/openshift-cluster/launch_instances.yml new file mode 100644 index 000000000..ff19b94d8 --- /dev/null +++ b/playbooks/gce/openshift-cluster/launch_instances.yml @@ -0,0 +1,37 @@ + +- set_fact: + machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}" + machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}" + +- name: Launch instance(s) + gce: + instance_names: "{{ instances }}" + machine_type: "{{ machine_type }}" + image: "{{ machine_image }}" + service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + project_id: "{{ lookup('env', 'gce_project_id') }}" + tags: + - "created-by-{{ cluster }}" + - "env-{{ cluster }}" + - "host-type-{{ type }}" + - "env-host-type-{{ cluster }}-openshift-{{ type }}" + register: gce + +- name: Add new instances public IPs + add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groups={{ group_name }}" + with_items: gce.instance_data + +- name: Wait for ssh + wait_for: "port=22 host={{ item.public_ip }}" + with_items: gce.instance_data + +- debug: var=gce + +- name: Wait for root user setup + command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" + register: result + until: result.rc == 0 + retries: 20 + delay: 10 + with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml new file mode 100644 index 000000000..eff52a807 --- /dev/null +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -0,0 +1,25 @@ +--- +- name: Terminate instance(s) + hosts: localhost + + vars_files: + - vars.yml + + tasks: + - debug: msg="Retrieve node names" + - debug: msg="Retrieve master names" + - debug: var=groups + +- include: ../openshift-node/terminate.yml + vars: + oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' + gce_service_account_email: "1043659492591-r0tpbf8q4fbb9dakhjfhj89e4m1ld83t@developer.gserviceaccount.com" + gce_pem_file: "~/.gce/openshift-gce-devel_priv_key.pem" + gce_project_id: "openshift-gce-devel" + +- include: ../openshift-master/terminate.yml + vars: + oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]' + gce_service_account_email: "1043659492591-r0tpbf8q4fbb9dakhjfhj89e4m1ld83t@developer.gserviceaccount.com" + gce_pem_file: "~/.gce/openshift-gce-devel_priv_key.pem" + gce_project_id: "openshift-gce-devel" diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -0,0 +1 @@ +--- diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml index a74250d13..5581e8401 100644 --- a/playbooks/gce/openshift-master/config.yml +++ b/playbooks/gce/openshift-master/config.yml @@ -1,5 +1,4 @@ ---- -- name: "populate oo_hosts_to_config host group if needed" +- name: "master/config.yml, populate oo_hosts_to_config host group if needed" hosts: localhost gather_facts: no tasks: @@ -13,6 +12,16 @@ connection: ssh user: root +- name: "Retrieve public ip" + hosts: oo_hosts_to_config + connection: ssh + user: root + gather_facts: yes + tasks: + - command: 'curl 
"http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" -H "Metadata-Flavor: Google"' + register: output + - set_fact: gce_public_ip="{{ output.stdout }}" + - name: "Set Origin specific facts on localhost (for later use)" hosts: localhost gather_facts: no diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml index 76e1404b5..f1345874a 100644 --- a/playbooks/gce/openshift-master/terminate.yml +++ b/playbooks/gce/openshift-master/terminate.yml @@ -12,7 +12,7 @@ - debug: msg="{{ groups['oo_hosts_to_terminate'] }}" -- name: Terminate instances +- name: Terminate master instances hosts: localhost connection: local tasks: diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index 78047cf40..57b9e3198 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -1,5 +1,4 @@ ---- -- name: "populate oo_hosts_to_config host group if needed" +- name: "node/config.yml, populate oo_hosts_to_config host group if needed" hosts: localhost gather_facts: no tasks: @@ -12,6 +11,11 @@ hosts: "tag_env-host-type-{{ oo_env }}-openshift-master" connection: ssh user: root + gather_facts: yes + tasks: + - command: 'curl "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" -H "Metadata-Flavor: Google"' + register: output + - set_fact: gce_public_ip="{{ output.stdout }}" - name: "Set OO sepcific facts on localhost (for later use)" hosts: localhost @@ -36,6 +40,10 @@ user: root vars_files: - vars.yml + + tasks: + - debug: var=gce_public_ip + roles: - { role: openshift_node, diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml index 8d60f27b3..d4555084b 100644 --- a/playbooks/gce/openshift-node/terminate.yml +++ b/playbooks/gce/openshift-node/terminate.yml @@ -12,7 +12,7 @@ - debug: msg="{{ groups['oo_hosts_to_terminate'] }}" -- name: Terminate instances +- name: Terminate node instances hosts: localhost connection: local tasks: diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 2ecefd588..ca700db17 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -11,5 +11,5 @@ # From the origin rpm there exists instructions on how to # setup origin properly. 
The following steps come from there - name: Change root to be in the Docker group - user: name=root groups=docker append=yes + user: name=root groups=dockerroot append=yes diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 07737a71f..656a3880d 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,6 +2,9 @@ - name: Set hostname hostname: name={{ openshift_hostname }} +- name: Update all packages + yum: name=* state=latest + - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From f6b2eaf7d12ff1f74551662cea46a8bad6beac33 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 13 Mar 2015 03:02:29 -0400 Subject: Add spacing to implicit string concatenation for python backwards compatibility --- bin/cluster | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/cluster b/bin/cluster index 7afdce0e5..ad6e74577 100755 --- a/bin/cluster +++ b/bin/cluster @@ -18,10 +18,10 @@ class Cluster(object): if 'ANSIBLE_SSH_ARGS' not in os.environ: os.environ['ANSIBLE_SSH_ARGS'] = ( '-o ForwardAgent=yes' - '-o StrictHostKeyChecking=no' - '-o UserKnownHostsFile=/dev/null' - '-o ControlMaster=auto' - '-o ControlPersist=600s' + ' -o StrictHostKeyChecking=no' + ' -o UserKnownHostsFile=/dev/null' + ' -o ControlMaster=auto' + ' -o ControlPersist=600s' ) def apply(self): -- cgit v1.2.3 From 33dda93c3920d9f2df371d71393fb35829c1bdd1 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 13 Mar 2015 03:49:43 -0400 Subject: add oo_prepend_strings_in_list filter --- filter_plugins/oo_filters.py | 106 +++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 44 deletions(-) diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index b57056375..caf1fd1f0 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1,39 +1,42 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + from ansible import errors, runner import json import pdb def oo_pdb(arg): - ''' This pops you into a pdb instance where arg is the data passed in from the filter. + ''' This pops you into a pdb instance where arg is the data passed in from the filter. Ex: "{{ hostvars | oo_pdb }}" - ''' - pdb.set_trace() - return arg + ''' + pdb.set_trace() + return arg def oo_len(arg): - ''' This returns the length of the argument + ''' This returns the length of the argument Ex: "{{ hostvars | oo_len }}" - ''' - return len(arg) + ''' + return len(arg) def get_attr(data, attribute=None): - ''' This looks up dictionary attributes of the form a.b.c and returns the value. + ''' This looks up dictionary attributes of the form a.b.c and returns the value. Ex: data = {'a': {'b': {'c': 5}}} attribute = "a.b.c" returns 5 - ''' - - if not attribute: - raise errors.AnsibleFilterError("|failed expects attribute to be set") + ''' + if not attribute: + raise errors.AnsibleFilterError("|failed expects attribute to be set") - ptr = data - for attr in attribute.split('.'): - ptr = ptr[attr] + ptr = data + for attr in attribute.split('.'): + ptr = ptr[attr] - return ptr + return ptr def oo_collect(data, attribute=None, filters={}): - ''' This takes a list of dict and collects all attributes specified into a list - If filter is specified then we will include all items that match _ALL_ of filters. 
+ ''' This takes a list of dict and collects all attributes specified into a list + If filter is specified then we will include all items that match _ALL_ of filters. Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return {'a':2, 'z': 'z'}, # True, return {'a':3, 'z': 'z'}, # True, return @@ -42,44 +45,59 @@ def oo_collect(data, attribute=None, filters={}): attribute = 'a' filters = {'z': 'z'} returns [1, 2, 3] - ''' + ''' - if not issubclass(type(data), list): - raise errors.AnsibleFilterError("|failed expects to filter on a List") + if not issubclass(type(data), list): + raise errors.AnsibleFilterError("|failed expects to filter on a List") - if not attribute: - raise errors.AnsibleFilterError("|failed expects attribute to be set") + if not attribute: + raise errors.AnsibleFilterError("|failed expects attribute to be set") - if filters: - retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ] - else: - retval = [get_attr(d, attribute) for d in data] + if filters: + retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ] + else: + retval = [get_attr(d, attribute) for d in data] - return retval + return retval def oo_select_keys(data, keys): - ''' This returns a list, which contains the value portions for the keys + ''' This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] - ''' + ''' + + if not issubclass(type(data), dict): + raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary") - if not issubclass(type(data), dict): - raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary") + if not issubclass(type(keys), list): + raise errors.AnsibleFilterError("|failed expects first param is a list") - if not issubclass(type(keys), list): - raise errors.AnsibleFilterError("|failed expects first param is a list") + # Gather up the values for the list of keys passed in + retval = [data[key] for key in keys] - # Gather up the values for the list of keys passed in - retval = [data[key] for key in keys] + return retval - return retval +def oo_prepend_strings_in_list(data, prepend): + ''' This takes a list of strings and prepends a string to each item in the + list + Ex: data = ['cart', 'tree'] + prepend = 'apple-' + returns ['apple-cart', 'apple-tree'] + ''' + if not issubclass(type(data), list): + raise errors.AnsibleFilterError("|failed expects first param is a list") + if not all(isinstance(x, basestring) for x in data): + raise errors.AnsibleFilterError("|failed expects first param is a list of strings") + retval = [prepend + s for s in data] + return retval class FilterModule (object): - def filters(self): - return { - "oo_select_keys": oo_select_keys, - "oo_collect": oo_collect, - "oo_len": oo_len, - "oo_pdb": oo_pdb - } + def filters(self): + return { + "oo_select_keys": oo_select_keys, + "oo_collect": oo_collect, + "oo_len": oo_len, + "oo_pdb": oo_pdb, + "oo_prepend_strings_in_list": oo_prepend_strings_in_list + } -- cgit v1.2.3 From 66332175b61a5a538aa73b76cbcf151e1882a52c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 13 Mar 2015 03:55:04 -0400 Subject: Move yum update * to new os_update_latest role --- roles/openshift_common/tasks/main.yml | 3 --- roles/os_update_latest/tasks/main.yml | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 roles/os_update_latest/tasks/main.yml diff --git a/roles/openshift_common/tasks/main.yml 
b/roles/openshift_common/tasks/main.yml index 656a3880d..07737a71f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,9 +2,6 @@ - name: Set hostname hostname: name={{ openshift_hostname }} -- name: Update all packages - yum: name=* state=latest - - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml new file mode 100644 index 000000000..4a2c3d47a --- /dev/null +++ b/roles/os_update_latest/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Update all packages + yum: name=* state=latest -- cgit v1.2.3 From e337235d471468b400acadcbd56ad14f39a2a222 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 13 Mar 2015 03:56:01 -0400 Subject: add roles symlink to playbooks/gce/openshift-cluster to allow launch to call os_update_latest role --- playbooks/gce/openshift-cluster/roles | 1 + 1 file changed, 1 insertion(+) create mode 120000 playbooks/gce/openshift-cluster/roles diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/gce/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file -- cgit v1.2.3 From 9199379f94f6b11a4841e31f6c58a11c1e9f8c3a Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 13 Mar 2015 03:58:23 -0400 Subject: Various fixes - playbooks/gce/openshift-cluster: - Remove some stray debugging statements - Some minor formatting fixes - removing un-necessary quotes - cleaning up some jinja templates for readability - add a play to the launch playbook to apply the os_update_latest role on all hosts in the new environment - improve setting groups and gce_public_ip when using add_host module - set gce_public_ip as a variable for the host using the returned gce instance_data - add a group for each tag configured on the host (pre-pending tag_ to the tag name) - update the openshift-master/config.yml and openshift-node/config.yml includes to use the tag_env-host-type groups - openshift-{master,node}/config.yml - Some cleanup - remove some extraneous quotes - remove connection: ssh from remote hosts, since it is the default - remove user: root and instead set ansible_ssh_user in inventory/gce/group_vars/all - set openshift_public_ip and openshift_env to templated values in inventory/gce/group_vars/all as well - no longer set openshift_node_ips for the master host, since nodes will register themselves now when they are configured (prevent reboot on adding nodes) - move setting openshift_master_ips and openshift_public_master_ips using set_fact and instead use the vars: of the 'Configure Instances' play --- inventory/gce/group_vars/all | 4 + playbooks/gce/openshift-cluster/launch.yml | 22 +-- .../gce/openshift-cluster/launch_instances.yml | 8 +- playbooks/gce/openshift-master/config.yml | 40 +----- playbooks/gce/openshift-node/config.yml | 148 +++++++++++++++------ 5 files changed, 133 insertions(+), 89 deletions(-) create mode 100644 inventory/gce/group_vars/all diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all new file mode 100644 index 000000000..4cd94c509 --- /dev/null +++ b/inventory/gce/group_vars/all @@ -0,0 +1,4 @@ +--- +ansible_ssh_user: root +openshift_public_ip: "{{ gce_public_ip }}" +openshift_env: "{{ oo_env }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index ba9d58a74..c70c199c6 
100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -3,21 +3,19 @@ hosts: localhost connection: local gather_facts: no - vars_files: - vars.yml - tasks: - set_fact: k8s_type="master" - - name: "Generate master instance names(s)" - set_fact: scratch="{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}" + - name: Generate master instance names(s) + set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} register: instance_names_output with_sequence: start=1 end={{ masters }} # These set_fact's cannot be combined - set_fact: - instance_names_string: "{% for item in instance_names_output.results %}{{item.ansible_facts.scratch}} {% endfor %}" + instance_names_string: "{% for item in instance_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - set_fact: master_names: "{{ instance_names_string.strip().split(' ') }}" @@ -31,14 +29,14 @@ - set_fact: k8s_type="node" - - name: "Generate node instance names(s)" - set_fact: scratch="{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}" + - name: Generate node instance names(s) + set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} register: instance_names_output with_sequence: start=1 end={{ nodes }} # These set_fact's cannot be combined - set_fact: - instance_names_string: "{% for item in instance_names_output.results %}{{item.ansible_facts.scratch}} {% endfor %}" + instance_names_string: "{% for item in instance_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - set_fact: node_names: "{{ instance_names_string.strip().split(' ') }}" @@ -48,15 +46,17 @@ instances: "{{ node_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" - group_name: "tag_env-host-type-{{ cluster_id }}-openshift-node" +- hosts: "tag_env-{{ cluster_id }}" + roles: + - os_update_latest - include: ../openshift-master/config.yml vars: - oo_host_group_exp: "{{ master_names }}" + oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" oo_env: "{{ cluster_id }}" - include: ../openshift-node/config.yml vars: - oo_host_group_exp: "{{ node_names }}" + oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" oo_env: "{{ cluster_id }}" diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml index ff19b94d8..443e763de 100644 --- a/playbooks/gce/openshift-cluster/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/launch_instances.yml @@ -19,15 +19,17 @@ register: gce - name: Add new instances public IPs - add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groups={{ group_name }}" + add_host: + hostname: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" + groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" + gce_public_ip: "{{ item.public_ip }}" with_items: gce.instance_data - name: Wait for ssh wait_for: "port=22 host={{ item.public_ip }}" with_items: gce.instance_data -- debug: var=gce - - name: Wait for root user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" register: result diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml index 5581e8401..812dcb91b 100644 --- a/playbooks/gce/openshift-master/config.yml +++ 
b/playbooks/gce/openshift-master/config.yml @@ -1,50 +1,20 @@ -- name: "master/config.yml, populate oo_hosts_to_config host group if needed" +- name: master/config.yml, populate oo_masters_to_config host group if needed hosts: localhost gather_facts: no tasks: - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_hosts_to_config" + add_host: "name={{ item }} groups=oo_masters_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined - name: "Gather facts for nodes in {{ oo_env }}" hosts: "tag_env-host-type-{{ oo_env }}-openshift-node" - connection: ssh - user: root - -- name: "Retrieve public ip" - hosts: oo_hosts_to_config - connection: ssh - user: root - gather_facts: yes - tasks: - - command: 'curl "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" -H "Metadata-Flavor: Google"' - register: output - - set_fact: gce_public_ip="{{ output.stdout }}" - -- name: "Set Origin specific facts on localhost (for later use)" - hosts: localhost - gather_facts: no - tasks: - - name: Setting openshift_node_ips fact on localhost - set_fact: - openshift_node_ips: "{{ hostvars - | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node']) - | oo_collect(attribute='ansible_default_ipv4.address') }}" - when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined - name: "Configure instances" - hosts: oo_hosts_to_config - connection: ssh - user: root + hosts: oo_masters_to_config vars_files: - - vars.yml + - vars.yml roles: - - { - role: openshift_master, - openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}", - openshift_public_ip: "{{ gce_public_ip }}", - openshift_env: "{{ oo_env }}", - } + - openshift_master - pods - os_env_extras diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index 57b9e3198..17631d578 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -1,56 +1,124 @@ -- name: "node/config.yml, populate oo_hosts_to_config host group if needed" +- name: node/config.yml, populate oo_nodes_to_config host group if needed hosts: localhost gather_facts: no tasks: - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_hosts_to_config" + add_host: "name={{ item }} groups=oo_nodes_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined + - name: Find masters for env + add_host: "name={{ item }} groups=oo_masters_for_node_config" + with_items: groups['tag_env-host-type-' + oo_env + '-openshift-master'] -- name: "Gather facts for masters in {{ oo_env }}" +- name: Gather facts for masters in {{ oo_env }} hosts: "tag_env-host-type-{{ oo_env }}-openshift-master" - connection: ssh - user: root - gather_facts: yes tasks: - - command: 'curl "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" -H "Metadata-Flavor: Google"' - register: output - - set_fact: gce_public_ip="{{ output.stdout }}" + - set_fact: + openshift_master_ip: "{{ openshift_ip }}" + openshift_master_api_url: "{{ openshift_api_url }}" + openshift_master_webui_url: "{{ openshift_webui_url }}" + openshift_master_hostname: "{{ openshift_hostname }}" + openshift_master_public_ip: "{{ openshift_public_ip }}" + openshift_master_api_public_url: "{{ openshift_api_public_url }}" + openshift_master_webui_public_url: "{{ openshift_webui_public_url }}" + 
openshift_master_public_hostnames: "{{ openshift_public_hostname }}" -- name: "Set OO sepcific facts on localhost (for later use)" - hosts: localhost - gather_facts: no +- name: Gather facts for hosts to configure + hosts: tag_env-host-type-{{ oo_env }}-openshift-node tasks: - - name: Setting openshift_master_ips fact on localhost - set_fact: - openshift_master_ips: "{{ hostvars - | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master']) - | oo_collect(attribute='ansible_default_ipv4.address') }}" - when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined - - name: Setting openshift_master_public_ips fact on localhost - set_fact: - openshift_master_public_ips: "{{ hostvars - | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master']) - | oo_collect(attribute='gce_public_ip') }}" - when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined - -- name: "Configure instances" - hosts: oo_hosts_to_config - connection: ssh - user: root - vars_files: - - vars.yml + - set_fact: + openshift_node_hostname: "{{ openshift_hostname }}" + openshift_node_name: "{{ openshift_hostname }}" + openshift_node_cpu: "{{ openshift_node_cpu if openshift_node_cpu else ansible_processor_cores }}" + openshift_node_memory: "{{ openshift_node_memory if openshift_node_memory else (ansible_memtotal_mb|int * 1024 * 1024 * 0.75)|int }}" + openshift_node_pod_cidr: "{{ openshift_node_pod_cidr if openshift_node_pod_cidr else None }}" + openshift_node_host_ip: "{{ openshift_ip }}" + openshift_node_labels: "{{ openshift_node_labels if openshift_node_labels else {} }}" + openshift_node_annotations: "{{ openshift_node_annotations if openshift_node_annotations else {} }}" +- name: Register nodes + hosts: tag_env-host-type-{{ oo_env }}-openshift-master[0] + vars: + openshift_node_group: tag_env-host-type-{{ oo_env }}-openshift-node + openshift_nodes: "{{ hostvars + | oo_select_keys(groups[openshift_node_group]) }}" + openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master + openshift_master_urls: "{{ hostvars + | oo_select_keys(groups[openshift_master_group]) + | oo_collect(attribute='openshift_master_api_url') }}" + openshift_master_public_urls: "{{ hostvars + | oo_select_keys(groups[openshift_master_group]) + | oo_collect(attribute='openshift_master_api_public_url') }}" + pre_tasks: + roles: + - openshift_register_nodes tasks: - - debug: var=gce_public_ip + - name: Create local temp directory for syncing certs + local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX + register: mktemp + - name: Sync master certs to localhost + synchronize: + mode: pull + checksum: yes + src: /var/lib/openshift/openshift.local.certificates + dest: "{{ mktemp.stdout }}" + +# TODO: sync generated certs between masters +# +- name: Configure instances + hosts: oo_nodes_to_config + vars_files: + - vars.yml + vars: + openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master + openshift_master_ips: "{{ hostvars + | oo_select_keys(groups[openshift_master_group]) + | oo_collect(attribute='openshift_master_ip') }}" + openshift_master_hostnames: "{{ hostvars + | oo_select_keys(groups[openshift_master_group]) + | oo_collect(attribute='openshift_master_hostname') }}" + openshift_master_public_ips: "{{ hostvars + | oo_select_keys(groups[openshift_master_group]) + | oo_collect(attribute='openshift_master_public_ip') }}" + openshift_master_public_hostnames: "{{ hostvars + | oo_select_keys(groups[openshift_master_group]) + | 
oo_collect(attribute='openshift_master_public_hostname') }}" + cert_parent_rel_path: openshift.local.certificates + cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift_node_name }}" + cert_base_path: /var/lib/openshift + cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" + cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" + pre_tasks: + - name: Ensure certificate directories exists + file: + path: "{{ item }}" + state: directory + with_items: + - "{{ cert_path }}" + - "{{ cert_parent_path }}/ca" + + # TODO: only sync to a node if it's certs have been updated + # TODO: notify restart openshift-node and/or restart openshift-sdn-node, + # possibly test service started time against certificate/config file + # timestamps in openshift-node or openshift-sdn-node to trigger notify + # TODO: also copy ca cert: /var/lib/openshift/openshift.local.certificates/ca/cert.crt + - name: Sync certs to nodes + synchronize: + checksum: yes + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: no + group: no + with_items: + - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_rel_path }}" + dest: "{{ cert_parent_path }}" + - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_parent_rel_path }}/ca/cert.crt" + dest: "{{ cert_parent_path }}/ca/cert.crt" + - local_action: file name={{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }} state=absent + run_once: true roles: - - { - role: openshift_node, - openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}", - openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}", - openshift_public_ip: "{{ gce_public_ip }}", - openshift_env: "{{ oo_env }}", - } - - docker + - openshift_node - os_env_extras + - os_env_extras_node + -- cgit v1.2.3 From 3324b6c8889074ee17d7be05588de8b58aa3774f Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Fri, 6 Mar 2015 13:52:20 -0700 Subject: Use ansible playbook to initialize openshift cluster * Added playbooks/gce/openshift-cluster * Added bin/cluster (will replace cluster.sh) --- playbooks/gce/openshift-node/config.yml | 1 + roles/openshift_common/tasks/main.yml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index 17631d578..7f80b90a7 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -53,6 +53,7 @@ roles: - openshift_register_nodes tasks: + tasks: - name: Create local temp directory for syncing certs local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX register: mktemp diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 07737a71f..656a3880d 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,6 +2,9 @@ - name: Set hostname hostname: name={{ openshift_hostname }} +- name: Update all packages + yum: name=* state=latest + - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From 12745f3eb1f4eb99a27f03d6b7a60cd376add580 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 16 Mar 2015 15:12:10 -0400 Subject: Use env for gce params --- playbooks/gce/openshift-cluster/terminate.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 
eff52a807..ee536be69 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -13,13 +13,13 @@ - include: ../openshift-node/terminate.yml vars: oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' - gce_service_account_email: "1043659492591-r0tpbf8q4fbb9dakhjfhj89e4m1ld83t@developer.gserviceaccount.com" - gce_pem_file: "~/.gce/openshift-gce-devel_priv_key.pem" - gce_project_id: "openshift-gce-devel" + gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + gce_project_id: "{{ lookup('env', 'gce_project_id') }}" - include: ../openshift-master/terminate.yml vars: oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]' - gce_service_account_email: "1043659492591-r0tpbf8q4fbb9dakhjfhj89e4m1ld83t@developer.gserviceaccount.com" - gce_pem_file: "~/.gce/openshift-gce-devel_priv_key.pem" - gce_project_id: "openshift-gce-devel" + gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + gce_project_id: "{{ lookup('env', 'gce_project_id') }}" -- cgit v1.2.3 From 6ad94864f7d985f1bb671536bd398ea4bcd0f163 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 16 Mar 2015 15:13:12 -0400 Subject: add repos role to gce cluster launch so that we are applying os_update_latest after repo config --- playbooks/gce/openshift-cluster/launch.yml | 1 + roles/openshift_common/tasks/main.yml | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index c70c199c6..70025e103 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -49,6 +49,7 @@ - hosts: "tag_env-{{ cluster_id }}" roles: + - repos - os_update_latest - include: ../openshift-master/config.yml diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 656a3880d..07737a71f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -2,9 +2,6 @@ - name: Set hostname hostname: name={{ openshift_hostname }} -- name: Update all packages - yum: name=* state=latest - - name: Configure local facts file file: path=/etc/ansible/facts.d/ state=directory mode=0750 -- cgit v1.2.3 From 13dc8505feb93adc311a4a2d8e714c7d1e61cf1f Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 16 Mar 2015 15:15:03 -0400 Subject: Fix openshift_master_ips and openshift_master_public_ips resolution - don't use set_fact on localhost for openshift_master_ips and openshift_master_public_ips - we are only using it for the configure play - move definition to vars section of configure play - otherwise we'd have to set openshift_master_ips and openshift_master_public_ips from hostvars['localhost'] and since we aren't refrerencing it anywhere else, might as well just do it in vars instead of set_fact on locahost. 
--- playbooks/gce/openshift-node/config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index 7f80b90a7..9d87c4e8f 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -53,7 +53,6 @@ roles: - openshift_register_nodes tasks: - tasks: - name: Create local temp directory for syncing certs local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX register: mktemp @@ -69,7 +68,7 @@ # - name: Configure instances hosts: oo_nodes_to_config - vars_files: +vars_files: - vars.yml vars: openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master -- cgit v1.2.3 From 9575258e5a1b8f9ee8ec7ffc7ad74fa5dfeabc00 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 13:25:18 -0400 Subject: replace oo_hosts_to_config with oo_nodes_to_config and oo_masters_to_config --- playbooks/aws/openshift-master/config.yml | 6 +++--- playbooks/aws/openshift-master/launch.yml | 4 ++-- playbooks/aws/openshift-node/config.yml | 6 +++--- playbooks/aws/openshift-node/launch.yml | 4 ++-- playbooks/gce/openshift-master/launch.yml | 4 ++-- playbooks/gce/openshift-node/config.yml | 1 - playbooks/gce/openshift-node/launch.yml | 8 ++++---- 7 files changed, 16 insertions(+), 17 deletions(-) diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index b3227afa9..bbf1f654a 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -1,10 +1,10 @@ --- -- name: "populate oo_hosts_to_config host group if needed" +- name: "populate oo_masters_to_config host group if needed" hosts: localhost gather_facts: no tasks: - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_hosts_to_config" + add_host: "name={{ item }} groups=oo_masters_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined @@ -25,7 +25,7 @@ when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined - name: "Configure instances" - hosts: oo_hosts_to_config + hosts: oo_masters_to_config connection: ssh user: root vars_files: diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml index a889b93be..3d5a7f579 100644 --- a/playbooks/aws/openshift-master/launch.yml +++ b/playbooks/aws/openshift-master/launch.yml @@ -45,8 +45,8 @@ args: tags: "{{ oo_new_inst_tags }}" - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_masters_to_config + add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_masters_to_config" with_together: - oo_new_inst_names - ec2.instances diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 21807b1cf..822b66464 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -1,10 +1,10 @@ --- -- name: "populate oo_hosts_to_config host group if needed" +- name: "populate oo_nodes_to_config host group if needed" hosts: localhost gather_facts: no tasks: - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_hosts_to_config" + add_host: "name={{ item }} groups=oo_nodes_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined @@ 
-31,7 +31,7 @@ when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined - name: "Configure instances" - hosts: oo_hosts_to_config + hosts: oo_nodes_to_config connection: ssh user: root vars_files: diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml index a889b93be..4745fc658 100644 --- a/playbooks/aws/openshift-node/launch.yml +++ b/playbooks/aws/openshift-node/launch.yml @@ -45,8 +45,8 @@ args: tags: "{{ oo_new_inst_tags }}" - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_nodes_to_config + add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_nodes_to_config" with_together: - oo_new_inst_names - ec2.instances diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml index f2800b061..3512274cc 100644 --- a/playbooks/gce/openshift-master/launch.yml +++ b/playbooks/gce/openshift-master/launch.yml @@ -24,8 +24,8 @@ tags: "{{ oo_new_inst_tags }}" register: gce - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_masters_to_config + add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_masters_to_config" with_items: gce.instance_data - name: Wait for ssh diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index 9d87c4e8f..d24acb8fa 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -121,4 +121,3 @@ vars_files: - openshift_node - os_env_extras - os_env_extras_node - diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml index 935599efd..ca2914d8a 100644 --- a/playbooks/gce/openshift-node/launch.yml +++ b/playbooks/gce/openshift-node/launch.yml @@ -24,8 +24,8 @@ tags: "{{ oo_new_inst_tags }}" register: gce - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_nodes_to_config + add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_nodes_to_config" with_items: gce.instance_data - name: Wait for ssh @@ -48,10 +48,10 @@ # Always bounce service to pick up new credentials #- name: "Restart instances" -# hosts: oo_hosts_to_config +# hosts: oo_nodes_to_config # connection: ssh # user: root # tasks: -# - debug: var=groups.oo_hosts_to_config +# - debug: var=groups.oo_nodes_to_config # - name: Restart OpenShift # service: name=openshift-node enabled=yes state=restarted -- cgit v1.2.3 From 011ff923489fd1dd5fa072a685ce881ab69b8f1c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 13:26:46 -0400 Subject: use more specific variable names in gce/openshift-cluster/launch.yml --- playbooks/gce/openshift-cluster/launch.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 70025e103..b30452725 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -10,15 +10,15 @@ - name: Generate master instance names(s) set_fact: scratch={{ 
cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: instance_names_output + register: master_names_output with_sequence: start=1 end={{ masters }} # These set_fact's cannot be combined - set_fact: - instance_names_string: "{% for item in instance_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" + master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - set_fact: - master_names: "{{ instance_names_string.strip().split(' ') }}" + master_names: "{{ master_names_string.strip().split(' ') }}" - include: launch_instances.yml vars: @@ -31,15 +31,15 @@ - name: Generate node instance names(s) set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: instance_names_output + register: node_names_output with_sequence: start=1 end={{ nodes }} # These set_fact's cannot be combined - set_fact: - instance_names_string: "{% for item in instance_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" + node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - set_fact: - node_names: "{{ instance_names_string.strip().split(' ') }}" + node_names: "{{ node_names_string.strip().split(' ') }}" - include: launch_instances.yml vars: -- cgit v1.2.3 From 85e6948fca954d3c066bf5a6123ada6b96adf45c Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Thu, 19 Mar 2015 15:06:38 -0700 Subject: * Add DOCKER chain to iptables --- README.md | 2 +- playbooks/gce/openshift-cluster/terminate.yml | 5 ----- roles/os_firewall/tasks/firewall/iptables.yml | 14 ++++++++++++++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index ffdfee6f2..906d2e3f2 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ Setup - Directory Structure: - [cloud.rb](cloud.rb) - light wrapper around Ansible - - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters + - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters - [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible - [inventory/](inventory) - houses Ansible dynamic inventory scripts - [lib/](lib) - library components of cloud.rb diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index ee536be69..0281ae953 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -5,11 +5,6 @@ vars_files: - vars.yml - tasks: - - debug: msg="Retrieve node names" - - debug: msg="Retrieve master names" - - debug: var=groups - - include: ../openshift-node/terminate.yml vars: oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 87e77c083..3d46d6e2d 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,6 +41,20 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 +- name: Check for DOCKER chain + shell: iptables -L |grep '^Chain DOCKER' + ignore_errors: yes + register: check_for_chain + +- name: Create DOCKER chain + command: iptables -N DOCKER + register: create_chain + when: check_for_chain.rc != 0 + +- name: Persist DOCKER chain + command: service iptables save + when: create_chain.rc == 0 + - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ 
item.service }}" -- cgit v1.2.3 From 9fb5bbc79a6753c6125e4f3ea007040dad0482ef Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 19 Mar 2015 23:04:21 -0400 Subject: Add verify_chain action to os_firewall_manage_iptables module - Add verify_chain action to os_firewall_manage_iptables module - Update os_firewall module to use os_firewall_manage_iptables for creating the DOCKER chain. --- .../library/os_firewall_manage_iptables.py | 62 ++++++++++++++-------- roles/os_firewall/tasks/firewall/iptables.yml | 20 +++---- 2 files changed, 47 insertions(+), 35 deletions(-) diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index fef710055..6a018d022 100644 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -51,11 +51,13 @@ class IpTablesCreateJumpRuleError(IpTablesError): # exception was thrown later. for example, when the chain is created # successfully, but the add/remove rule fails. class IpTablesManager: - def __init__(self, module, ip_version, check_mode, chain): + def __init__(self, module): self.module = module - self.ip_version = ip_version - self.check_mode = check_mode - self.chain = chain + self.ip_version = module.params['ip_version'] + self.check_mode = module.check_mode + self.chain = module.params['chain'] + self.create_jump_rule = module.params['create_jump_rule'] + self.jump_rule_chain = module.params['jump_rule_chain'] self.cmd = self.gen_cmd() self.save_cmd = self.gen_save_cmd() self.output = [] @@ -70,13 +72,16 @@ class IpTablesManager: msg="Failed to save iptables rules", cmd=e.cmd, exit_code=e.returncode, output=e.output) + def verify_chain(self): + if not self.chain_exists(): + self.create_chain() + if self.create_jump_rule and not self.jump_rule_exists(): + self.create_jump() + def add_rule(self, port, proto): rule = self.gen_rule(port, proto) if not self.rule_exists(rule): - if not self.chain_exists(): - self.create_chain() - if not self.jump_rule_exists(): - self.create_jump_rule() + self.verify_chain() if self.check_mode: self.changed = True @@ -121,13 +126,13 @@ class IpTablesManager: return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW', '-m', proto, '--dport', str(port), '-j', 'ACCEPT'] - def create_jump_rule(self): + def create_jump(self): if self.check_mode: self.changed = True self.output.append("Create jump rule for chain %s" % self.chain) else: try: - cmd = self.cmd + ['-L', 'INPUT', '--line-numbers'] + cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers'] output = check_output(cmd, stderr=subprocess.STDOUT) # break the input rules into rows and columns @@ -144,11 +149,11 @@ class IpTablesManager: continue last_rule_target = rule[1] - # Raise an exception if we do not find a valid INPUT rule + # Raise an exception if we do not find a valid rule if not last_rule_num or not last_rule_target: raise IpTablesCreateJumpRuleError( chain=self.chain, - msg="Failed to find existing INPUT rules", + msg="Failed to find existing %s rules" % self.jump_rule_chain, cmd=None, exit_code=None, output=None) # Naively assume that if the last row is a REJECT rule, then @@ -156,19 +161,20 @@ class IpTablesManager: # assume that we can just append the rule. 
if last_rule_target == 'REJECT': # insert rule - cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)] + cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)] else: # append rule - cmd = self.cmd + ['-A', 'INPUT'] + cmd = self.cmd + ['-A', self.jump_rule_chain] cmd += ['-j', self.chain] output = check_output(cmd, stderr=subprocess.STDOUT) changed = True self.output.append(output) + self.save() except subprocess.CalledProcessError as e: if '--line-numbers' in e.cmd: raise IpTablesCreateJumpRuleError( chain=self.chain, - msg="Failed to query existing INPUT rules to " + msg="Failed to query existing %s rules to " % self.jump_rule_chain + "determine jump rule location", cmd=e.cmd, exit_code=e.returncode, output=e.output) @@ -192,6 +198,7 @@ class IpTablesManager: self.changed = True self.output.append("Successfully created chain %s" % self.chain) + self.save() except subprocess.CalledProcessError as e: raise IpTablesCreateChainError( chain=self.chain, @@ -200,7 +207,7 @@ class IpTablesManager: ) def jump_rule_exists(self): - cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain] + cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain] return True if subprocess.call(cmd) == 0 else False def chain_exists(self): @@ -220,9 +227,12 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - action=dict(required=True, choices=['add', 'remove']), - protocol=dict(required=True, choices=['tcp', 'udp']), - port=dict(required=True, type='int'), + action=dict(required=True, choices=['add', 'remove', 'verify_chain']), + chain=dict(required=False, default='OS_FIREWALL_ALLOW'), + create_jump_rule=dict(required=False, type='bool', default=True), + jump_rule_chain=dict(required=False, default='INPUT'), + protocol=dict(required=False, choices=['tcp', 'udp']), + port=dict(required=False, type='int'), ip_version=dict(required=False, default='ipv4', choices=['ipv4', 'ipv6']), ), @@ -232,16 +242,24 @@ def main(): action = module.params['action'] protocol = module.params['protocol'] port = module.params['port'] - ip_version = module.params['ip_version'] - chain = 'OS_FIREWALL_ALLOW' - iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain) + if action in ['add', 'remove']: + if not protocol: + error = "protocol is required when action is %s" % action + module.fail_json(msg=error) + if not port: + error = "port is required when action is %s" % action + module.fail_json(msg=error) + + iptables_manager = IpTablesManager(module) try: if action == 'add': iptables_manager.add_rule(port, protocol) elif action == 'remove': iptables_manager.remove_rule(port, protocol) + elif action == 'verify_chain': + iptables_manager.verify_chain() except IpTablesError as e: module.fail_json(msg=e.msg) diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 3d46d6e2d..72a3401cf 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,19 +41,13 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 -- name: Check for DOCKER chain - shell: iptables -L |grep '^Chain DOCKER' - ignore_errors: yes - register: check_for_chain - -- name: Create DOCKER chain - command: iptables -N DOCKER - register: create_chain - when: check_for_chain.rc != 0 - -- name: Persist DOCKER chain - command: service iptables save - when: create_chain.rc == 0 +# Workaround for Docker 1.4 to create DOCKER chain +- name: Add DOCKER chain + os_firewall_manage_iptables: + 
name: "DOCKER chain" + action: verify_chain + create_jump_rule: no +# End of Docker 1.4 workaround - name: Add iptables allow rules os_firewall_manage_iptables: -- cgit v1.2.3 From 2147b1608140f2688ac9781b394824c04e55d07e Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Fri, 20 Mar 2015 09:31:05 -0700 Subject: * Updates from code reviews --- bin/cluster | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/bin/cluster b/bin/cluster index ad6e74577..ce17ee6d7 100755 --- a/bin/cluster +++ b/bin/cluster @@ -42,6 +42,7 @@ class Cluster(object): inventory = '-i inventory/aws/ec2.py' else: + # this code should never be reached assert False, "invalid PROVIDER {}".format(self.args.provider) env = {'cluster_id': self.args.cluster_id} @@ -55,11 +56,12 @@ class Cluster(object): playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(self.args.provider) elif 'list' == self.args.action: # todo: implement cluster list - argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) + raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) elif 'update' == self.args.action: # todo: implement cluster update - argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) + raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) else: + # this code should never be reached assert False, "invalid ACTION {}".format(self.args.action) verbose = '' @@ -81,13 +83,14 @@ class Cluster(object): sys.stderr.write('RUN [{}]\n'.format(command)) sys.stderr.flush() - os.system(command) + error = os.system(command) + if error != 0: + raise Exception("Ansible run failed with exit code %d".format(error)) + if __name__ == '__main__': parser = argparse.ArgumentParser(description='Manage OpenShift Cluster') - parser.add_argument('-p', '--provider', default='gce', choices=['gce', 'aws'], - help='One of the supported cloud providers') parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster') parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster') parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity') -- cgit v1.2.3 From 14b19e665b118349327a5c8c219cc49c96ae1d52 Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Fri, 20 Mar 2015 09:36:34 -0700 Subject: * Replace asserts with raises --- bin/cluster | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/cluster b/bin/cluster index ce17ee6d7..af908155c 100755 --- a/bin/cluster +++ b/bin/cluster @@ -43,7 +43,7 @@ class Cluster(object): inventory = '-i inventory/aws/ec2.py' else: # this code should never be reached - assert False, "invalid PROVIDER {}".format(self.args.provider) + raise argparse.ArgumentError("invalid PROVIDER {}".format(self.args.provider)) env = {'cluster_id': self.args.cluster_id} @@ -62,7 +62,7 @@ class Cluster(object): raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action)) else: # this code should never be reached - assert False, "invalid ACTION {}".format(self.args.action) + raise argparse.ArgumentError("invalid ACTION {}".format(self.args.action)) verbose = '' if self.args.verbose > 0: -- cgit v1.2.3 From 8f35aff7245246de4116fcf3c81e7f095cf1be3a Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:11:22 -0400 Subject: Add new role os_env_extras_node that is a subset of the docker role - Does not install or start docker, since the 
openshift-node role will handle that for us - Only add root to the dockerroot group and configures the enter-container script. --- playbooks/aws/openshift-node/config.yml | 2 +- roles/os_env_extras_node/README.md | 38 +++++++ roles/os_env_extras_node/files/enter-container.sh | 13 +++ roles/os_env_extras_node/meta/main.yml | 124 ++++++++++++++++++++++ roles/os_env_extras_node/tasks/main.yml | 7 ++ 5 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 roles/os_env_extras_node/README.md create mode 100755 roles/os_env_extras_node/files/enter-container.sh create mode 100644 roles/os_env_extras_node/meta/main.yml create mode 100644 roles/os_env_extras_node/tasks/main.yml diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 822b66464..3cf2c58b2 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -44,5 +44,5 @@ openshift_env: "{{ oo_env }}", openshift_public_ip: "{{ ec2_ip_address }}" } - - docker - os_env_extras + - os_env_extras_node diff --git a/roles/os_env_extras_node/README.md b/roles/os_env_extras_node/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/roles/os_env_extras_node/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
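The config.yml hunk above swaps the docker role out of the node play in favor of os_env_extras_node. As a minimal sketch of applying the role on its own (the "nodes" host group name is an assumption for illustration, not taken from this patch):

    - name: Apply node environment extras
      hosts: nodes
      roles:
        - os_env_extras
        - os_env_extras_node

The role only adds root to the dockerroot group and installs the enter-container.sh helper shown in the next hunk; installing and starting docker is intentionally left to the openshift-node role.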
diff --git a/roles/os_env_extras_node/files/enter-container.sh b/roles/os_env_extras_node/files/enter-container.sh new file mode 100755 index 000000000..7cf5b8d83 --- /dev/null +++ b/roles/os_env_extras_node/files/enter-container.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [ $# -ne 1 ] +then + echo + echo "Usage: $(basename $0) " + echo + exit 1 +fi + +PID=$(docker inspect --format '{{.State.Pid}}' $1) + +nsenter --target $PID --mount --uts --ipc --net --pid diff --git a/roles/os_env_extras_node/meta/main.yml b/roles/os_env_extras_node/meta/main.yml new file mode 100644 index 000000000..c5c362c60 --- /dev/null +++ b/roles/os_env_extras_node/meta/main.yml @@ -0,0 +1,124 @@ +--- +galaxy_info: + author: your name + description: + company: your company (optional) + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! + # + #platforms: + #- name: EL + # versions: + # - all + # - 5 + # - 6 + # - 7 + #- name: GenericUNIX + # versions: + # - all + # - any + #- name: Fedora + # versions: + # - all + # - 16 + # - 17 + # - 18 + # - 19 + # - 20 + #- name: opensuse + # versions: + # - all + # - 12.1 + # - 12.2 + # - 12.3 + # - 13.1 + # - 13.2 + #- name: Amazon + # versions: + # - all + # - 2013.03 + # - 2013.09 + #- name: GenericBSD + # versions: + # - all + # - any + #- name: FreeBSD + # versions: + # - all + # - 8.0 + # - 8.1 + # - 8.2 + # - 8.3 + # - 8.4 + # - 9.0 + # - 9.1 + # - 9.1 + # - 9.2 + #- name: Ubuntu + # versions: + # - all + # - lucid + # - maverick + # - natty + # - oneiric + # - precise + # - quantal + # - raring + # - saucy + # - trusty + #- name: SLES + # versions: + # - all + # - 10SP3 + # - 10SP4 + # - 11 + # - 11SP1 + # - 11SP2 + # - 11SP3 + #- name: GenericLinux + # versions: + # - all + # - any + #- name: Debian + # versions: + # - all + # - etch + # - lenny + # - squeeze + # - wheezy + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + # + #categories: + #- cloud + #- cloud:ec2 + #- cloud:gce + #- cloud:rax + #- clustering + #- database + #- database:nosql + #- database:sql + #- development + #- monitoring + #- networking + #- packaging + #- system + #- web +dependencies: [] + # List your role dependencies here, one per line. Only + # dependencies available via galaxy should be listed here. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml new file mode 100644 index 000000000..065f71f74 --- /dev/null +++ b/roles/os_env_extras_node/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755 + +# From the origin rpm there exists instructions on how to +# setup origin properly. 
The following steps come from there +- name: Change root to be in the Docker group + user: name=root groups=dockerroot append=yes -- cgit v1.2.3 From 70c5a715debc1c1a900c6dcfe178b36b2a014ab4 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:14:17 -0400 Subject: Use docker as package name instead of docker-io --- roles/docker/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index ca700db17..593c4c877 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,7 +1,7 @@ --- # tasks file for docker - name: Install docker - yum: pkg=docker-io + yum: pkg=docker - name: enable and start the docker service service: name=docker enabled=yes state=started -- cgit v1.2.3 From 8b68846806d5294b5f43d14772d59aa2b8cf5e73 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:43:00 -0400 Subject: remove os_firewall creation of DOCKER chain --- roles/os_firewall/tasks/firewall/iptables.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 72a3401cf..87e77c083 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,14 +41,6 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 -# Workaround for Docker 1.4 to create DOCKER chain -- name: Add DOCKER chain - os_firewall_manage_iptables: - name: "DOCKER chain" - action: verify_chain - create_jump_rule: no -# End of Docker 1.4 workaround - - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3 From 557cc0ca9ecc22a9d90f9cf9ce549186fe286492 Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Mon, 23 Mar 2015 09:15:08 -0700 Subject: * Updates from code reviews --- bin/cluster | 14 +++++++++++--- playbooks/gce/openshift-cluster/launch_instances.yml | 2 +- playbooks/gce/openshift-master/terminate.yml | 1 + playbooks/gce/openshift-node/terminate.yml | 1 + 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/bin/cluster b/bin/cluster index af908155c..823f50671 100755 --- a/bin/cluster +++ b/bin/cluster @@ -83,9 +83,10 @@ class Cluster(object): sys.stderr.write('RUN [{}]\n'.format(command)) sys.stderr.flush() - error = os.system(command) - if error != 0: - raise Exception("Ansible run failed with exit code %d".format(error)) + status = os.system(command) + if status != 0: + sys.stderr.write("RUN [{}] failed with exit status %d".format(command, status)) + exit(status) @@ -100,4 +101,11 @@ if __name__ == '__main__': parser.add_argument('cluster_id', help='prefix for cluster VM names') args = parser.parse_args() + if 'terminate' == args.action: + sys.stderr.write("This will terminate the ENTIRE {} environment. Are you sure? 
[y/N] ".format(args.cluster_id)) + sys.stderr.flush() + answer = sys.stdin.read(1) + if answer not in ['y', 'Y']: + exit(0) + Cluster(args).apply() diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml index 443e763de..20e31d990 100644 --- a/playbooks/gce/openshift-cluster/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/launch_instances.yml @@ -12,7 +12,7 @@ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" tags: - - "created-by-{{ cluster }}" + - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}" - "env-{{ cluster }}" - "host-type-{{ type }}" - "env-host-type-{{ cluster }}-openshift-{{ type }}" diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml index f1345874a..9e027cf41 100644 --- a/playbooks/gce/openshift-master/terminate.yml +++ b/playbooks/gce/openshift-master/terminate.yml @@ -15,6 +15,7 @@ - name: Terminate master instances hosts: localhost connection: local + gather_facts: no tasks: - name: Terminate master instances gce: diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml index d4555084b..9aa8a48c1 100644 --- a/playbooks/gce/openshift-node/terminate.yml +++ b/playbooks/gce/openshift-node/terminate.yml @@ -15,6 +15,7 @@ - name: Terminate node instances hosts: localhost connection: local + gather_facts: no tasks: - name: Terminate node instances gce: -- cgit v1.2.3 From 461f6c1e07f36238729944a5f769600077ebf0b0 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 17:15:19 -0400 Subject: Rename repos role to openshift_repos - Rename repos role to openshift_repos - Make openshift_repos a dependency of openshift_common - Add README and metadata for openshift_repos - Playbook updates for role rename - Verify libselinux-python is installed, otherwise some of the bulit-in modules we use fail --- playbooks/gce/openshift-master/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml index 812dcb91b..e405e2fb4 100644 --- a/playbooks/gce/openshift-master/config.yml +++ b/playbooks/gce/openshift-master/config.yml @@ -7,7 +7,7 @@ with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined -- name: "Gather facts for nodes in {{ oo_env }}" +- name: Gather facts for nodes in {{ oo_env }} hosts: "tag_env-host-type-{{ oo_env }}-openshift-node" - name: "Configure instances" -- cgit v1.2.3 From d67c5b8f79609d2d3b07cc009f58e3dc988782c5 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 16:30:49 -0400 Subject: node registration changes - Remove default value for openshift_hostname and make it required - Remove workarounds that are no longer needed - Remove resources parameter from openshift_register_node module - pre-create node certificates for each node before registering node - distribute created node certificates to each node - Move node registration logic to a new openshift_register_nodes role - This is because we now have to run the steps on a master as opposed to on the nodes like we were previously doing. - Rename openshift_register_node module to kubernetes_register_node, one more step to genericizing enough for upstreaming, however there are still plenty of openshift specific commands that still need to be genericized. 
--- roles/openshift_common/README.md | 2 +- roles/openshift_common/defaults/main.yml | 2 +- roles/openshift_master/README.md | 2 +- roles/openshift_master/tasks/main.yml | 35 +- roles/openshift_node/README.md | 3 +- roles/openshift_node/defaults/main.yml | 6 - .../library/openshift_register_node.py | 390 --------------------- roles/openshift_node/tasks/main.yml | 68 +--- roles/openshift_register_nodes/README.md | 38 ++ roles/openshift_register_nodes/defaults/main.yml | 5 + .../library/kubernetes_register_node.py | 370 +++++++++++++++++++ roles/openshift_register_nodes/meta/main.yml | 128 +++++++ roles/openshift_register_nodes/tasks/main.yml | 71 ++++ roles/openshift_sdn_node/README.md | 2 +- 14 files changed, 641 insertions(+), 481 deletions(-) delete mode 100644 roles/openshift_node/library/openshift_register_node.py create mode 100644 roles/openshift_register_nodes/README.md create mode 100644 roles/openshift_register_nodes/defaults/main.yml create mode 100644 roles/openshift_register_nodes/library/kubernetes_register_node.py create mode 100644 roles/openshift_register_nodes/meta/main.yml create mode 100644 roles/openshift_register_nodes/tasks/main.yml diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index fce79047c..592a276f9 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -16,7 +16,7 @@ Role Variables |-------------------------------|------------------------------|----------------------------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | Workaround needed to set hostname to IP address | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_env | default | Envrionment name if multiple OpenShift instances | diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index eb6edbc03..86351f6f6 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -4,4 +4,4 @@ openshift_debug_level: 0 # TODO: Once openshift stops resolving hostnames for node queries remove # this... 
openshift_hostname_workaround: true -openshift_hostname: "{{ ansible_default_ipv4.address if openshift_hostname_workaround else ansible_fqdn }}" + diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 5a1b889b2..2f03b4990 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -27,7 +27,7 @@ From openshift_common: | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | Dependencies ------------ diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index d5f4776dc..52f5f694c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -1,4 +1,8 @@ --- +# TODO: allow for overriding default ports where possible +# TODO: if setting up multiple masters, will need to predistribute the certs +# to the additional masters before starting openshift-master + - name: Install OpenShift Master package yum: pkg=openshift-master state=installed @@ -6,9 +10,7 @@ lineinfile: dest: /etc/sysconfig/openshift-master regexp: '^OPTIONS=' - line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if - openshift_node_ips %} --nodes={{ openshift_node_ips - | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\"" + line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\"" notify: - restart openshift-master @@ -34,42 +36,15 @@ option: externally_managed value: "{{ openshift_master_manage_service_externally }}" -# TODO: remove this when origin PR #1298 has landed in OSE -- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal - command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/ - args: - creates: /etc/systemd/system/openshift-master.service -- ini_file: - dest: /etc/systemd/system/openshift-master.service - option: TimeoutStartSec - section: Service - value: 300 - state: present - register: result -- command: systemctl daemon-reload - when: result | changed -# End of workaround pending PR #1298 - - name: Start and enable openshift-master service: name=openshift-master enabled=yes state=started when: not openshift_master_manage_service_externally register: result -#TODO: remove this when origin PR #1204 has landed in OSE -- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated - pause: seconds=30 - when: result | changed -# End of workaround pending PR #1204 - - name: Disable openshift-master if openshift-master is managed externally service: name=openshift-master enabled=false when: openshift_master_manage_service_externally -# TODO: create an os_vars role that has generic env related config and move -# the root kubeconfig setting there, cannot use dependencies to force ordering -# with openshift_node and openshift_master because the way conditional -# dependencies work with current ansible would also exclude the -# openshift_common dependency. 
- name: Create .kube directory file: path: /root/.kube diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 9210bab16..d537a35a5 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -21,7 +21,6 @@ From this role: | openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts | | openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication | | openshift_registry_url | UNDEF (Optional) | Default docker registry to use | -| openshift_node_resources | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory | From openshift_common: | Name | Default Value | | @@ -29,7 +28,7 @@ From openshift_common: | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | Dependencies ------------ diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index e4d5ebfee..6dc73a96e 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -4,9 +4,3 @@ openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}" os_firewall_allow: - service: OpenShift kubelet port: 10250/tcp -openshift_node_resources: - cpu: - memory: - cidr: -openshift_node_labels: {} -openshift_node_annotations: {} diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py deleted file mode 100644 index 4922585d7..000000000 --- a/roles/openshift_node/library/openshift_register_node.py +++ /dev/null @@ -1,390 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - -import os -import multiprocessing -import socket -from subprocess import check_output, Popen -from decimal import * - -DOCUMENTATION = ''' ---- -module: kubernetes_register_node -short_description: Registers a kubernetes node with a master -description: - - Registers a kubernetes node with a master -options: - name: - default: null - description: - - Identifier for this node (usually the node fqdn). - required: true - api_verison: - choices: ['v1beta1', 'v1beta3'] - default: 'v1beta1' - description: - - Kubernetes API version to use - required: true - host_ip: - default: null - description: - - IP Address to associate with the node when registering. - Available in the following API versions: v1beta1. - required: false - hostnames: - default: [] - description: - - Valid hostnames for this node. Available in the following API - versions: v1beta3. - required: false - external_ips: - default: [] - description: - - External IP Addresses for this node. Available in the following API - versions: v1beta3. - required: false - internal_ips: - default: [] - description: - - Internal IP Addresses for this node. Available in the following API - versions: v1beta3. - required: false - cpu: - default: null - description: - - Number of CPUs to allocate for this node. 
If not provided, then - the node will be registered to advertise the number of logical - CPUs available. When using the v1beta1 API, you must specify the - CPU count as a floating point number with no more than 3 decimal - places. API version v1beta3 and newer accepts arbitrary float - values. - required: false - memory: - default: null - description: - - Memory available for this node. If not provided, then the node - will be registered to advertise 80% of MemTotal as available - memory. When using the v1beta1 API, you must specify the memory - size in bytes. API version v1beta3 and newer accepts binary SI - and decimal SI values. - required: false -''' -EXAMPLES = ''' -# Minimal node registration -- openshift_register_node: name=ose3.node.example.com - -# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of -# Memory -- openshift_register_node: - name: ose3.node.example.com - api_version: v1beta1 - hostIP: 192.168.1.1 - cpu: 1 - memory: 500000000 - -# Node registration using the v1beta3 API, setting an alternate hostname, -# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory -- openshift_register_node: - name: ose3.node.example.com - api_version: v1beta3 - external_ips: ['192.168.1.5'] - internal_ips: ['10.0.0.5'] - hostnames: ['ose2.node.internal.local'] - cpu: 3.5 - memory: 1Ti -''' - - -class ClientConfigException(Exception): - pass - -class ClientConfig: - def __init__(self, client_opts, module): - _, output, error = module.run_command(["/usr/bin/openshift", "ex", - "config", "view", "-o", - "json"] + client_opts, - check_rc = True) - self.config = json.loads(output) - - if not (bool(self.config['clusters']) or - bool(self.config['contexts']) or - bool(self.config['current-context']) or - bool(self.config['users'])): - raise ClientConfigException(msg="Client config missing required " \ - "values", - output=output) - - def current_context(self): - return self.config['current-context'] - - def section_has_value(self, section_name, value): - section = self.config[section_name] - if isinstance(section, dict): - return value in section - else: - val = next((item for item in section - if item['name'] == value), None) - return val is not None - - def has_context(self, context): - return self.section_has_value('contexts', context) - - def has_user(self, user): - return self.section_has_value('users', user) - - def has_cluster(self, cluster): - return self.section_has_value('clusters', cluster) - - def get_value_for_context(self, context, attribute): - contexts = self.config['contexts'] - if isinstance(contexts, dict): - return contexts[context][attribute] - else: - return next((c['context'][attribute] for c in contexts - if c['name'] == context), None) - - def get_user_for_context(self, context): - return self.get_value_for_context(context, 'user') - - def get_cluster_for_context(self, context): - return self.get_value_for_context(context, 'cluster') - -class Util: - @staticmethod - def getLogicalCores(): - return multiprocessing.cpu_count() - - @staticmethod - def getMemoryPct(pct): - with open('/proc/meminfo', 'r') as mem: - for line in mem: - entries = line.split() - if str(entries.pop(0)) == 'MemTotal:': - mem_total_kb = Decimal(entries.pop(0)) - mem_capacity_kb = mem_total_kb * Decimal(pct) - return str(mem_capacity_kb.to_integral_value() * 1024) - - return "" - - @staticmethod - def remove_empty_elements(mapping): - if isinstance(mapping, dict): - m = mapping.copy() - for key, val in mapping.iteritems(): - if not val: - del m[key] - return m 
- else: - return mapping - -class NodeResources: - def __init__(self, version, cpu=None, memory=None): - if version == 'v1beta1': - self.resources = dict(capacity=dict()) - self.resources['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores() - self.resources['capacity']['memory'] = memory if cpu else Util.getMemoryPct(.75) - - def get_resources(self): - return Util.remove_empty_elements(self.resources) - -class NodeSpec: - def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None): - if version == 'v1beta3': - self.spec = dict(podCIDR=cidr, externalID=externalID, - capacity=dict()) - self.spec['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores() - self.spec['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75) - - def get_spec(self): - return Util.remove_empty_elements(self.spec) - -class NodeStatus: - def addAddresses(self, addressType, addresses): - addressList = [] - for address in addresses: - addressList.append(dict(type=addressType, address=address)) - return addressList - - def __init__(self, version, externalIPs = [], internalIPs = [], - hostnames = []): - if version == 'v1beta3': - self.status = dict(addresses = addAddresses('ExternalIP', - externalIPs) + - addAddresses('InternalIP', - internalIPs) + - addAddresses('Hostname', - hostnames)) - - def get_status(self): - return Util.remove_empty_elements(self.status) - -class Node: - def __init__(self, module, client_opts, version='v1beta1', name=None, - hostIP = None, hostnames=[], externalIPs=[], internalIPs=[], - cpu=None, memory=None, labels=dict(), annotations=dict(), - podCIDR=None, externalID=None): - self.module = module - self.client_opts = client_opts - if version == 'v1beta1': - self.node = dict(id = name, - kind = 'Node', - apiVersion = version, - hostIP = hostIP, - resources = NodeResources(version, cpu, memory), - cidr = podCIDR, - labels = labels, - annotations = annotations - ) - elif version == 'v1beta3': - metadata = dict(name = name, - labels = labels, - annotations = annotations - ) - self.node = dict(kind = 'Node', - apiVersion = version, - metadata = metadata, - spec = NodeSpec(version, cpu, memory, podCIDR, - externalID), - status = NodeStatus(version, externalIPs, - internalIPs, hostnames), - ) - - def get_name(self): - if self.node['apiVersion'] == 'v1beta1': - return self.node['id'] - elif self.node['apiVersion'] == 'v1beta3': - return self.node['name'] - - def get_node(self): - node = self.node.copy() - if self.node['apiVersion'] == 'v1beta1': - node['resources'] = self.node['resources'].get_resources() - elif self.node['apiVersion'] == 'v1beta3': - node['spec'] = self.node['spec'].get_spec() - node['status'] = self.node['status'].get_status() - return Util.remove_empty_elements(node) - - def exists(self): - _, output, error = self.module.run_command(["/usr/bin/osc", "get", - "nodes"] + self.client_opts, - check_rc = True) - if re.search(self.module.params['name'], output, re.MULTILINE): - return True - return False - - def create(self): - cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] - rc, output, error = self.module.run_command(cmd, - data=self.module.jsonify(self.get_node())) - if rc != 0: - if re.search("minion \"%s\" already exists" % self.get_name(), - error): - self.module.exit_json(changed=False, - msg="node definition already exists", - node=self.get_node()) - else: - self.module.fail_json(msg="Node creation failed.", rc=rc, - output=output, error=error, - node=self.get_node()) - else: - return True - -def main(): - module 
= AnsibleModule( - argument_spec = dict( - name = dict(required = True, type = 'str'), - host_ip = dict(type = 'str'), - hostnames = dict(type = 'list', default = []), - external_ips = dict(type = 'list', default = []), - internal_ips = dict(type = 'list', default = []), - api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 - choices = ['v1beta1', 'v1beta3']), - cpu = dict(type = 'str'), - memory = dict(type = 'str'), - labels = dict(type = 'dict', default = {}), # TODO: needs documented - annotations = dict(type = 'dict', default = {}), # TODO: needs documented - pod_cidr = dict(type = 'str'), # TODO: needs documented - external_id = dict(type = 'str'), # TODO: needs documented - client_config = dict(type = 'str'), # TODO: needs documented - client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented - client_context = dict(type = 'str', default = 'master'), # TODO: needs documented - client_user = dict(type = 'str', default = 'admin') # TODO: needs documented - ), - mutually_exclusive = [ - ['host_ip', 'external_ips'], - ['host_ip', 'internal_ips'], - ['host_ip', 'hostnames'], - ], - supports_check_mode=True - ) - - user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig')) - if not (user_has_client_config or module.params['client_config']): - module.fail_json(msg="Could not locate client configuration, " - "client_config must be specified if " - "~/.kube/.kubeconfig is not present") - - client_opts = [] - if module.params['client_config']: - client_opts.append("--kubeconfig=%s" % module.params['client_config']) - - try: - config = ClientConfig(client_opts, module) - except ClientConfigException as e: - module.fail_json(msg="Failed to get client configuration", exception=e) - - client_context = module.params['client_context'] - if config.has_context(client_context): - if client_context != config.current_context(): - client_opts.append("--context=%s" % client_context) - else: - module.fail_json(msg="Context %s not found in client config" % - client_context) - - client_user = module.params['client_user'] - if config.has_user(client_user): - if client_user != config.get_user_for_context(client_context): - client_opts.append("--user=%s" % client_user) - else: - module.fail_json(msg="User %s not found in client config" % - client_user) - - client_cluster = module.params['client_cluster'] - if config.has_cluster(client_cluster): - if client_cluster != config.get_cluster_for_context(client_cluster): - client_opts.append("--cluster=%s" % client_cluster) - else: - module.fail_json(msg="Cluster %s not found in client config" % - client_cluster) - - # TODO: provide sane defaults for some (like hostname, externalIP, - # internalIP, etc) - node = Node(module, client_opts, module.params['api_version'], - module.params['name'], module.params['host_ip'], - module.params['hostnames'], module.params['external_ips'], - module.params['internal_ips'], module.params['cpu'], - module.params['memory'], module.params['labels'], - module.params['annotations'], module.params['pod_cidr'], - module.params['external_id']) - - # TODO: attempt to support changing node settings where possible and/or - # modifying node resources - if node.exists(): - module.exit_json(changed=False, node=node.get_node()) - elif module.check_mode: - module.exit_json(changed=True, node=node.get_node()) - else: - if node.create(): - module.exit_json(changed=True, - msg="Node created successfully", - node=node.get_node()) - else: - 
module.fail_json(msg="Unknown error creating node", - node=node.get_node()) - - -# import module snippets -from ansible.module_utils.basic import * -if __name__ == '__main__': - main() diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e380ba1fb..c039e3f05 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,27 +1,29 @@ --- +- name: Test if node certs and config exist + stat: path={{ item }} + failed_when: not result.stat.exists + register: result + with_items: + - "{{ cert_path }}" + - "{{ cert_path }}/cert.crt" + - "{{ cert_path }}/key.key" + - "{{ cert_path }}/.kubeconfig" + - "{{ cert_path }}/server.crt" + - "{{ cert_path }}/server.key" + - "{{ cert_parent_path }}/ca/cert.crt" + #- "{{ cert_path }}/node.yaml" + - name: Install OpenShift Node package yum: pkg=openshift-node state=installed -- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - -- name: Retrieve OpenShift Master credentials - local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }} - ignore_errors: yes - -- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory - -- name: Store OpenShift Master credentials - local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin - ignore_errors: yes - -- local_action: file name={{ mktemp.stdout }} state=absent - +# --create-certs=false is a temporary workaround until +# https://github.com/openshift/origin/pull/1361 is merged upstream and it is +# the default for nodes - name: Configure OpenShift Node settings lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }}\"" + line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\"" notify: - restart openshift-node @@ -47,42 +49,10 @@ option: externally_managed value: "{{ openshift_node_manage_service_externally }}" -# fixme: Once the openshift_cluster playbook is published state should be started -# Always bounce service to pick up new credentials - name: Start and enable openshift-node - service: name=openshift-node enabled=yes state=restarted + service: name=openshift-node enabled=yes state=started when: not openshift_node_manage_service_externally - name: Disable openshift-node if openshift-node is managed externally service: name=openshift-node enabled=false when: openshift_node_manage_service_externally - -# TODO: create an os_vars role that has generic env related config and move -# the root kubeconfig setting there, cannot use dependencies to force ordering -# with openshift_node and openshift_master because the way conditional -# dependencies work with current ansible would also exclude the -# openshift_common dependency. 
-- name: Create .kube directory - file: - path: /root/.kube - state: directory - mode: 0700 -- name: Configure root user kubeconfig - command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig - args: - creates: /root/.kube/.kubeconfig - -- name: Register node (if not already registered) - openshift_register_node: - name: "{{ openshift_hostname }}" - api_version: v1beta1 - cpu: "{{ openshift_node_resources.cpu }}" - memory: "{{ openshift_node_resources.memory }}" - pod_cidr: "{{ openshift_node_resources.cidr }}" - host_ip: "{{ ansible_default_ipv4.address }}" - labels: "{{ openshift_node_labels }}" - annotations: "{{ openshift_node_annotations }}" - # TODO: support customizing other attributes such as: client_config, - # client_cluster, client_context, client_user - # TODO: updated for v1beta3 changes after rebase: hostnames, external_ips, - # internal_ips, external_id diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/roles/openshift_register_nodes/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
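Before the defaults and tasks that follow, it helps to note how the role is expected to be driven: it runs on a master, pre-creates per-node certificates and kubeconfigs under openshift_cert_dir, and then registers each entry of openshift_nodes through the kubernetes_register_node module. A minimal sketch of wiring it into a play, assuming openshift_nodes is defined as sketched earlier and that a "masters" host group exists (an illustrative name, not something this patch defines):

    - name: Register OpenShift nodes from a master
      hosts: masters
      roles:
        - openshift_register_nodes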
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml new file mode 100644 index 000000000..3501e8922 --- /dev/null +++ b/roles/openshift_register_nodes/defaults/main.yml @@ -0,0 +1,5 @@ +--- +openshift_kube_api_version: v1beta1 +openshift_cert_dir: openshift.local.certificates +openshift_cert_dir_parent: /var/lib/openshift +openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}" diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py new file mode 100644 index 000000000..409215616 --- /dev/null +++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py @@ -0,0 +1,370 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +import os +import multiprocessing +import socket +from subprocess import check_output, Popen +from decimal import * + +DOCUMENTATION = ''' +--- +module: kubernetes_register_node +short_description: Registers a kubernetes node with a master +description: + - Registers a kubernetes node with a master +options: + name: + default: null + description: + - Identifier for this node (usually the node fqdn). + required: true + api_verison: + choices: ['v1beta1', 'v1beta3'] + default: 'v1beta1' + description: + - Kubernetes API version to use + required: true + host_ip: + default: null + description: + - IP Address to associate with the node when registering. + Available in the following API versions: v1beta1. + required: false + hostnames: + default: [] + description: + - Valid hostnames for this node. Available in the following API + versions: v1beta3. + required: false + external_ips: + default: [] + description: + - External IP Addresses for this node. Available in the following API + versions: v1beta3. + required: false + internal_ips: + default: [] + description: + - Internal IP Addresses for this node. Available in the following API + versions: v1beta3. + required: false + cpu: + default: null + description: + - Number of CPUs to allocate for this node. When using the v1beta1 + API, you must specify the CPU count as a floating point number + with no more than 3 decimal places. API version v1beta3 and newer + accepts arbitrary float values. + required: false + memory: + default: null + description: + - Memory available for this node. When using the v1beta1 API, you + must specify the memory size in bytes. API version v1beta3 and + newer accepts binary SI and decimal SI values. 
+ required: false +''' +EXAMPLES = ''' +# Minimal node registration +- openshift_register_node: name=ose3.node.example.com + +# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of +# Memory +- openshift_register_node: + name: ose3.node.example.com + api_version: v1beta1 + hostIP: 192.168.1.1 + cpu: 1 + memory: 500000000 + +# Node registration using the v1beta3 API, setting an alternate hostname, +# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory +- openshift_register_node: + name: ose3.node.example.com + api_version: v1beta3 + external_ips: ['192.168.1.5'] + internal_ips: ['10.0.0.5'] + hostnames: ['ose2.node.internal.local'] + cpu: 3.5 + memory: 1Ti +''' + + +class ClientConfigException(Exception): + pass + +class ClientConfig: + def __init__(self, client_opts, module): + _, output, error = module.run_command(["/usr/bin/openshift", "ex", + "config", "view", "-o", + "json"] + client_opts, + check_rc = True) + self.config = json.loads(output) + + if not (bool(self.config['clusters']) or + bool(self.config['contexts']) or + bool(self.config['current-context']) or + bool(self.config['users'])): + raise ClientConfigException(msg="Client config missing required " \ + "values", + output=output) + + def current_context(self): + return self.config['current-context'] + + def section_has_value(self, section_name, value): + section = self.config[section_name] + if isinstance(section, dict): + return value in section + else: + val = next((item for item in section + if item['name'] == value), None) + return val is not None + + def has_context(self, context): + return self.section_has_value('contexts', context) + + def has_user(self, user): + return self.section_has_value('users', user) + + def has_cluster(self, cluster): + return self.section_has_value('clusters', cluster) + + def get_value_for_context(self, context, attribute): + contexts = self.config['contexts'] + if isinstance(contexts, dict): + return contexts[context][attribute] + else: + return next((c['context'][attribute] for c in contexts + if c['name'] == context), None) + + def get_user_for_context(self, context): + return self.get_value_for_context(context, 'user') + + def get_cluster_for_context(self, context): + return self.get_value_for_context(context, 'cluster') + +class Util: + @staticmethod + def remove_empty_elements(mapping): + if isinstance(mapping, dict): + m = mapping.copy() + for key, val in mapping.iteritems(): + if not val: + del m[key] + return m + else: + return mapping + +class NodeResources: + def __init__(self, version, cpu=None, memory=None): + if version == 'v1beta1': + self.resources = dict(capacity=dict()) + self.resources['capacity']['cpu'] = cpu + self.resources['capacity']['memory'] = memory + + def get_resources(self): + return Util.remove_empty_elements(self.resources) + +class NodeSpec: + def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None): + if version == 'v1beta3': + self.spec = dict(podCIDR=cidr, externalID=externalID, + capacity=dict()) + self.spec['capacity']['cpu'] = cpu + self.spec['capacity']['memory'] = memory + + def get_spec(self): + return Util.remove_empty_elements(self.spec) + +class NodeStatus: + def addAddresses(self, addressType, addresses): + addressList = [] + for address in addresses: + addressList.append(dict(type=addressType, address=address)) + return addressList + + def __init__(self, version, externalIPs = [], internalIPs = [], + hostnames = []): + if version == 'v1beta3': + self.status = dict(addresses = 
addAddresses('ExternalIP', + externalIPs) + + addAddresses('InternalIP', + internalIPs) + + addAddresses('Hostname', + hostnames)) + + def get_status(self): + return Util.remove_empty_elements(self.status) + +class Node: + def __init__(self, module, client_opts, version='v1beta1', name=None, + hostIP = None, hostnames=[], externalIPs=[], internalIPs=[], + cpu=None, memory=None, labels=dict(), annotations=dict(), + podCIDR=None, externalID=None): + self.module = module + self.client_opts = client_opts + if version == 'v1beta1': + self.node = dict(id = name, + kind = 'Node', + apiVersion = version, + hostIP = hostIP, + resources = NodeResources(version, cpu, memory), + cidr = podCIDR, + labels = labels, + annotations = annotations + ) + elif version == 'v1beta3': + metadata = dict(name = name, + labels = labels, + annotations = annotations + ) + self.node = dict(kind = 'Node', + apiVersion = version, + metadata = metadata, + spec = NodeSpec(version, cpu, memory, podCIDR, + externalID), + status = NodeStatus(version, externalIPs, + internalIPs, hostnames), + ) + + def get_name(self): + if self.node['apiVersion'] == 'v1beta1': + return self.node['id'] + elif self.node['apiVersion'] == 'v1beta3': + return self.node['name'] + + def get_node(self): + node = self.node.copy() + if self.node['apiVersion'] == 'v1beta1': + node['resources'] = self.node['resources'].get_resources() + elif self.node['apiVersion'] == 'v1beta3': + node['spec'] = self.node['spec'].get_spec() + node['status'] = self.node['status'].get_status() + return Util.remove_empty_elements(node) + + def exists(self): + _, output, error = self.module.run_command(["/usr/bin/osc", "get", + "nodes"] + self.client_opts, + check_rc = True) + if re.search(self.module.params['name'], output, re.MULTILINE): + return True + return False + + def create(self): + cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] + rc, output, error = self.module.run_command(cmd, + data=self.module.jsonify(self.get_node())) + if rc != 0: + if re.search("minion \"%s\" already exists" % self.get_name(), + error): + self.module.exit_json(changed=False, + msg="node definition already exists", + node=self.get_node()) + else: + self.module.fail_json(msg="Node creation failed.", rc=rc, + output=output, error=error, + node=self.get_node()) + else: + return True + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required = True, type = 'str'), + host_ip = dict(type = 'str'), + hostnames = dict(type = 'list', default = []), + external_ips = dict(type = 'list', default = []), + internal_ips = dict(type = 'list', default = []), + api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 + choices = ['v1beta1', 'v1beta3']), + cpu = dict(type = 'str'), + memory = dict(type = 'str'), + labels = dict(type = 'dict', default = {}), # TODO: needs documented + annotations = dict(type = 'dict', default = {}), # TODO: needs documented + pod_cidr = dict(type = 'str'), # TODO: needs documented + external_id = dict(type = 'str'), # TODO: needs documented + client_config = dict(type = 'str'), # TODO: needs documented + client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented + client_context = dict(type = 'str', default = 'master'), # TODO: needs documented + client_user = dict(type = 'str', default = 'admin') # TODO: needs documented + ), + mutually_exclusive = [ + ['host_ip', 'external_ips'], + ['host_ip', 'internal_ips'], + ['host_ip', 'hostnames'], + ], + 
supports_check_mode=True + ) + + user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig')) + if not (user_has_client_config or module.params['client_config']): + module.fail_json(msg="Could not locate client configuration, " + "client_config must be specified if " + "~/.kube/.kubeconfig is not present") + + client_opts = [] + if module.params['client_config']: + client_opts.append("--kubeconfig=%s" % module.params['client_config']) + + try: + config = ClientConfig(client_opts, module) + except ClientConfigException as e: + module.fail_json(msg="Failed to get client configuration", exception=e) + + client_context = module.params['client_context'] + if config.has_context(client_context): + if client_context != config.current_context(): + client_opts.append("--context=%s" % client_context) + else: + module.fail_json(msg="Context %s not found in client config" % + client_context) + + client_user = module.params['client_user'] + if config.has_user(client_user): + if client_user != config.get_user_for_context(client_context): + client_opts.append("--user=%s" % client_user) + else: + module.fail_json(msg="User %s not found in client config" % + client_user) + + client_cluster = module.params['client_cluster'] + if config.has_cluster(client_cluster): + if client_cluster != config.get_cluster_for_context(client_cluster): + client_opts.append("--cluster=%s" % client_cluster) + else: + module.fail_json(msg="Cluster %s not found in client config" % + client_cluster) + + # TODO: provide sane defaults for some (like hostname, externalIP, + # internalIP, etc) + node = Node(module, client_opts, module.params['api_version'], + module.params['name'], module.params['host_ip'], + module.params['hostnames'], module.params['external_ips'], + module.params['internal_ips'], module.params['cpu'], + module.params['memory'], module.params['labels'], + module.params['annotations'], module.params['pod_cidr'], + module.params['external_id']) + + # TODO: attempt to support changing node settings where possible and/or + # modifying node resources + if node.exists(): + module.exit_json(changed=False, node=node.get_node()) + elif module.check_mode: + module.exit_json(changed=True, node=node.get_node()) + else: + if node.create(): + module.exit_json(changed=True, + msg="Node created successfully", + node=node.get_node()) + else: + module.fail_json(msg="Unknown error creating node", + node=node.get_node()) + + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml new file mode 100644 index 000000000..7b1f0ef0a --- /dev/null +++ b/roles/openshift_register_nodes/meta/main.yml @@ -0,0 +1,128 @@ +--- +galaxy_info: + author: your name + description: + company: your company (optional) + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! 
+ # + #platforms: + #- name: EL + # versions: + # - all + # - 5 + # - 6 + # - 7 + #- name: GenericUNIX + # versions: + # - all + # - any + #- name: Fedora + # versions: + # - all + # - 16 + # - 17 + # - 18 + # - 19 + # - 20 + #- name: SmartOS + # versions: + # - all + # - any + #- name: opensuse + # versions: + # - all + # - 12.1 + # - 12.2 + # - 12.3 + # - 13.1 + # - 13.2 + #- name: Amazon + # versions: + # - all + # - 2013.03 + # - 2013.09 + #- name: GenericBSD + # versions: + # - all + # - any + #- name: FreeBSD + # versions: + # - all + # - 8.0 + # - 8.1 + # - 8.2 + # - 8.3 + # - 8.4 + # - 9.0 + # - 9.1 + # - 9.1 + # - 9.2 + #- name: Ubuntu + # versions: + # - all + # - lucid + # - maverick + # - natty + # - oneiric + # - precise + # - quantal + # - raring + # - saucy + # - trusty + #- name: SLES + # versions: + # - all + # - 10SP3 + # - 10SP4 + # - 11 + # - 11SP1 + # - 11SP2 + # - 11SP3 + #- name: GenericLinux + # versions: + # - all + # - any + #- name: Debian + # versions: + # - all + # - etch + # - lenny + # - squeeze + # - wheezy + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + # + #categories: + #- cloud + #- cloud:ec2 + #- cloud:gce + #- cloud:rax + #- clustering + #- database + #- database:nosql + #- database:sql + #- development + #- monitoring + #- networking + #- packaging + #- system + #- web +dependencies: [] + # List your role dependencies here, one per line. Only + # dependencies available via galaxy should be listed here. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml new file mode 100644 index 000000000..59216fc87 --- /dev/null +++ b/roles/openshift_register_nodes/tasks/main.yml @@ -0,0 +1,71 @@ +--- +# TODO: support configuration for multiple masters, currently hardcoding +# the info from the first master + +# TODO: create a failed_when condition +- name: Create node server certificates + command: > + /usr/bin/openshift admin create-server-cert + --overwrite=false + --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key + --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }} + args: + chdir: "{{ openshift_cert_dir_parent }}" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt" + with_items: openshift_nodes + register: server_cert_result + +# TODO: create a failed_when condition +- name: Create node client certificates + command: > + /usr/bin/openshift admin create-node-cert + --overwrite=false + --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key + --node-name={{ item.openshift_node_hostname }} + args: + chdir: "{{ openshift_cert_dir_parent }}" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt" + with_items: openshift_nodes + register: node_cert_result + +# TODO: re-create kubeconfig if certs were regenerated, not just if +# .kubeconfig doesn't exist +# TODO: create a failed_when condition +- name: Create kubeconfigs for nodes + command: > + /usr/bin/openshift admin create-kubeconfig + --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt + 
--client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key + --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig + --master={{ openshift_master_urls[0] }} + --public-master={{ openshift_master_public_urls[0] }} + args: + chdir: "{{ openshift_cert_dir_parent }}" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig" + with_items: openshift_nodes + register: kubeconfig_result + +# TODO: generate the node configs (openshift start node --write-config +# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml' +# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig' +# will need to modify the generated node config as needed +# (servingInfo.{certFile,clientCA,keyFile}) + +- name: Register unregistered nodes + kubernetes_register_node: + name: "{{ item.openshift_node_name }}" + api_version: "{{ openshift_kube_api_version }}" + cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}" + memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}" + pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}" + host_ip: "{{ item.openshift_node_host_ip }}" + labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}" + annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}" + # TODO: support customizing other attributes such as: client_config, + # client_cluster, client_context, client_user + # TODO: update for v1beta3 changes after rebase: hostnames, external_ips, + # internal_ips, external_id + with_items: openshift_nodes + register: register_result diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md index 294550219..33197c241 100644 --- a/roles/openshift_sdn_node/README.md +++ b/roles/openshift_sdn_node/README.md @@ -29,7 +29,7 @@ From openshift_common: | openshift_debug_level | 0 | Global openshift debug log verbosity | | openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance | +| openshift_hostname | UNDEF (Required) | hostname to use for this instance | Dependencies ------------ -- cgit v1.2.3 From de1391db4309f020b5c8467597eef527b560bbaa Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 23:36:08 -0400 Subject: remove openshift_hostname_workaround var for openshift_common, rather rely on inventory/playbook variables for openshift_hostname --- roles/openshift_common/README.md | 1 - roles/openshift_common/defaults/main.yml | 5 ----- roles/openshift_master/README.md | 1 - roles/openshift_node/README.md | 1 - roles/openshift_sdn_node/README.md | 1 - 5 files changed, 9 deletions(-) diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index 592a276f9..880d66e2c 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -15,7 +15,6 @@ Role Variables | Name | Default value | | |-------------------------------|------------------------------|----------------------------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address | | openshift_hostname | UNDEF (Required) | hostname to use for this 
instance | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_env | default | Envrionment name if multiple OpenShift instances | diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index 86351f6f6..22b2c6ffd 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -1,7 +1,2 @@ --- openshift_debug_level: 0 - -# TODO: Once openshift stops resolving hostnames for node queries remove -# this... -openshift_hostname_workaround: true - diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 2f03b4990..2d898bc3b 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -25,7 +25,6 @@ From openshift_common: | Name | Default Value | | |-------------------------------|---------------------|---------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index d537a35a5..c9b4eab34 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -26,7 +26,6 @@ From openshift_common: | Name | Default Value | | |-------------------------------|---------------------|---------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md index 33197c241..2da2d74eb 100644 --- a/roles/openshift_sdn_node/README.md +++ b/roles/openshift_sdn_node/README.md @@ -27,7 +27,6 @@ From openshift_common: | Name | Default value | | |-------------------------------|---------------------|----------------------------------------| | openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname_workaround | True | | | openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | | openshift_hostname | UNDEF (Required) | hostname to use for this instance | -- cgit v1.2.3 From 01ee65e99d39265f7d8db3ddbeca5d59ddfa2038 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 23:37:19 -0400 Subject: gce inventory/playbook updates for node registration changes --- inventory/gce/group_vars/all | 3 +++ inventory/gce/group_vars/tag_host-type-master | 5 +++++ inventory/gce/group_vars/tag_host-type-node | 6 ++++++ inventory/gce/group_vars/tag_host-type-openshift-master | 1 + inventory/gce/group_vars/tag_host-type-openshift-node | 1 + playbooks/gce/openshift-node/config.yml | 6 +++--- 6 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 inventory/gce/group_vars/tag_host-type-master create mode 100644 inventory/gce/group_vars/tag_host-type-node create mode 120000 inventory/gce/group_vars/tag_host-type-openshift-master create mode 120000 inventory/gce/group_vars/tag_host-type-openshift-node diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all index 4cd94c509..3e969df63 100644 --- a/inventory/gce/group_vars/all +++ b/inventory/gce/group_vars/all @@ -1,4 +1,7 @@ --- ansible_ssh_user: root +openshift_hostname: "{{ 
ansible_default_ipv4.address }}" +openshift_public_hostname: "{{ ansible_default_ipv4.address }}" +openshift_ip: "{{ ansible_default_ipv4.address }}" openshift_public_ip: "{{ gce_public_ip }}" openshift_env: "{{ oo_env }}" diff --git a/inventory/gce/group_vars/tag_host-type-master b/inventory/gce/group_vars/tag_host-type-master new file mode 100644 index 000000000..ddbdc650c --- /dev/null +++ b/inventory/gce/group_vars/tag_host-type-master @@ -0,0 +1,5 @@ +--- +openshift_api_url: https://{{ openshift_hostname }}:8443 +openshift_api_public_url: https://{{ openshift_public_hostname }}:8443 +openshift_webui_url: https://{{ openshift_hostname }}:8444 +openshift_webui_public_url: https://{{ openshift_public_hostname }}:8444 diff --git a/inventory/gce/group_vars/tag_host-type-node b/inventory/gce/group_vars/tag_host-type-node new file mode 100644 index 000000000..bb95a724d --- /dev/null +++ b/inventory/gce/group_vars/tag_host-type-node @@ -0,0 +1,6 @@ +--- +openshift_node_cpu: +openshift_node_memory: +openshift_node_pod_cidr: +openshift_node_labels: {} +openshift_node_annotations: {} diff --git a/inventory/gce/group_vars/tag_host-type-openshift-master b/inventory/gce/group_vars/tag_host-type-openshift-master new file mode 120000 index 000000000..c0c4cf370 --- /dev/null +++ b/inventory/gce/group_vars/tag_host-type-openshift-master @@ -0,0 +1 @@ +tag_host-type-master \ No newline at end of file diff --git a/inventory/gce/group_vars/tag_host-type-openshift-node b/inventory/gce/group_vars/tag_host-type-openshift-node new file mode 120000 index 000000000..ebbce6136 --- /dev/null +++ b/inventory/gce/group_vars/tag_host-type-openshift-node @@ -0,0 +1 @@ +tag_host-type-node \ No newline at end of file diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index d24acb8fa..bf28fc81d 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -6,12 +6,12 @@ add_host: "name={{ item }} groups=oo_nodes_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined - - name: Find masters for env + - name: Find masters for env add_host: "name={{ item }} groups=oo_masters_for_node_config" with_items: groups['tag_env-host-type-' + oo_env + '-openshift-master'] - name: Gather facts for masters in {{ oo_env }} - hosts: "tag_env-host-type-{{ oo_env }}-openshift-master" + hosts: tag_env-host-type-{{ oo_env }}-openshift-master tasks: - set_fact: openshift_master_ip: "{{ openshift_ip }}" @@ -68,7 +68,7 @@ # - name: Configure instances hosts: oo_nodes_to_config -vars_files: + vars_files: - vars.yml vars: openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master -- cgit v1.2.3 From 41740bc6e177e58a0aa817e2d940e60be51d3bfe Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Tue, 24 Mar 2015 09:43:36 -0700 Subject: Revert "Jwhonce wip/cluster" --- playbooks/gce/openshift-node/config.yml | 1 - roles/docker/tasks/main.yml | 2 +- roles/os_env_extras_node/README.md | 38 ------- roles/os_env_extras_node/files/enter-container.sh | 13 --- roles/os_env_extras_node/meta/main.yml | 124 ---------------------- roles/os_env_extras_node/tasks/main.yml | 7 -- roles/os_firewall/tasks/firewall/iptables.yml | 8 ++ 7 files changed, 9 insertions(+), 184 deletions(-) delete mode 100644 roles/os_env_extras_node/README.md delete mode 100755 roles/os_env_extras_node/files/enter-container.sh delete mode 100644 roles/os_env_extras_node/meta/main.yml delete mode 100644 roles/os_env_extras_node/tasks/main.yml diff --git 
a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index bf28fc81d..e0d074572 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -120,4 +120,3 @@ roles: - openshift_node - os_env_extras - - os_env_extras_node diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 593c4c877..ca700db17 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,7 +1,7 @@ --- # tasks file for docker - name: Install docker - yum: pkg=docker + yum: pkg=docker-io - name: enable and start the docker service service: name=docker enabled=yes state=started diff --git a/roles/os_env_extras_node/README.md b/roles/os_env_extras_node/README.md deleted file mode 100644 index 225dd44b9..000000000 --- a/roles/os_env_extras_node/README.md +++ /dev/null @@ -1,38 +0,0 @@ -Role Name -========= - -A brief description of the role goes here. - -Requirements ------------- - -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. - -Role Variables --------------- - -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. - -Dependencies ------------- - -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. - -Example Playbook ----------------- - -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } - -License -------- - -BSD - -Author Information ------------------- - -An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/roles/os_env_extras_node/files/enter-container.sh b/roles/os_env_extras_node/files/enter-container.sh deleted file mode 100755 index 7cf5b8d83..000000000 --- a/roles/os_env_extras_node/files/enter-container.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -if [ $# -ne 1 ] -then - echo - echo "Usage: $(basename $0) " - echo - exit 1 -fi - -PID=$(docker inspect --format '{{.State.Pid}}' $1) - -nsenter --target $PID --mount --uts --ipc --net --pid diff --git a/roles/os_env_extras_node/meta/main.yml b/roles/os_env_extras_node/meta/main.yml deleted file mode 100644 index c5c362c60..000000000 --- a/roles/os_env_extras_node/meta/main.yml +++ /dev/null @@ -1,124 +0,0 @@ ---- -galaxy_info: - author: your name - description: - company: your company (optional) - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - min_ansible_version: 1.2 - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! 
- # - #platforms: - #- name: EL - # versions: - # - all - # - 5 - # - 6 - # - 7 - #- name: GenericUNIX - # versions: - # - all - # - any - #- name: Fedora - # versions: - # - all - # - 16 - # - 17 - # - 18 - # - 19 - # - 20 - #- name: opensuse - # versions: - # - all - # - 12.1 - # - 12.2 - # - 12.3 - # - 13.1 - # - 13.2 - #- name: Amazon - # versions: - # - all - # - 2013.03 - # - 2013.09 - #- name: GenericBSD - # versions: - # - all - # - any - #- name: FreeBSD - # versions: - # - all - # - 8.0 - # - 8.1 - # - 8.2 - # - 8.3 - # - 8.4 - # - 9.0 - # - 9.1 - # - 9.1 - # - 9.2 - #- name: Ubuntu - # versions: - # - all - # - lucid - # - maverick - # - natty - # - oneiric - # - precise - # - quantal - # - raring - # - saucy - # - trusty - #- name: SLES - # versions: - # - all - # - 10SP3 - # - 10SP4 - # - 11 - # - 11SP1 - # - 11SP2 - # - 11SP3 - #- name: GenericLinux - # versions: - # - all - # - any - #- name: Debian - # versions: - # - all - # - etch - # - lenny - # - squeeze - # - wheezy - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - #- cloud - #- cloud:ec2 - #- cloud:gce - #- cloud:rax - #- clustering - #- database - #- database:nosql - #- database:sql - #- development - #- monitoring - #- networking - #- packaging - #- system - #- web -dependencies: [] - # List your role dependencies here, one per line. Only - # dependencies available via galaxy should be listed here. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml deleted file mode 100644 index 065f71f74..000000000 --- a/roles/os_env_extras_node/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755 - -# From the origin rpm there exists instructions on how to -# setup origin properly. 
The following steps come from there -- name: Change root to be in the Docker group - user: name=root groups=dockerroot append=yes diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 87e77c083..72a3401cf 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,6 +41,14 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 +# Workaround for Docker 1.4 to create DOCKER chain +- name: Add DOCKER chain + os_firewall_manage_iptables: + name: "DOCKER chain" + action: verify_chain + create_jump_rule: no +# End of Docker 1.4 workaround + - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3 From fa746f02aa275cf8e1cbe9f90ec41fc27806a0bd Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Tue, 24 Mar 2015 10:49:15 -0700 Subject: * repos role renamed to openshift_repos --- playbooks/gce/openshift-cluster/launch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index b30452725..889d92d40 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -49,7 +49,7 @@ - hosts: "tag_env-{{ cluster_id }}" roles: - - repos + - openshift_repos - os_update_latest - include: ../openshift-master/config.yml -- cgit v1.2.3 From 4dc8ca74f47bcbe0fd6285b0d73cc5b193be17a9 Mon Sep 17 00:00:00 2001 From: Jhon Honce Date: Tue, 24 Mar 2015 12:40:21 -0700 Subject: * Remove DOCKER chain work around --- roles/os_firewall/tasks/firewall/iptables.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 72a3401cf..87e77c083 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -41,14 +41,6 @@ changed_when: "'firewalld' in result.stdout" when: pkg_check.rc == 0 -# Workaround for Docker 1.4 to create DOCKER chain -- name: Add DOCKER chain - os_firewall_manage_iptables: - name: "DOCKER chain" - action: verify_chain - create_jump_rule: no -# End of Docker 1.4 workaround - - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" -- cgit v1.2.3
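
Note on the workaround removed in the final patch above: the DOCKER chain task was labeled a Docker 1.4 workaround in its own comment. If a deployment pinned to Docker 1.4 ever needs it back, a guarded variant of the deleted task could look like the sketch below. This is a hypothetical illustration, not part of the patch series: the os_firewall_manage_iptables parameters are copied verbatim from the removed hunk, while the docker_rpm version probe, the rpm package name, and the version_compare guard are assumptions.

    # Hypothetical sketch: restore the Docker 1.4 DOCKER chain workaround only
    # on hosts where the installed Docker is older than 1.5. Assumes an
    # rpm-based host; the package may be named docker or docker-io depending
    # on the distribution.
    - name: Check installed Docker version
      command: rpm -q --queryformat '%{VERSION}' docker
      register: docker_rpm
      changed_when: false
      failed_when: false

    - name: Add DOCKER chain (Docker 1.4 workaround)
      os_firewall_manage_iptables:
        name: "DOCKER chain"
        action: verify_chain
        create_jump_rule: no
      when: docker_rpm.rc == 0 and docker_rpm.stdout | version_compare('1.5', '<')

Because the module parameters match the deleted task exactly, restoring the workaround this way would not require any changes to the os_firewall_manage_iptables module itself.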