-rw-r--r--  playbooks/adhoc/uninstall.yml                     | 12
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  | 20
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2    |  1
-rw-r--r--  utils/src/ooinstall/cli_installer.py              | 73
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py          | 41
5 files changed, 98 insertions, 49 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0503b7cd4..e05ab43f8 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -45,6 +45,7 @@
- origin-master-api
- origin-master-controllers
- origin-node
+ - pcsd
- yum: name={{ item }} state=absent
when: not is_atomic | bool
@@ -59,6 +60,7 @@
- atomic-openshift-node
- atomic-openshift-sdn-ovs
- etcd
+ - corosync
- openshift
- openshift-master
- openshift-node
@@ -69,6 +71,8 @@
- origin-master
- origin-node
- origin-sdn-ovs
+ - pacemaker
+ - pcs
- tuned-profiles-atomic-enterprise-node
- tuned-profiles-atomic-openshift-node
- tuned-profiles-openshift-node
@@ -136,8 +140,10 @@
- file: path={{ item }} state=absent
with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
+ - /etc/corosync
- /etc/etcd
- /etc/openshift
- /etc/openshift-sdn
@@ -151,9 +157,13 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-node
- /root/.kube
- - "~{{ ansible_ssh_user }}/.kube"
+ - /run/openshift-sdn
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/etcd
- /var/lib/openshift
- /var/lib/origin
+ - /var/lib/pacemaker
+
+ - name: restart docker
+ service: name=docker state=restarted
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ae5d99121..134734a65 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -607,11 +607,12 @@ def set_version_facts_if_unset(facts):
facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
return facts
-def set_sdn_facts_if_unset(facts):
+def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
+ system_facts (dict): ansible_facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
@@ -630,9 +631,18 @@ def set_sdn_facts_if_unset(facts):
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
- if 'node' in facts:
- if 'sdn_mtu' not in facts['node']:
- facts['node']['sdn_mtu'] = '1450'
+ if 'node' in facts and 'sdn_mtu' not in facts['node']:
+ node_ip = facts['common']['ip']
+
+ # default MTU if interface MTU cannot be detected
+ facts['node']['sdn_mtu'] = '1450'
+
+ for val in system_facts.itervalues():
+ if isinstance(val, dict) and 'mtu' in val:
+ mtu = val['mtu']
+
+ if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
+ facts['node']['sdn_mtu'] = str(mtu - 50)
return facts
@@ -903,7 +913,7 @@ class OpenShiftFacts(object):
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
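Note: the reworked set_sdn_facts_if_unset() derives the SDN MTU by scanning the gathered Ansible facts for the interface whose IPv4 address matches the node IP and subtracting 50 bytes of headroom for the SDN (VXLAN) encapsulation, keeping 1450 as the fallback when no interface matches. A minimal standalone sketch of that lookup; the function name and the eth0/lo fact values below are illustrative, not part of the patch:

    # Standalone sketch of the MTU-detection logic added above; the fact layout
    # mirrors what Ansible gathers (ansible_eth0, ansible_lo, ...) but the
    # values are illustrative only.
    def detect_sdn_mtu(system_facts, node_ip, default_mtu='1450', overhead=50):
        sdn_mtu = default_mtu  # fallback when no interface matches the node IP
        for val in system_facts.values():
            if isinstance(val, dict) and 'mtu' in val:
                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    # leave room for the SDN (VXLAN) encapsulation headers
                    sdn_mtu = str(val['mtu'] - overhead)
        return sdn_mtu

    sample_facts = {
        'ansible_lo':   {'mtu': 65536, 'ipv4': {'address': '127.0.0.1'}},
        'ansible_eth0': {'mtu': 9000,  'ipv4': {'address': '192.168.10.5'}},
    }
    print(detect_sdn_mtu(sample_facts, '192.168.10.5'))  # -> '8950'
    print(detect_sdn_mtu(sample_facts, '10.0.0.1'))      # -> '1450'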
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 4931d127e..509cce2e0 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -22,6 +22,7 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+nodeIP: {{ openshift.common.ip }}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
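Note: the template now pins nodeIP to the fact detected by openshift_facts instead of leaving the node to auto-detect its address. A quick sketch of how the touched lines render, using the jinja2 library directly; the fact values are illustrative:

    # Render of the template lines touched above, with illustrative facts.
    from jinja2 import Template

    snippet = Template(
        "nodeIP: {{ openshift.common.ip }}\n"
        "nodeName: {{ openshift.common.hostname | lower }}\n"
    )
    facts = {'common': {'ip': '192.168.10.5', 'hostname': 'Node01.Example.Com'}}
    print(snippet.render(openshift=facts))
    # nodeIP: 192.168.10.5
    # nodeName: node01.example.com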
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index e4fda2813..8bee99f90 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -331,7 +331,22 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
# Check if master or nodes already have something installed
installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
if len(installed_hosts) > 0:
- # present a message listing already installed hosts
+ click.echo('Installed environment detected.')
+ # This check has to happen before we start removing hosts later in this method
+ if not force:
+ if not unattended:
+ click.echo('By default the installer only adds new nodes to an installed environment.')
+ response = click.prompt('Do you want to (1) only add additional nodes or ' \
+ '(2) perform a clean install?', type=int)
+ # TODO: this should be reworked with error handling.
+ # Click can certainly do this for us.
+ # This should be refactored as soon as we add a 3rd option.
+ if response == 1:
+ force = False
+ if response == 2:
+ force = True
+
+ # present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
if host.master:
click.echo("{} is already an OpenShift Master".format(host))
@@ -339,32 +354,42 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
# new nodes.
elif host.node:
click.echo("{} is already an OpenShift Node".format(host))
- hosts_to_run_on.remove(host)
- # for unattended either continue if they force install or exit if they didn't
- if unattended:
- if not force:
- click.echo('Installed environment detected and no additional nodes specified: ' \
- 'aborting. If you want a fresh install, use --force')
- sys.exit(1)
- # for attended ask the user what to do
+ # force is only used for reinstalls so we don't want to remove
+ # anything.
+ if not force:
+ hosts_to_run_on.remove(host)
+
+ # Handle the cases where we know about uninstalled systems
+ new_hosts = set(hosts_to_run_on) - set(installed_hosts)
+ if len(new_hosts) > 0:
+ for new_host in new_hosts:
+ click.echo("{} is currently uninstalled".format(new_host))
+
+ # Fall through
+ click.echo('Adding additional nodes...')
else:
- click.echo('Installed environment detected and no additional nodes specified. ')
- response = click.prompt('Do you want to (1) add more nodes or ' \
- '(2) perform a clean install?', type=int)
- if response == 1: # add more nodes
- new_nodes = collect_new_nodes()
-
- hosts_to_run_on.extend(new_nodes)
- oo_cfg.hosts.extend(new_nodes)
-
- openshift_ansible.set_config(oo_cfg)
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
- if error:
- click.echo("There was a problem fetching the required information. " \
- "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ if unattended:
+ if not force:
+ click.echo('Installed environment detected and no additional nodes specified: ' \
+ 'aborting. If you want a fresh install, use ' \
+ '`atomic-openshift-installer install --force`')
sys.exit(1)
else:
- pass # proceeding as normal should do a clean install
+ if not force:
+ new_nodes = collect_new_nodes()
+
+ hosts_to_run_on.extend(new_nodes)
+ oo_cfg.hosts.extend(new_nodes)
+
+ openshift_ansible.set_config(oo_cfg)
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+ else:
+ pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
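Note: the reworked get_hosts_to_run_on() boils down to a small decision table: already-installed hosts are dropped from the run list unless force (a reinstall) was requested, the attended prompt maps answer 1/2 onto force, and an unattended run that finds installed hosts without --force aborts. A condensed, hypothetical sketch of that flow; the names and structure are illustrative, not the real installer API, and the add-new-nodes collection step is omitted:

    # Condensed, hypothetical sketch of the force/unattended/installed-host
    # interactions above; not the real installer API.
    def plan_run(hosts, installed, unattended, force, prompt=lambda: 1):
        if installed and not force and not unattended:
            # attended: answer 1 keeps "add nodes only", answer 2 means clean install
            force = (prompt() == 2)
        if installed and unattended and not force:
            raise SystemExit('installed environment detected; use --force to reinstall')
        if force:
            return list(hosts)                               # reinstall everything
        return [h for h in hosts if h not in installed]      # only touch new hosts

    print(plan_run(['m1', 'n1', 'n2'], {'m1', 'n1'}, unattended=False, force=False))
    # ['n2']  (add-nodes-only path)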
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index e33330102..0648df0fa 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -18,7 +18,6 @@ def set_config(cfg):
def generate_inventory(hosts):
global CFG
- installer_host = socket.gethostname()
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
@@ -32,25 +31,18 @@ def generate_inventory(hosts):
version=CFG.settings.get('variant_version', None))[1]
base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
- if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ:
- base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:'
- '5001/openshift3/ose-${component}:${version}\n')
- if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ:
- base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', "
+ if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_additional_registries={}\n'
+ .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_insecure_registries={}\n'
+ .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+ # We have to double the '{' here for literals
+ base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
"'name': 'ose-devel', "
- "'baseurl': 'http://buildvm-devops.usersys.redhat.com"
- "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', "
- "'enabled': 1, 'gpgcheck': 0}]\n")
- if 'OO_INSTALL_STAGE_REGISTRY' in os.environ:
- base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n')
-
- if any(host.hostname == installer_host or host.public_hostname == installer_host
- for host in hosts):
- no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive'])
- if no_pwd_sudo == 1:
- print 'The atomic-openshift-installer requires sudo access without a password.'
- sys.exit(1)
- base_inventory.write("ansible_connection=local\n")
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
base_inventory.write('\n[masters]\n')
masters = (host for host in hosts if host.master)
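Note: the "We have to double the '{' here for literals" comment refers to str.format() brace escaping: once the line goes through .format(), any literal brace that should survive in the generated inventory has to be written as {{ or }}. A quick illustration with a made-up repo URL:

    # str.format() treats single braces as placeholders, so literal braces in
    # the openshift_additional_repos line must be doubled to survive formatting.
    repo_url = 'http://example.com/puddle/os'   # illustrative URL only
    line = ("openshift_additional_repos=[{{'id': 'ose-devel', "
            "'baseurl': '{}', 'gpgcheck': 0}}]\n".format(repo_url))
    print(line)
    # openshift_additional_repos=[{'id': 'ose-devel', 'baseurl': 'http://example.com/puddle/os', 'gpgcheck': 0}]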
@@ -72,6 +64,7 @@ def generate_inventory(hosts):
def write_host(host, inventory, scheduleable=True):
global CFG
+
facts = ''
if host.ip:
facts += ' openshift_ip={}'.format(host.ip)
@@ -85,6 +78,16 @@ def write_host(host, inventory, scheduleable=True):
# Technically only nodes will ever need this.
if not scheduleable:
facts += ' openshift_scheduleable=False'
+ installer_host = socket.gethostname()
+ if host.hostname == installer_host or host.public_hostname == installer_host:
+ facts += ' ansible_connection=local'
+ if os.geteuid() != 0:
+ no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+ if no_pwd_sudo == 1:
+ print 'The atomic-openshift-installer requires sudo access without a password.'
+ sys.exit(1)
+ facts += ' ansible_become=true'
+
inventory.write('{} {}\n'.format(host, facts))
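Note: write_host() now decides per host whether the installer itself is running on that machine: if the hostname matches, it writes ansible_connection=local and, for non-root users, verifies passwordless sudo with `sudo -v -n` (which exits non-zero when a password would be required) before adding ansible_become=true. A minimal sketch of that check in isolation; the function name and exit handling are illustrative, not the installer's actual module layout:

    # Minimal sketch of the local-host / passwordless-sudo check above;
    # illustrative only, not the installer's actual module layout.
    import os
    import socket
    import subprocess
    import sys

    def local_connection_facts(hostname, public_hostname):
        installer_host = socket.gethostname()
        facts = ''
        if installer_host in (hostname, public_hostname):
            facts += ' ansible_connection=local'
            if os.geteuid() != 0:
                # `sudo -v -n` exits non-zero if a password would be needed
                if subprocess.call(['sudo', '-v', '-n']) != 0:
                    sys.exit('passwordless sudo is required on the installer host')
                facts += ' ansible_become=true'
        return facts

    # A host that is not the installer machine gets no extra facts; running this
    # on the installer host itself would add the local-connection/sudo facts.
    print(repr(local_connection_facts('node1.example.com', 'node1.example.com')))  # ''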