Diffstat (limited to 'utils')
26 files changed, 1427 insertions, 470 deletions
diff --git a/utils/.coveragerc b/utils/.coveragerc
new file mode 100644
index 000000000..e1d918755
--- /dev/null
+++ b/utils/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+omit=
+    */lib/python*/site-packages/*
+    */lib/python*/*
+    /usr/*
diff --git a/utils/.pylintrc b/utils/.pylintrc
new file mode 120000
index 000000000..30b33b524
--- /dev/null
+++ b/utils/.pylintrc
@@ -0,0 +1 @@
+../.pylintrc
\ No newline at end of file
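A note on the new `.coveragerc`: it keeps virtualenv and system site-packages out of coverage reports. coverage.py matches `omit` entries as fnmatch-style path patterns; the following is a rough, self-contained illustration of that matching (the sample paths are hypothetical, not from this repo):

```python
# Illustrative only: shows how fnmatch-style patterns like those in
# .coveragerc classify file paths. coverage.py applies similar matching
# internally; the paths below are made-up examples.
from fnmatch import fnmatch

OMIT = ['*/lib/python*/site-packages/*', '*/lib/python*/*', '/usr/*']

for path in ['oo-installenv/lib/python2.7/site-packages/yaml/parser.py',
             '/usr/lib64/python2.7/json/decoder.py',
             'src/ooinstall/cli_installer.py']:
    omitted = any(fnmatch(path, pattern) for pattern in OMIT)
    print('%-60s omitted=%s' % (path, omitted))
```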
diff --git a/utils/Makefile b/utils/Makefile
index 79c27626a..038c31fcf 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -22,9 +22,21 @@
 NAME := oo-install
+VENV := $(NAME)env
 TESTPACKAGE := oo-install
 SHORTNAME := ooinstall
+# This doesn't evaluate until it's called. The -D argument is the
+# directory of the target file ($@), kinda like `dirname`.
+ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
+MANPAGES := docs/man/man1/atomic-openshift-installer.1
+# slipped into the manpage template before a2x processing
+VERSION := 1.4
+
+# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
+YAMLFILES = $(shell find ../ -name $(VENV) -prune -o -name .tox -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" -print 2>&1)
+PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name .tox -prune -o -name "*.py" -print)
+
 sdist: clean
 	python setup.py sdist
 	rm -fR $(SHORTNAME).egg-info
@@ -33,51 +45,66 @@ clean:
 	@find . -type f -regex ".*\.py[co]$$" -delete
 	@find . -type f \( -name "*~" -or -name "#*" \) -delete
 	@rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
-	@rm -fR $(NAME)env
+	@rm -fR $(VENV)
+	@rm -fR .tox
+
+# To force a rebuild of the docs run 'touch' on any *.in file under
+# docs/man/man1/
+docs: $(MANPAGES)
+
+# Regenerate %.1.asciidoc if %.1.asciidoc.in has been modified more
+# recently than %.1.asciidoc.
+%.1.asciidoc: %.1.asciidoc.in
+	sed "s/%VERSION%/$(VERSION)/" $< > $@
+
+# Regenerate %.1 if %.1.asciidoc or VERSION has been modified more
+# recently than %.1. (Implicitly runs the %.1.asciidoc recipe)
+%.1: %.1.asciidoc
+	$(ASCII2MAN)
 
 viewcover:
 	xdg-open cover/index.html
 
-virtualenv:
+# Conditional virtualenv building strategy taken from this great post
+# by Marcel Hellkamp:
+# http://blog.bottlepy.org/2012/07/16/virtualenv-and-makefiles.html
+$(VENV): $(VENV)/bin/activate
+$(VENV)/bin/activate: test-requirements.txt
 	@echo "#############################################"
 	@echo "# Creating a virtualenv"
 	@echo "#############################################"
-	virtualenv $(NAME)env
-	. $(NAME)env/bin/activate && pip install setuptools==17.1.1
-	. $(NAME)env/bin/activate && pip install -r test-requirements.txt
+	test -d $(VENV) || virtualenv $(VENV)
+	. $(VENV)/bin/activate && pip install setuptools==17.1.1
+	. $(VENV)/bin/activate && pip install -r test-requirements.txt
+	touch $(VENV)/bin/activate
 # If there are any special things to install do it here
-# . $(NAME)env/bin/activate && INSTALL STUFF
+# . $(VENV)/bin/activate && INSTALL STUFF
 
-ci-unittests:
+ci-unittests: $(VENV)
 	@echo "#############################################"
 	@echo "# Running Unit Tests in virtualenv"
 	@echo "#############################################"
-	. $(NAME)env/bin/activate && nosetests -v --with-coverage --cover-html --cover-min-percentage=70 --cover-package=$(SHORTNAME) test/
+	. $(VENV)/bin/activate && detox -e py27-unit,py35-unit
 	@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
 
-ci-pylint:
+ci-pylint: $(VENV)
 	@echo "#############################################"
 	@echo "# Running PyLint Tests in virtualenv"
 	@echo "#############################################"
-	. $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py
+	. $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
 
-ci-list-deps:
+ci-flake8: $(VENV)
 	@echo "#############################################"
-	@echo "# Listing all pip deps"
+	@echo "# Running Flake8 Compliance Tests in virtualenv"
 	@echo "#############################################"
-	. $(NAME)env/bin/activate && pip freeze
+	. $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
 
-ci-pyflakes:
-	@echo "#################################################"
-	@echo "# Running Pyflakes Compliance Tests in virtualenv"
-	@echo "#################################################"
-	. $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py
+ci-tox: $(VENV)
+	. $(VENV)/bin/activate && detox
 
-ci-pep8:
-	@echo "#############################################"
-	@echo "# Running PEP8 Compliance Tests in virtualenv"
-	@echo "#############################################"
-	. $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/
-
-ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests
-	:
+ci: ci-tox
	@echo
	@echo "##################################################################################"
	@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
	@echo "To clean your test environment run 'make clean'"
	@echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
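For readers not fluent in make: the new `%.1.asciidoc` pattern rule simply stamps `VERSION` into the man-page template before a2x converts it (`$<` is the prerequisite, `$@` the target). A rough Python equivalent of that sed step, using the real file names but otherwise purely illustrative (and replacing every occurrence, where sed without `/g` replaces only the first per line):

```python
# Hypothetical stand-in for the Makefile rule:
#   sed "s/%VERSION%/$(VERSION)/" $< > $@
VERSION = '1.4'

# Read the template and substitute the version placeholder.
with open('atomic-openshift-installer.1.asciidoc.in') as template:
    text = template.read().replace('%VERSION%', VERSION)

# Write the result that a2x would then turn into the man page.
with open('atomic-openshift-installer.1.asciidoc', 'w') as out:
    out.write(text)
```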
diff --git a/utils/README.md b/utils/README.md
index 2abf2705e..c37ab41e6 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -6,6 +6,47 @@ Run the command:
 
 to run an array of unittests locally.
 
+Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/) which allows
+for running tests in parallel
+
+
+```
+pip install tox detox
+```
+
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Running a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Running a particular test environment in a clean virtualenv (python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (py27-flake8 env in this case):
+```
+source .tox/py27-ansible22-flake8/bin/activate
+```
+
 You will get errors if the log files already exist and can not be
 written to by the current user (`/tmp/ansible.log` and
 `/tmp/installer.txt`).
 *We're working on it.*
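The README's warning about `/tmp/ansible.log` and `/tmp/installer.txt` is easy to hit when those files were created by another user. A quick stdlib-only check (illustrative; this helper is not part of the repo):

```python
# Illustrative helper: report whether the current user can write the
# log files the installer and Ansible use by default.
import os

for log in ('/tmp/ansible.log', '/tmp/installer.txt'):
    if os.path.exists(log) and not os.access(log, os.W_OK):
        print('%s exists but is not writable; remove it or fix its owner' % log)
    else:
        print('%s is fine' % log)
```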
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1 b/utils/docs/man/man1/atomic-openshift-installer.1
new file mode 100644
index 000000000..827ce224b
--- /dev/null
+++ b/utils/docs/man/man1/atomic-openshift-installer.1
@@ -0,0 +1,200 @@
+'\" t
+.\" Title: atomic-openshift-installer
+.\" Author: [see the "AUTHOR" section]
+.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
+.\" Date: 12/28/2016
+.\" Manual: atomic-openshift-installer
+.\" Source: atomic-openshift-utils 1.4
+.\" Language: English
+.\"
+.TH "ATOMIC\-OPENSHIFT\-I" "1" "12/28/2016" "atomic\-openshift\-utils 1\&.4" "atomic\-openshift\-installer"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+atomic-openshift-installer \- Interactive OpenShift Container Platform (OCP) installer
+.SH "SYNOPSIS"
+.sp
+atomic\-openshift\-installer [OPTIONS] COMMAND [OPTS]
+.SH "DESCRIPTION"
+.sp
+\fBatomic\-openshift\-installer\fR makes the process for installing OCP easier by interactively gathering the data needed to run on each host\&. It can also be run in unattended mode if provided with a configuration file\&.
+.SH "OPTIONS"
+.sp
+The following options are common to all commands\&.
+.PP
+\fB\-u\fR, \fB\-\-unattended\fR
+.RS 4
+Run installer in
+\fBunattended\fR
+mode\&. You will not be prompted to answer any questions\&.
+.RE
+.PP
+\fB\-c\fR, \fB\-\-configuration\fR \fIPATH\fR
+.RS 4
+Provide an alternate
+\fIPATH\fR
+to an
+\fIinstaller\&.cfg\&.yml\fR
+file\&.
+.RE
+.PP
+\fB\-a\fR \fIDIRECTORY\fR, \fB\-\-ansible\-playbook\-directory\fR \fIDIRECTORY\fR
+.RS 4
+Manually set the
+\fIDIRECTORY\fR
+in which to look for Ansible playbooks\&.
+.RE
+.PP
+\fB\-\-ansible\-log\-path\fR \fIPATH\fR
+.RS 4
+Specify the
+\fIPATH\fR
+of the directory in which to save Ansible logs\&.
+.RE
+.PP
+\fB\-v\fR, \fB\-\-verbose\fR
+.RS 4
+Run the installer with more verbosity\&.
+.RE
+.PP
+\fB\-d\fR, \fB\-\-debug\fR
+.RS 4
+Enable installer debugging\&. Logs are saved in
+\fI/tmp/installer\&.txt\fR\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show the usage help and exit\&.
+.RE
+.SH "COMMANDS"
+.sp
+\fBatomic\-openshift\-installer\fR has four modes of operation:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBinstall\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBuninstall\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBupgrade\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBscaleup\fR
+.RE
+.sp
+The options specific to each command are described in the following sections\&.
+.SH "INSTALL"
+.sp
+The \fBinstall\fR command will guide you through steps required to install an OCP cluster\&. After all of the required information has been collected (target hosts, storage options, high\-availability), the installation will begin\&.
+.PP
+\fB\-f\fR, \fB\-\-force\fR
+.RS 4
+Forces an installation\&. This means that hosts with existing installations will be reinstalled if required\&.
+.RE
+.PP
+\fB\-\-gen\-inventory\fR
+.RS 4
+Generate an Ansible inventory file and exit\&. The default location for the inventory file is
+\fI~/\&.config/openshift/hosts\fR\&.
+.RE
+.SH "UNINSTALL"
+.sp
+The \fBuninstall\fR command will uninstall OCP from your target hosts\&. This command has no additional options\&.
+.SH "UPGRADE"
+.sp
+The \fBupgrade\fR command will upgrade a cluster of hosts to a newer version of OCP\&.
+.PP
+\fB\-l\fR, \fB\-\-latest\-minor\fR
+.RS 4
+Upgrade to the latest minor version\&. For example, if you are running version
+\fB3\&.2\&.1\fR
+then this could upgrade you to
+\fB3\&.2\&.2\fR\&.
+.RE
+.PP
+\fB\-n\fR, \fB\-\-next\-major\fR
+.RS 4
+Upgrade to the latest major version\&. For example, if you are running version
+\fB3\&.2\fR
+then this could upgrade you to
+\fB3\&.3\fR\&.
+.RE
+.SH "SCALEUP"
+.sp
+The \fBscaleup\fR command is used to add new nodes to an existing cluster\&. This command has no additional options\&.
+.SH "FILES"
+.sp
+\fB~/\&.config/openshift/installer\&.cfg\&.yml\fR \(em Installer configuration file\&. Can be used to generate an inventory later or start an unattended installation\&.
+.sp
+\fB~/\&.config/openshift/hosts\fR \(em Generated Ansible inventory file\&. Used to run the Ansible playbooks for install, uninstall, and upgrades\&.
+.sp
+\fB/tmp/ansible\&.log\fR \(em The default location of the ansible log file\&.
+.sp
+\fB/tmp/installer\&.txt\fR \(em The location of the log file for debugging the installer\&.
+.SH "AUTHOR"
+.sp
+Red Hat OpenShift Productization team
+.sp
+For a complete list of contributors, please visit the GitHub charts page\&.
+.SH "COPYRIGHT"
+.sp
+Copyright \(co 2016 Red Hat, Inc\&.
+.sp
+\fBatomic\-openshift\-installer\fR is released under the terms of the ASL 2\&.0 license\&.
+.SH "SEE ALSO"
+.sp
+\fBansible\fR(1), \fBansible\-playbook\fR(1)
+.sp
+\fBThe openshift\-ansible GitHub Project\fR \(em https://github\&.com/openshift/openshift\-ansible/
+.sp
+\fBThe atomic\-openshift\-installer Documentation\fR \(em https://docs\&.openshift\&.com/container\-platform/3\&.3/install_config/install/quick_install\&.html
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
new file mode 100644
index 000000000..2917e9992
--- /dev/null
+++ b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
@@ -0,0 +1,173 @@
+atomic-openshift-installer(1)
+=============================
+:man source: atomic-openshift-utils
+:man version: %VERSION%
+:man manual: atomic-openshift-installer
+
+
+NAME
+----
+atomic-openshift-installer - Interactive OpenShift Container Platform (OCP) installer
+
+
+SYNOPSIS
+--------
+atomic-openshift-installer [OPTIONS] COMMAND [OPTS]
+
+
+DESCRIPTION
+-----------
+
+**atomic-openshift-installer** makes the process for installing OCP
+easier by interactively gathering the data needed to run on each
+host. It can also be run in unattended mode if provided with a
+configuration file.
+
+
+OPTIONS
+-------
+
+The following options are common to all commands.
+
+*-u*, *--unattended*::
+
+Run installer in **unattended** mode. You will not be prompted to
+answer any questions.
+
+
+*-c*, *--configuration* 'PATH'::
+
+Provide an alternate 'PATH' to an 'installer.cfg.yml' file.
+
+
+*-a* 'DIRECTORY', *--ansible-playbook-directory* 'DIRECTORY'::
+
+Manually set the 'DIRECTORY' in which to look for Ansible playbooks.
+
+
+*--ansible-log-path* 'PATH'::
+
+Specify the 'PATH' of the directory in which to save Ansible logs.
+
+
+*-v*, *--verbose*::
+
+Run the installer with more verbosity.
+
+
+*-d*, *--debug*::
+
+Enable installer debugging. Logs are saved in '/tmp/installer.txt'.
+
+
+*-h*, *--help*::
+
+Show the usage help and exit.
+
+
+COMMANDS
+--------
+
+**atomic-openshift-installer** has four modes of operation:
+
+* **install**
+* **uninstall**
+* **upgrade**
+* **scaleup**
+
+The options specific to each command are described in the following
+sections.
+
+
+
+INSTALL
+-------
+
+The **install** command will guide you through steps required to
+install an OCP cluster. After all of the required information has been
+collected (target hosts, storage options, high-availability), the
+installation will begin.
+
+*-f*, *--force*::
+
+Forces an installation. This means that hosts with existing
+installations will be reinstalled if required.
+
+*--gen-inventory*::
+
+Generate an Ansible inventory file and exit. The default location for
+the inventory file is '~/.config/openshift/hosts'.
+
+
+UNINSTALL
+---------
+
+The **uninstall** command will uninstall OCP from your target
+hosts. This command has no additional options.
+
+
+UPGRADE
+-------
+
+The **upgrade** command will upgrade a cluster of hosts to a newer
+version of OCP.
+
+*-l*, *--latest-minor*::
+
+Upgrade to the latest minor version. For example, if you are running
+version **3.2.1** then this could upgrade you to **3.2.2**.
+
+*-n*, *--next-major*::
+
+Upgrade to the latest major version. For example, if you are running
+version **3.2** then this could upgrade you to **3.3**.
+
+
+SCALEUP
+-------
+
+The **scaleup** command is used to add new nodes to an existing cluster.
+This command has no additional options.
+
+FILES
+-----
+
+*~/.config/openshift/installer.cfg.yml* -- Installer configuration
+ file. Can be used to generate an inventory later or start an
+ unattended installation.
+
+*~/.config/openshift/hosts* -- Generated Ansible inventory file. Used
+ to run the Ansible playbooks for install, uninstall, and upgrades.
+
+*/tmp/ansible.log* -- The default location of the ansible log file.
+
+*/tmp/installer.txt* -- The location of the log file for debugging the
+ installer.
+
+
+AUTHOR
+------
+
+Red Hat OpenShift Productization team
+
+For a complete list of contributors, please visit the GitHub charts
+page.
+
+
+
+COPYRIGHT
+---------
+Copyright © 2016 Red Hat, Inc.
+
+**atomic-openshift-installer** is released under the terms of the ASL
+2.0 license.
+
+
+
+SEE ALSO
+--------
+*ansible*(1), *ansible-playbook*(1)
+
+*The openshift-ansible GitHub Project* -- <https://github.com/openshift/openshift-ansible/>
+
+*The atomic-openshift-installer Documentation* -- <https://docs.openshift.com/container-platform/3.3/install_config/install/quick_install.html>
diff --git a/utils/etc/ansible-quiet.cfg b/utils/etc/ansible-quiet.cfg
new file mode 100644
index 000000000..0eb0efa49
--- /dev/null
+++ b/utils/etc/ansible-quiet.cfg
@@ -0,0 +1,33 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+forks = 10
+host_key_checking = False
+nocows = 1
+
+retry_files_enabled = False
+
+deprecation_warnings=False
+
+# Need to handle:
+# inventory - derive from OO_ANSIBLE_DIRECTORY env var
+# callback_plugins - derive from pkg_resource.resource_filename
+# private_key_file - prompt if missing
+# remote_tmp - set if provided by user (cli)
+# ssh_args - set if provided by user (cli)
+# control_path
+
+stdout_callback = openshift_quick_installer
+callback_plugins = /usr/share/ansible_plugins/callback_plugins
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
index a53ab6cb1..3425e7e62 100644
--- a/utils/etc/ansible.cfg
+++ b/utils/etc/ansible.cfg
@@ -19,10 +19,12 @@ nocows = 1
 
 retry_files_enabled = False
 
+deprecation_warnings = False
+
 # Need to handle:
 # inventory - derive from OO_ANSIBLE_DIRECTORY env var
 # callback_plugins - derive from pkg_resource.resource_filename
 # private_key_file - prompt if missing
 # remote_tmp - set if provided by user (cli)
 # ssh_args - set if provided by user (cli)
-# control_path
\ No newline at end of file
+# control_path
diff --git a/utils/setup.cfg b/utils/setup.cfg
index 79bc67848..862dffd7b 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -3,3 +3,21 @@
 # 3. If at all possible, it is good practice to do this. If you cannot, you
 # will need to generate wheels for each Python version that you support.
 universal=1
+
+[nosetests]
+verbosity=2
+with-coverage=1
+cover-html=1
+cover-inclusive=1
+cover-min-percentage=70
+cover-erase=1
+detailed-errors=1
+cover-branches=1
+
+[flake8]
+max-line-length=120
+exclude=test/*,setup.py,oo-installenv
+ignore=E501
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
diff --git a/utils/setup.py b/utils/setup.py
index eac1b4b2e..3518581e7 100644
--- a/utils/setup.py
+++ b/utils/setup.py
@@ -47,7 +47,7 @@ setup(
     # your project is installed. For an analysis of "install_requires" vs pip's
     # requirements files see:
     # https://packaging.python.org/en/latest/requirements.html
-    install_requires=['click', 'PyYAML'],
+    install_requires=['click', 'PyYAML', 'ansible'],
 
     # List additional groups of dependencies here (e.g. development
     # dependencies). You can install these using the following syntax,
@@ -62,14 +62,9 @@ setup(
     # installed, specify them here. If using Python 2.6 or less, then these
     # have to be included in MANIFEST.in as well.
     package_data={
-        'ooinstall': ['ansible.cfg', 'ansible_plugins/*'],
+        'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
     },
 
-    # Although 'package_data' is the preferred approach, in some case you may
-    # need to place data files outside of your packages. See:
-    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
-    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
-    #data_files=[('my_data', ['data/data_file'])],
 
     tests_require=['nose'],
 
     test_suite='nose.collector',
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
index 3847c029a..3c5614d39 100755
--- a/utils/site_assets/oo-install-bootstrap.sh
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -67,7 +67,7 @@ pip install --no-index -f file:///$(readlink -f deps) ansible 2>&1 >> $OO_INSTAL
 # TODO: these deps should technically be handled as part of installing ooinstall
 pip install --no-index -f file:///$(readlink -f deps) click 2>&1 >> $OO_INSTALL_LOG
 pip install --no-index ./src/ 2>&1 >> $OO_INSTALL_LOG
-echo "Installation preperation done!" 2>&1 >> $OO_INSTALL_LOG
+echo "Installation preparation done!" 2>&1 >> $OO_INSTALL_LOG
 
 echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG
diff --git a/utils/src/MANIFEST.in b/utils/src/MANIFEST.in
index d4153e738..216f57e9c 100644
--- a/utils/src/MANIFEST.in
+++ b/utils/src/MANIFEST.in
@@ -7,3 +7,4 @@ include DESCRIPTION.rst
 # it's already declared in setup.py
 include ooinstall/*
 include ansible.cfg
+include ansible-quiet.cfg
diff --git a/utils/src/data/data_file b/utils/src/data/data_file
deleted file mode 100644
index 7c0646bfd..000000000
--- a/utils/src/data/data_file
+++ /dev/null
@@ -1 +0,0 @@
-some data
\ No newline at end of file
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 85b4d29cb..0bc9aa45e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -1,30 +1,27 @@
-# TODO: Temporarily disabled due to importing old code into openshift-ansible
-# repo. We will work on these over time.
-# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter,too-many-lines
+# pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines
 
+import logging
 import os
-import re
 import sys
-import logging
+
 import click
 from pkg_resources import parse_version
-from ooinstall import openshift_ansible
-from ooinstall.oo_config import OOConfig
-from ooinstall.oo_config import OOConfigInvalidHostError
-from ooinstall.oo_config import Host, Role
+
+from ooinstall import openshift_ansible, utils
+from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role
 from ooinstall.variants import find_variant, get_variant_version_combos
 
-installer_log = logging.getLogger('installer')
-installer_log.setLevel(logging.CRITICAL)
-installer_file_handler = logging.FileHandler('/tmp/installer.txt')
-installer_file_handler.setFormatter(
+INSTALLER_LOG = logging.getLogger('installer')
+INSTALLER_LOG.setLevel(logging.CRITICAL)
+INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt')
+INSTALLER_FILE_HANDLER.setFormatter(
     logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
 # Example output:
 # 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
-installer_file_handler.setLevel(logging.DEBUG)
-installer_log.addHandler(installer_file_handler)
+INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG)
+INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER)
 
 DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
+QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
 DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
 
 UPGRADE_MAPPINGS = {
@@ -43,9 +40,19 @@ UPGRADE_MAPPINGS = {
     '3.2': {
         'minor_version': '3.2',
         'minor_playbook': 'v3_2/upgrade.yml',
-        'major_playbook': 'v3_2/upgrade.yml',
+        'major_playbook': 'v3_3/upgrade.yml',
         'major_version': '3.3',
-    }
+    },
+    '3.3': {
+        'minor_version': '3.3',
+        'minor_playbook': 'v3_3/upgrade.yml',
+        'major_playbook': 'v3_4/upgrade.yml',
+        'major_version': '3.4',
+    },
+    '3.4': {
+        'minor_version': '3.4',
+        'minor_playbook': 'v3_4/upgrade.yml',
+    },
 }
 
 
@@ -57,17 +64,8 @@ def validate_ansible_dir(path):
     # raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
 
 
-def is_valid_hostname(hostname):
-    if not hostname or len(hostname) > 255:
-        return False
-    if hostname[-1] == ".":
-        hostname = hostname[:-1]  # strip exactly one dot from the right, if present
-    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
-    return all(allowed.match(x) for x in hostname.split("."))
-
-
 def validate_prompt_hostname(hostname):
-    if hostname == '' or is_valid_hostname(hostname):
+    if hostname == '' or utils.is_valid_hostname(hostname):
         return hostname
     raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
 
@@ -83,7 +81,7 @@ passwordless sudo access.
     return click.prompt('User for ssh access', default='root')
 
 
-def get_master_routingconfig_subdomain():
+def get_routingconfig_subdomain():
     click.clear()
     message = """
 You might want to override the default subdomain used for exposed routes.
 If you don't know what this is, use the default value.
@@ -120,11 +118,6 @@
 a high-availability (HA) deployment. If you choose an HA deployment,
 then you are prompted to identify a *separate* system to act as the load
 balancer for your cluster once you define all masters and nodes.
 
-If only one master is specified, an etcd instance is embedded within the
-OpenShift master service to use as the datastore. This can be later replaced
-with a separate etcd instance, if required. If multiple masters are specified,
-then a separate etcd cluster is configured with each master serving as a member.
-
 Any masters configured as part of this installation process are also
 configured as nodes. This enables the master to proxy to pods
 from the API. By default, this node is unschedulable, but this can be changed
@@ -182,9 +175,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
         if masters_set or num_masters != 2:
             more_hosts = click.confirm('Do you want to add additional hosts?')
 
-    if num_masters >= 3:
-        collect_master_lb(hosts)
-        roles.add('master_lb')
+    if num_masters > 2:
+        master_lb = collect_master_lb(hosts)
+        if master_lb:
+            hosts.append(master_lb)
+            roles.add('master_lb')
+    else:
+        set_cluster_hostname(oo_cfg)
 
     if not existing_env:
         collect_storage_host(hosts)
@@ -192,7 +189,8 @@
     return hosts, roles
 
 
-def print_installation_summary(hosts, version=None):
+# pylint: disable=too-many-branches
+def print_installation_summary(hosts, version=None, verbose=True):
     """
     Displays a summary of all hosts configured thus far, and what role each
     will play.
@@ -213,35 +211,36 @@
     click.echo('Total OpenShift masters: %s' % len(masters))
     click.echo('Total OpenShift nodes: %s' % len(nodes))
 
-    if len(masters) == 1 and version != '3.0':
-        ha_hint_message = """
+    if verbose:
+        if len(masters) == 1 and version != '3.0':
+            ha_hint_message = """
 NOTE: Add a total of 3 or more masters to perform an HA installation."""
-        click.echo(ha_hint_message)
-    elif len(masters) == 2:
-        min_masters_message = """
+            click.echo(ha_hint_message)
+        elif len(masters) == 2:
+            min_masters_message = """
 WARNING: A minimum of 3 masters are required to perform an HA installation.
 Please add one more to proceed."""
-        click.echo(min_masters_message)
-    elif len(masters) >= 3:
-        ha_message = """
+            click.echo(min_masters_message)
+        elif len(masters) >= 3:
+            ha_message = """
 NOTE: Multiple masters specified, this will be an HA deployment with a separate
 etcd cluster. You will be prompted to provide the FQDN of a load balancer and
 a host for storage once finished entering hosts.
-"""
-        click.echo(ha_message)
+            """
+            click.echo(ha_message)
 
-    dedicated_nodes_message = """
+            dedicated_nodes_message = """
 WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated
 nodes are specified, each configured master will be marked as a schedulable
 node."""
 
-    min_ha_nodes_message = """
+            min_ha_nodes_message = """
 WARNING: A minimum of 3 dedicated nodes are recommended for an HA
 deployment."""
-    if len(dedicated_nodes) == 0:
-        click.echo(dedicated_nodes_message)
-    elif len(dedicated_nodes) < 3:
-        click.echo(min_ha_nodes_message)
+            if len(dedicated_nodes) == 0:
+                click.echo(dedicated_nodes_message)
+            elif len(dedicated_nodes) < 3:
+                click.echo(min_ha_nodes_message)
 
     click.echo('')
 
@@ -262,13 +261,12 @@ def print_host_summary(all_hosts, host):
             click.echo("  - Load Balancer (Preconfigured)")
         else:
             click.echo("  - Load Balancer (HAProxy)")
-    if host.is_master():
-        if host.is_etcd_member(all_hosts):
-            click.echo("  - Etcd Member")
-        else:
-            click.echo("  - Etcd (Embedded)")
+    if host.is_etcd():
+        click.echo("  - Etcd")
     if host.is_storage():
         click.echo("  - Storage")
+    if host.new_host:
+        click.echo("  - NEW")
 
 
 def collect_master_lb(hosts):
@@ -306,14 +304,35 @@ hostname.
                              'please specify a separate host' % hostname)
         return hostname
 
-    host_props['connect_to'] = click.prompt('Enter hostname or IP address',
-                                            value_proc=validate_prompt_lb)
-    install_haproxy = \
-        click.confirm('Should the reference HAProxy load balancer be installed on this host?')
-    host_props['preconfigured'] = not install_haproxy
-    host_props['roles'] = ['master_lb']
-    master_lb = Host(**host_props)
-    hosts.append(master_lb)
+    lb_hostname = click.prompt('Enter hostname or IP address',
+                               value_proc=validate_prompt_lb)
+    if lb_hostname:
+        host_props['connect_to'] = lb_hostname
+        install_haproxy = \
+            click.confirm('Should the reference HAProxy load balancer be installed on this host?')
+        host_props['preconfigured'] = not install_haproxy
+        host_props['roles'] = ['master_lb']
+        return Host(**host_props)
+    else:
+        return None
+
+
+def set_cluster_hostname(oo_cfg):
+    first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None)
+    message = """
+You have chosen to install a single master cluster (non-HA).
+
+In a single master cluster, the cluster host name (Ansible variable openshift_master_cluster_public_hostname) is set by default to the host name of the single master. In a multiple master (HA) cluster, the FQDN of a host must be provided that will be configured as a proxy. This could be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that would have HAProxy installed on it.
+
+(Optional)
+If you want to override the cluster host name now to something other than the default (the host name of the single master), or if you think you might add masters later to become an HA cluster and want to future proof your cluster host name choice, please provide a FQDN. Otherwise, press ENTER to continue and accept the default.
+"""
+    click.echo(message)
+    cluster_hostname = click.prompt('Enter hostname or IP address',
+                                    default=str(first_master))
+    oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname
+    oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname
 
 
 def collect_storage_host(hosts):
@@ -394,29 +413,29 @@ Notes:
     default_facts_lines = []
     default_facts = {}
-    for h in hosts:
-        if h.preconfigured:
+    for host in hosts:
+        if host.preconfigured:
             continue
         try:
-            default_facts[h.connect_to] = {}
-            h.ip = callback_facts[h.connect_to]["common"]["ip"]
-            h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
-            h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
-            h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+            default_facts[host.connect_to] = {}
+            host.ip = callback_facts[host.connect_to]["common"]["ip"]
+            host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"]
+            host.hostname = callback_facts[host.connect_to]["common"]["hostname"]
+            host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"]
         except KeyError:
-            click.echo("Problem fetching facts from {}".format(h.connect_to))
+            click.echo("Problem fetching facts from {}".format(host.connect_to))
             continue
 
-        default_facts_lines.append(",".join([h.connect_to,
-                                             h.ip,
-                                             h.public_ip,
-                                             h.hostname,
-                                             h.public_hostname]))
-        output = "%s\n%s" % (output, ",".join([h.connect_to,
-                                               h.ip,
-                                               h.public_ip,
-                                               h.hostname,
-                                               h.public_hostname]))
+        default_facts_lines.append(",".join([host.connect_to,
+                                             host.ip,
+                                             host.public_ip,
+                                             host.hostname,
+                                             host.public_hostname]))
+        output = "%s\n%s" % (output, ",".join([host.connect_to,
+                                               host.ip,
+                                               host.public_ip,
+                                               host.hostname,
+                                               host.public_hostname]))
 
     output = "%s\n%s" % (output, notes)
     click.echo(output)
@@ -482,7 +501,7 @@ def get_variant_and_version(multi_master=False):
     i = 1
     combos = get_variant_version_combos()
-    for (variant, version) in combos:
+    for (variant, _) in combos:
         message = "%s\n(%s) %s" % (message, i, variant.description)
         i = i + 1
     message = "%s\n" % message
@@ -533,7 +552,7 @@ def error_if_missing_info(oo_cfg):
         oo_cfg.settings['variant_version'] = version.name
 
     # check that all listed host roles are included
-    listed_roles = get_host_roles_set(oo_cfg)
+    listed_roles = oo_cfg.get_host_roles_set()
     configured_roles = set([role for role in oo_cfg.deployment.roles])
     if listed_roles != configured_roles:
         missing_info = True
@@ -543,16 +562,7 @@
         sys.exit(1)
 
 
-def get_host_roles_set(oo_cfg):
-    roles_set = set()
-    for host in oo_cfg.deployment.hosts:
-        for role in host.roles:
-            roles_set.add(role)
-
-    return roles_set
-
-
-def get_proxy_hostnames_and_excludes():
+def get_proxy_hosts_excludes():
     message = """
 If a proxy is needed to reach HTTP and HTTPS traffic, please enter the
 name below. This proxy will be configured by default for all processes
@@ -634,7 +644,8 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
     click.clear()
 
     if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
-        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
+        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
+            get_routingconfig_subdomain()
         click.clear()
 
     # Are any proxy vars already presisted?
@@ -643,7 +654,7 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
     saved_proxy_vars = [pv for pv in proxy_vars
                         if oo_cfg.deployment.variables.get(pv, 'UNSET') is not 'UNSET']
 
-    installer_log.debug("Evaluated proxy settings, found %s presisted values",
+    INSTALLER_LOG.debug("Evaluated proxy settings, found %s presisted values",
                         len(saved_proxy_vars))
     current_version = parse_version(
         oo_cfg.settings.get('variant_version', '0.0'))
@@ -653,8 +664,8 @@
     # recognizes proxy parameters. We must prompt the user for values
     # if this conditional is true.
     if not saved_proxy_vars and current_version >= min_version:
-        installer_log.debug("Prompting user to enter proxy values")
-        http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes()
+        INSTALLER_LOG.debug("Prompting user to enter proxy values")
+        http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes()
         oo_cfg.deployment.variables['proxy_http'] = http_proxy
         oo_cfg.deployment.variables['proxy_https'] = https_proxy
         oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
@@ -694,8 +705,10 @@ def get_installed_hosts(hosts, callback_facts):
     for host in [h for h in hosts if h.is_master() or h.is_node()]:
         if host.connect_to in callback_facts.keys():
             if is_installed_host(host, callback_facts):
+                INSTALLER_LOG.debug("%s is already installed", str(host))
                 installed_hosts.append(host)
             else:
+                INSTALLER_LOG.debug("%s is not installed", str(host))
                 uninstalled_hosts.append(host)
     return installed_hosts, uninstalled_hosts
 
@@ -708,80 +721,85 @@ def is_installed_host(host, callback_facts):
     return version_found
 
 
-# pylint: disable=too-many-branches
-# This pylint error will be corrected shortly in separate PR.
-def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
-
-    # Copy the list of existing hosts so we can remove any already installed nodes.
-    hosts_to_run_on = list(oo_cfg.deployment.hosts)
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+    """
+    We get here once there are hosts in oo_cfg and we need to find out what
+    state they are in. There are several different cases that might occur:
+
+    1. All hosts in oo_cfg are uninstalled. In this case, we should proceed
+       with a normal installation.
+    2. All hosts in oo_cfg are installed. In this case, ask the user if they
+       want to force reinstall or exit. We can also hint in this case about
+       the scaleup workflow.
+    3. Some hosts are installed and some are uninstalled. In this case, prompt
+       the user if they want to force (re)install all hosts specified or direct
+       them to the scaleup workflow and exit.
+    """
+    hosts_to_run_on = []
     # Check if master or nodes already have something installed
-    installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts)
-    if len(installed_hosts) > 0:
-        click.echo('Installed environment detected.')
-        # This check has to happen before we start removing hosts later in this method
+    installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts,
+                                                             callback_facts)
+    nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()]
+    masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()]
+
+    in_hosts = [str(h) for h in installed_hosts]
+    un_hosts = [str(h) for h in uninstalled_hosts]
+    all_hosts = [str(h) for h in oo_cfg.deployment.hosts]
+    m_and_n = [str(h) for h in masters_and_nodes]
+
+    INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts))
+    INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts))
+    INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts))
+    INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n))
+
+    # Case (1): All uninstalled hosts
+    if len(uninstalled_hosts) == len(nodes):
+        click.echo('All hosts in config are uninstalled. Proceeding with installation...')
+        hosts_to_run_on = list(oo_cfg.deployment.hosts)
+    else:
+        # Case (2): All installed hosts
+        if len(installed_hosts) == len(masters_and_nodes):
+            message = """
+All specified hosts in specified environment are installed.
+"""
+        # Case (3): Some installed, some uninstalled
+        else:
+            message = """
+A mix of installed and uninstalled hosts have been detected in your environment.
+Please make sure your environment was installed successfully before adding new nodes.
+"""
+
+        # Still inside the case 2/3 else condition
+        mixed_msg = """
+\tInstalled hosts:
+\t\t{inst_hosts}
+
+\tUninstalled hosts:
+\t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts))
+        click.echo(mixed_msg)
+
+        # Out of the case 2/3 if/else
+        click.echo(message)
+
+        if not unattended:
+            response = click.confirm('Do you want to (re)install the environment?\n\n'
+                                     'Note: This will potentially erase any custom changes.')
+            if response:
+                hosts_to_run_on = list(oo_cfg.deployment.hosts)
+                force = True
+        elif unattended and force:
+            hosts_to_run_on = list(oo_cfg.deployment.hosts)
 
         if not force:
-        if not unattended:
-            click.echo('By default the installer only adds new nodes '
-                       'to an installed environment.')
-            response = click.prompt('Do you want to (1) only add additional nodes or '
-                                    '(2) reinstall the existing hosts '
-                                    'potentially erasing any custom changes?',
-                                    type=int)
-            # TODO: this should be reworked with error handling.
-            # Click can certainly do this for us.
-            # This should be refactored as soon as we add a 3rd option.
-            if response == 1:
-                force = False
-            if response == 2:
-                force = True
-
-        # present a message listing already installed hosts and remove hosts if needed
-        for host in installed_hosts:
-            if host.is_master():
-                click.echo("{} is already an OpenShift master".format(host))
-                # Masters stay in the list, we need to run against them when adding
-                # new nodes.
-            elif host.is_node():
-                click.echo("{} is already an OpenShift node".format(host))
-                # force is only used for reinstalls so we don't want to remove
-                # anything.
-                if not force:
-                    hosts_to_run_on.remove(host)
-
-    # Handle the cases where we know about uninstalled systems
-    if len(uninstalled_hosts) > 0:
-        for uninstalled_host in uninstalled_hosts:
-            click.echo("{} is currently uninstalled".format(uninstalled_host))
-        # Fall through
-        click.echo('\nUninstalled hosts have been detected in your environment. '
-                   'Please make sure your environment was installed successfully '
-                   'before adding new nodes. If you want a fresh install, use '
-                   '`atomic-openshift-installer install --force`')
+            message = """
+If you want to force reinstall of your environment, run:
+`atomic-openshift-installer install --force`
+
+If you want to add new nodes to this environment, run:
+`atomic-openshift-installer scaleup`
+"""
+            click.echo(message)
             sys.exit(1)
-    else:
-        if unattended:
-            if not force:
-                click.echo('Installed environment detected and no additional '
-                           'nodes specified: aborting. If you want a fresh install, use '
-                           '`atomic-openshift-installer install --force`')
-                sys.exit(1)
-        else:
-            if not force:
-                new_nodes = collect_new_nodes(oo_cfg)
-
-                hosts_to_run_on.extend(new_nodes)
-                oo_cfg.deployment.hosts.extend(new_nodes)
-
-                openshift_ansible.set_config(oo_cfg)
-                click.echo('Gathering information from hosts...')
-                callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
-                if error or callback_facts is None:
-                    click.echo("There was a problem fetching the required information. See "
-                               "{} for details.".format(oo_cfg.settings['ansible_log_path']))
-                    sys.exit(1)
-    else:
-        pass  # proceeding as normal should do a clean install
 
     return hosts_to_run_on, callback_facts
 
@@ -797,6 +815,49 @@ def set_infra_nodes(hosts):
         host.node_labels = "{'region': 'infra'}"
 
 
+def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory):
+    # Write Ansible inventory file to disk:
+    inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)
+
+    click.echo()
+    click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
+    click.echo("Wrote Ansible inventory: %s" % inventory_file)
+    click.echo()
+
+    if gen_inventory:
+        sys.exit(0)
+
+    click.echo('Ready to run installation process.')
+    message = """
+If changes are needed please edit the config file above and re-run.
+"""
+    if not unattended:
+        confirm_continue(message)
+
+    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
+                                                hosts_to_run_on, verbose)
+
+    if error:
+        # The bootstrap script will print out the log location.
+        message = """
+An error was detected. After resolving the problem please relaunch the
+installation process.
+"""
+        click.echo(message)
+        sys.exit(1)
+    else:
+        message = """
+The installation was successful!
+
+If this is your first time installing please take a look at the Administrator
+Guide for advanced options related to routing, storage, authentication, and
+more:
+
+http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
+"""
+        click.echo(message)
+
+
 @click.group()
 @click.pass_context
 @click.option('--unattended', '-u', is_flag=True, default=False)
@@ -815,12 +876,6 @@ def set_infra_nodes(hosts):
 #                callback=validate_ansible_dir,
               default=DEFAULT_PLAYBOOK_DIR,
               envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
-@click.option('--ansible-config',
-              type=click.Path(file_okay=True,
-                              dir_okay=False,
-                              writable=True,
-                              readable=True),
-              default=None)
 @click.option('--ansible-log-path',
               type=click.Path(file_okay=True,
                               dir_okay=False,
@@ -836,7 +891,7 @@
 # pylint: disable=too-many-arguments
 # pylint: disable=line-too-long
 # Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose, debug):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_path, verbose, debug):
     """
     atomic-openshift-installer makes the process for installing OSE or AEP
     easier by interactively gathering the data needed to run on each host.
@@ -849,20 +904,19 @@
     # highest), anything below that (we only use debug/warning
     # presently) is not logged. If '-d' is given though, we'll
     # lower the threshold to debug (almost everything gets through)
-        installer_log.setLevel(logging.DEBUG)
-        installer_log.debug("Quick Installer debugging initialized")
+        INSTALLER_LOG.setLevel(logging.DEBUG)
+        INSTALLER_LOG.debug("Quick Installer debugging initialized")
 
     ctx.obj = {}
     ctx.obj['unattended'] = unattended
     ctx.obj['configuration'] = configuration
-    ctx.obj['ansible_config'] = ansible_config
     ctx.obj['ansible_log_path'] = ansible_log_path
    ctx.obj['verbose'] = verbose
 
     try:
         oo_cfg = OOConfig(ctx.obj['configuration'])
-    except OOConfigInvalidHostError as e:
-        click.echo(e)
+    except OOConfigInvalidHostError as err:
+        click.echo(err)
         sys.exit(1)
 
     # If no playbook dir on the CLI, check the config:
@@ -876,13 +930,13 @@
         oo_cfg.ansible_playbook_directory = ansible_playbook_directory
         ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
 
-    if ctx.obj['ansible_config']:
-        oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
-    elif 'ansible_config' not in oo_cfg.settings and \
-        os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+    if os.path.exists(DEFAULT_ANSIBLE_CONFIG):
         # If we're installed by RPM this file should exist and we can use it as our default:
         oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
 
+    if not verbose and os.path.exists(QUIET_ANSIBLE_CONFIG):
+        oo_cfg.settings['ansible_quiet_config'] = QUIET_ANSIBLE_CONFIG
+
     oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
 
     ctx.obj['oo_cfg'] = oo_cfg
@@ -920,7 +974,7 @@ def uninstall(ctx):
 @click.option('--latest-minor', '-l', is_flag=True, default=False)
 @click.option('--next-major', '-n', is_flag=True, default=False)
 @click.pass_context
-# pylint: disable=too-many-statements
+# pylint: disable=too-many-statements,too-many-branches
 def upgrade(ctx, latest_minor, next_major):
     oo_cfg = ctx.obj['oo_cfg']
 
@@ -934,7 +988,12 @@
         sys.exit(0)
 
     old_version = oo_cfg.settings['variant_version']
-    mapping = UPGRADE_MAPPINGS.get(old_version)
+
+    try:
+        mapping = UPGRADE_MAPPINGS[old_version]
+    except KeyError:
+        click.echo('No upgrades available for %s %s' % (variant, old_version))
+        sys.exit(0)
 
     message = """
 This tool will help you upgrade your existing OpenShift installation.
@@ -968,7 +1027,7 @@
             sys.exit(0)
         playbook = mapping['major_playbook']
         new_version = mapping['major_version']
-        # Update config to reflect the version we're targetting, we'll write
+        # Update config to reflect the version we're targeting, we'll write
         # to disk once Ansible completes successfully, not before.
         oo_cfg.settings['variant_version'] = new_version
         if oo_cfg.settings['variant'] == 'enterprise':
@@ -1012,15 +1071,17 @@
 def install(ctx, force, gen_inventory):
     oo_cfg = ctx.obj['oo_cfg']
     verbose = ctx.obj['verbose']
+    unattended = ctx.obj['unattended']
 
-    if ctx.obj['unattended']:
+    if unattended:
         error_if_missing_info(oo_cfg)
     else:
         oo_cfg = get_missing_info_from_user(oo_cfg)
 
-    check_hosts_config(oo_cfg, ctx.obj['unattended'])
+    check_hosts_config(oo_cfg, unattended)
 
-    print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None))
+    print_installation_summary(oo_cfg.deployment.hosts,
+                               oo_cfg.settings.get('variant_version', None))
     click.echo('Gathering information from hosts...')
     callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
 
@@ -1030,63 +1091,106 @@
                    "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
         sys.exit(1)
 
-    hosts_to_run_on, callback_facts = get_hosts_to_run_on(
-        oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
+    hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg,
+                                                          callback_facts,
+                                                          unattended,
+                                                          force)
 
     # We already verified this is not the case for unattended installs, so this can
     # only trigger for live CLI users:
-    # TODO: if there are *new* nodes and this is a live install, we may need the user
-    # to confirm the settings for new nodes. Look into this once we're distinguishing
-    # between new and pre-existing nodes.
     if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
         confirm_hosts_facts(oo_cfg, callback_facts)
 
     # Write quick installer config file to disk:
     oo_cfg.save_to_disk()
-
-    # Write Ansible inventory file to disk:
-    inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)
-
-    click.echo()
-    click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
-    click.echo("Wrote Ansible inventory: %s" % inventory_file)
-    click.echo()
-
-    if gen_inventory:
-        sys.exit(0)
+    run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
+
+
+@click.command()
+@click.option('--gen-inventory', is_flag=True, default=False,
+              help="Generate an Ansible inventory file and exit.")
+@click.pass_context
+def scaleup(ctx, gen_inventory):
+    oo_cfg = ctx.obj['oo_cfg']
+    verbose = ctx.obj['verbose']
+    unattended = ctx.obj['unattended']
 
-    click.echo('Ready to run installation process.')
-    message = """
-If changes are needed please edit the config file above and re-run.
-"""
-    if not ctx.obj['unattended']:
-        confirm_continue(message)
+    installed_hosts = list(oo_cfg.deployment.hosts)
 
-    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
-                                                hosts_to_run_on, verbose)
+    if len(installed_hosts) == 0:
+        click.echo('No hosts specified.')
+        sys.exit(1)
 
-    if error:
-        # The bootstrap script will print out the log location.
-        message = """
-An error was detected. After resolving the problem please relaunch the
-installation process.
+    click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')
+
+    # Scaleup requires manual data entry. Therefore, we do not support
+    # unattended operations.
+    if unattended:
+        msg = """
+---
+
+The 'scaleup' operation does not support unattended
+functionality. Re-run the installer without the '-u' or '--unattended'
+option to continue.
 """
-        click.echo(message)
+        click.echo(msg)
         sys.exit(1)
-    else:
-        message = """
-The installation was successful!
 
-If this is your first time installing please take a look at the Administrator
-Guide for advanced options related to routing, storage, authentication, and
-more:
+    # Resume normal scaleup workflow
+    print_installation_summary(installed_hosts,
+                               oo_cfg.settings['variant_version'],
+                               verbose=False,)
+    message = """
+---
 
-http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
+We have detected this previously installed OpenShift environment.
+
+This tool will guide you through the process of adding additional
+nodes to your cluster.
 """
-        click.echo(message)
-        click.pause()
+    confirm_continue(message)
+
+    error_if_missing_info(oo_cfg)
+    check_hosts_config(oo_cfg, True)
+
+    installed_masters = [host for host in installed_hosts if host.is_master()]
+    new_nodes = collect_new_nodes(oo_cfg)
+
+    oo_cfg.deployment.hosts.extend(new_nodes)
+    hosts_to_run_on = installed_masters + new_nodes
+
+    openshift_ansible.set_config(oo_cfg)
+    click.echo('Gathering information from hosts...')
+    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
+    if error or callback_facts is None:
+        click.echo("There was a problem fetching the required information. See "
+                   "{} for details.".format(oo_cfg.settings['ansible_log_path']))
+        sys.exit(1)
+
+    print_installation_summary(oo_cfg.deployment.hosts,
+                               oo_cfg.settings.get('variant_version', None))
+    click.echo('Gathering information from hosts...')
+    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
+                                                            verbose)
+
+    if error or callback_facts is None:
+        click.echo("There was a problem fetching the required information. "
+                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
+        sys.exit(1)
+
+    # We already verified this is not the case for unattended installs, so this can
+    # only trigger for live CLI users:
+    if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
+        confirm_hosts_facts(oo_cfg, callback_facts)
+
+    # Write quick installer config file to disk:
+    oo_cfg.save_to_disk()
+    run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
+
 
 cli.add_command(install)
+cli.add_command(scaleup)
 cli.add_command(upgrade)
 cli.add_command(uninstall)
" + "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) + sys.exit(1) + + # We already verified this is not the case for unattended installs, so this can + # only trigger for live CLI users: + if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0: + confirm_hosts_facts(oo_cfg, callback_facts) + + # Write quick installer config file to disk: + oo_cfg.save_to_disk() + run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory) + cli.add_command(install) +cli.add_command(scaleup) cli.add_command(upgrade) cli.add_command(uninstall) diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 393b36f6f..cf14105af 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -1,5 +1,7 @@ # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods +from __future__ import (absolute_import, print_function) + import os import sys import logging @@ -12,7 +14,6 @@ installer_log = logging.getLogger('installer') CONFIG_PERSIST_SETTINGS = [ 'ansible_ssh_user', 'ansible_callback_facts_yaml', - 'ansible_config', 'ansible_inventory_path', 'ansible_log_path', 'deployment', @@ -27,6 +28,19 @@ DEPLOYMENT_VARIABLES_BLACKLIST = [ 'roles', ] +HOST_VARIABLES_BLACKLIST = [ + 'ip', + 'public_ip', + 'hostname', + 'public_hostname', + 'node_labels', + 'containerized', + 'preconfigured', + 'schedulable', + 'other_variables', + 'roles', +] + DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname'] PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname'] @@ -38,7 +52,7 @@ Error loading config. {}. See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file for information on creating a configuration file or delete {} and re-run the installer. """ - print message.format(error, path) + print(message.format(error, path)) class OOConfigFileError(Exception): @@ -67,7 +81,7 @@ class Host(object): self.containerized = kwargs.get('containerized', False) self.node_labels = kwargs.get('node_labels', '') - # allowable roles: master, node, etcd, storage, master_lb, new + # allowable roles: master, node, etcd, storage, master_lb self.roles = kwargs.get('roles', []) self.other_variables = kwargs.get('other_variables', {}) @@ -87,11 +101,13 @@ class Host(object): d = {} for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to', - 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels', - 'other_variables']: + 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels', ]: # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) + for variable, value in self.other_variables.items(): + d[variable] = value + return d def is_master(self): @@ -106,6 +122,10 @@ class Host(object): def is_storage(self): return 'storage' in self.roles + def is_etcd(self): + """ Does this host have the etcd role """ + return 'etcd' in self.roles + def is_etcd_member(self, all_hosts): """ Will this host be a member of a standalone etcd cluster. 
""" if not self.is_master(): @@ -203,7 +223,6 @@ class OOConfig(object): role_list = loaded_config['deployment']['roles'] except KeyError as e: print_read_config_error("No such key: {}".format(e), self.config_path) - print "Error loading config, required key missing: {}".format(e) sys.exit(0) for setting in CONFIG_PERSIST_SETTINGS: @@ -238,13 +257,17 @@ class OOConfig(object): # Parse the hosts into DTO objects: for host in host_list: + host['other_variables'] = {} + for variable, value in host.items(): + if variable not in HOST_VARIABLES_BLACKLIST: + host['other_variables'][variable] = value self.deployment.hosts.append(Host(**host)) # Parse the roles into Objects - for name, variables in role_list.iteritems(): + for name, variables in role_list.items(): self.deployment.roles.update({name: Role(name, variables)}) - except IOError, ferr: + except IOError as ferr: raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename, ferr.strerror)) except yaml.scanner.ScannerError: @@ -308,6 +331,12 @@ class OOConfig(object): if 'ansible_plugins_directory' not in self.settings: self.settings['ansible_plugins_directory'] = \ resource_filename(__name__, 'ansible_plugins') + installer_log.debug("We think the ansible plugins directory should be: %s (it is not already set)", + self.settings['ansible_plugins_directory']) + else: + installer_log.debug("The ansible plugins directory is already set: %s", + self.settings['ansible_plugins_directory']) + if 'version' not in self.settings: self.settings['version'] = 'v2' @@ -327,14 +356,13 @@ class OOConfig(object): self.settings['ansible_inventory_path'] = \ '{}/hosts'.format(os.path.dirname(self.config_path)) - # pylint: disable=consider-iterating-dictionary - # Disabled because we shouldn't alter the container we're - # iterating over - # # clean up any empty sets - for setting in self.settings.keys(): + empty_keys = [] + for setting in self.settings: if not self.settings[setting]: - self.settings.pop(setting) + empty_keys.append(setting) + for key in empty_keys: + self.settings.pop(key) installer_log.debug("Updated OOConfig settings: %s", self.settings) @@ -383,7 +411,7 @@ class OOConfig(object): for host in self.deployment.hosts: p_settings['deployment']['hosts'].append(host.to_dict()) - for name, role in self.deployment.roles.iteritems(): + for name, role in self.deployment.roles.items(): p_settings['deployment']['roles'][name] = role.variables for setting in self.deployment.variables: @@ -397,7 +425,7 @@ class OOConfig(object): if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir(): p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory'] except KeyError as e: - print "Error persisting settings: {}".format(e) + print("Error persisting settings: {}".format(e)) sys.exit(0) return p_settings @@ -413,3 +441,11 @@ class OOConfig(object): if host.connect_to == name: return host return None + + def get_host_roles_set(self): + roles_set = set() + for host in self.deployment.hosts: + for role in host.roles: + roles_set.add(role) + + return roles_set diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 75d26c10a..ce6e54664 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -1,5 +1,7 @@ # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned +from __future__ import (absolute_import, print_function) + import socket 
import subprocess import sys @@ -7,6 +9,7 @@ import os import logging import yaml from ooinstall.variants import find_variant +from ooinstall.utils import debug_env installer_log = logging.getLogger('installer') @@ -30,6 +33,14 @@ VARIABLES_MAP = { 'proxy_exclude_hosts': 'openshift_no_proxy', } +HOST_VARIABLES_MAP = { + 'ip': 'openshift_ip', + 'public_ip': 'openshift_public_ip', + 'hostname': 'openshift_hostname', + 'public_hostname': 'openshift_public_hostname', + 'containerized': 'containerized', +} + def set_config(cfg): global CFG @@ -39,9 +50,6 @@ def set_config(cfg): def generate_inventory(hosts): global CFG - masters = [host for host in hosts if host.is_master()] - multiple_masters = len(masters) > 1 - new_nodes = [host for host in hosts if host.is_node() and host.new_host] scaleup = len(new_nodes) > 0 @@ -52,7 +60,7 @@ def generate_inventory(hosts): write_inventory_children(base_inventory, scaleup) - write_inventory_vars(base_inventory, multiple_masters, lb) + write_inventory_vars(base_inventory, lb) # write_inventory_hosts for role in CFG.deployment.roles: @@ -97,16 +105,16 @@ def write_inventory_children(base_inventory, scaleup): # pylint: disable=too-many-branches -def write_inventory_vars(base_inventory, multiple_masters, lb): +def write_inventory_vars(base_inventory, lb): global CFG base_inventory.write('\n[OSEv3:vars]\n') - for variable, value in CFG.settings.iteritems(): + for variable, value in CFG.settings.items(): inventory_var = VARIABLES_MAP.get(variable, None) if inventory_var and value: base_inventory.write('{}={}\n'.format(inventory_var, value)) - for variable, value in CFG.deployment.variables.iteritems(): + for variable, value in CFG.deployment.variables.items(): inventory_var = VARIABLES_MAP.get(variable, variable) if value: base_inventory.write('{}={}\n'.format(inventory_var, value)) @@ -114,7 +122,7 @@ def write_inventory_vars(base_inventory, multiple_masters, lb): if CFG.deployment.variables['ansible_ssh_user'] != 'root': base_inventory.write('ansible_become=yes\n') - if multiple_masters and lb is not None: + if lb is not None: base_inventory.write('openshift_master_cluster_method=native\n') base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname)) base_inventory.write( @@ -146,11 +154,11 @@ def write_inventory_vars(base_inventory, multiple_masters, lb): "'baseurl': '{}', " "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO'])) - for name, role_obj in CFG.deployment.roles.iteritems(): + for name, role_obj in CFG.deployment.roles.items(): if role_obj.variables: group_name = ROLES_TO_GROUPS_MAP.get(name, name) base_inventory.write("\n[{}:vars]\n".format(group_name)) - for variable, value in role_obj.variables.iteritems(): + for variable, value in role_obj.variables.items(): inventory_var = VARIABLES_MAP.get(variable, variable) if value: base_inventory.write('{}={}\n'.format(inventory_var, value)) @@ -175,7 +183,6 @@ def write_proxy_settings(base_inventory): pass -# pylint: disable=too-many-branches def write_host(host, role, inventory, schedulable=None): global CFG @@ -183,22 +190,16 @@ def write_host(host, role, inventory, schedulable=None): return facts = '' - if host.ip: - facts += ' openshift_ip={}'.format(host.ip) - if host.public_ip: - facts += ' openshift_public_ip={}'.format(host.public_ip) - if host.hostname: - facts += ' openshift_hostname={}'.format(host.hostname) - if host.public_hostname: - facts += ' openshift_public_hostname={}'.format(host.public_hostname) - if host.containerized: - facts += ' 
containerized={}'.format(host.containerized) + for prop in HOST_VARIABLES_MAP: + if getattr(host, prop): + facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop)) + if host.other_variables: - for variable, value in host.other_variables.iteritems(): + for variable, value in host.other_variables.items(): facts += " {}={}".format(variable, value) - if host.node_labels: - if role == 'node': - facts += ' openshift_node_labels="{}"'.format(host.node_labels) + + if host.node_labels and role == 'node': + facts += ' openshift_node_labels="{}"'.format(host.node_labels) # Distinguish between three states, no schedulability specified (use default), # explicitly set to True, or explicitly set to False: @@ -211,9 +212,9 @@ def write_host(host, role, inventory, schedulable=None): if installer_host in [host.connect_to, host.hostname, host.public_hostname]: facts += ' ansible_connection=local' if os.geteuid() != 0: - no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift']) + no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n']) if no_pwd_sudo == 1: - print 'The atomic-openshift-installer requires sudo access without a password.' + print('The atomic-openshift-installer requires sudo access without a password.') sys.exit(1) facts += ' ansible_become=yes' @@ -225,6 +226,9 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False): Retrieves system facts from the remote systems. """ installer_log.debug("Inside load_system_facts") + installer_log.debug("load_system_facts will run with Ansible/Openshift environment variables:") + debug_env(env_vars) + FNULL = open(os.devnull, 'w') args = ['ansible-playbook', '-v'] if verbose \ else ['ansible-playbook'] @@ -232,6 +236,8 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False): '--inventory-file={}'.format(inventory_file), os_facts_path]) installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args)) + installer_log.debug("Subprocess will run with Ansible/Openshift environment variables:") + debug_env(env_vars) status = subprocess.call(args, env=env_vars, stdout=FNULL) if status != 0: installer_log.debug("Exit status from subprocess was not 0") @@ -241,9 +247,9 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False): installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml']) try: callback_facts = yaml.safe_load(callback_facts_file) - except yaml.YAMLError, exc: - print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc - print "Try deleting and rerunning the atomic-openshift-installer" + except yaml.YAMLError as exc: + print("Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc) + print("Try deleting and rerunning the atomic-openshift-installer") sys.exit(1) return callback_facts, 0 @@ -280,17 +286,24 @@ def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False): facts_env = os.environ.copy() if 'ansible_log_path' in CFG.settings: facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] - if 'ansible_config' in CFG.settings: - facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(main_playbook_path, inventory_file, facts_env, verbose) def run_ansible(playbook, inventory, env_vars, verbose=False): + 
installer_log.debug("run_ansible will run with Ansible/Openshift environment variables:") + debug_env(env_vars) + args = ['ansible-playbook', '-v'] if verbose \ else ['ansible-playbook'] args.extend([ '--inventory-file={}'.format(inventory), playbook]) + installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args)) return subprocess.call(args, env=env_vars) @@ -303,6 +316,10 @@ def run_uninstall_playbook(hosts, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) @@ -317,4 +334,8 @@ def run_upgrade_playbook(hosts, playbook, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) diff --git a/utils/src/ooinstall/utils.py b/utils/src/ooinstall/utils.py new file mode 100644 index 000000000..c9e3e25e5 --- /dev/null +++ b/utils/src/ooinstall/utils.py @@ -0,0 +1,24 @@ +# pylint: disable=missing-docstring,invalid-name + +import logging +import re + + +installer_log = logging.getLogger('installer') + + +def debug_env(env): + for k in sorted(env.keys()): + if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"): + # pylint: disable=logging-format-interpolation + installer_log.debug("{key}: {value}".format( + key=k, value=env[k])) + + +def is_valid_hostname(hostname): + if not hostname or len(hostname) > 255: + return False + if hostname[-1] == ".": + hostname = hostname[:-1] # strip exactly one dot from the right, if present + allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) + return all(allowed.match(x) for x in hostname.split(".")) diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index 6993794fe..a45be98bf 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -38,31 +38,24 @@ class Variant(object): # WARNING: Keep the versions ordered, most recent first: -OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', - [ - Version('3.3', 'openshift-enterprise'), - ] -) - -REG = Variant('openshift-enterprise', 'Registry', - [ - Version('3.3', 'openshift-enterprise', 'registry'), - ] -) - -origin = Variant('origin', 'OpenShift Origin', - [ - Version('1.2', 'origin'), - ] -) - -LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', - [ - Version('3.2', 'openshift-enterprise'), - Version('3.1', 'openshift-enterprise'), - Version('3.0', 'openshift-enterprise'), - ] -) +OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [ + Version('3.4', 'openshift-enterprise'), +]) + +REG = Variant('openshift-enterprise', 'Registry', [ + Version('3.4', 'openshift-enterprise', 'registry'), +]) + +origin = Variant('origin', 'OpenShift Origin', [ + Version('1.4', 'origin'), +]) + +LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [ + Version('3.3', 'openshift-enterprise'), + Version('3.2', 
'openshift-enterprise'), + Version('3.1', 'openshift-enterprise'), + Version('3.0', 'openshift-enterprise'), +]) # Ordered list of variants we can install, first is the default. SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY) diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index f2216a177..f6a7bde10 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -1,7 +1,7 @@ -enum +ansible configparser pylint -pep8 +setuptools-lint nose coverage mock @@ -9,3 +9,7 @@ flake8 PyYAML click backports.functools_lru_cache +pyOpenSSL +yamllint +tox +detox diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 6d9d443ff..0cb37eaff 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -4,7 +4,8 @@ import copy import os -import ConfigParser + +from six.moves import configparser import ooinstall.cli_installer as cli @@ -408,7 +409,7 @@ class UnattendedCliTests(OOCliFixture): result = self.runner.invoke(cli.cli, self.cli_args) if result.exception is None or result.exit_code != 1: - print "Exit code: %s" % result.exit_code + print("Exit code: %s" % result.exit_code) self.fail("Unexpected CLI return") # unattended with config file and all installed hosts (with --force) @@ -523,7 +524,7 @@ class UnattendedCliTests(OOCliFixture): self.assert_result(result, 0) # Check the inventory file looks as we would expect: - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assertEquals('root', inventory.get('OSEv3:vars', 'ansible_ssh_user')) @@ -566,7 +567,7 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('3.3', written_config['variant_version']) # Make sure the correct value was passed to ansible: - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assertEquals('openshift-enterprise', inventory.get('OSEv3:vars', 'deployment_type')) @@ -594,87 +595,101 @@ class UnattendedCliTests(OOCliFixture): # and written to disk: self.assertEquals('3.3', written_config['variant_version']) - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assertEquals('openshift-enterprise', inventory.get('OSEv3:vars', 'deployment_type')) - @patch('ooinstall.openshift_ansible.run_ansible') - @patch('ooinstall.openshift_ansible.load_system_facts') - def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock): - load_facts_mock.return_value = (MOCK_FACTS, 0) - run_ansible_mock.return_value = 0 - - config = SAMPLE_CONFIG % 'openshift-enterprise' - - self._ansible_config_test(load_facts_mock, run_ansible_mock, - config, None, None) - - @patch('ooinstall.openshift_ansible.run_ansible') - @patch('ooinstall.openshift_ansible.load_system_facts') - def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock): - load_facts_mock.return_value = (MOCK_FACTS, 0) - run_ansible_mock.return_value = 0 - - config = SAMPLE_CONFIG % 'openshift-enterprise' - ansible_config = os.path.join(self.work_dir, 'ansible.cfg') - - self._ansible_config_test(load_facts_mock, run_ansible_mock, - config, ansible_config, ansible_config) - - @patch('ooinstall.openshift_ansible.run_ansible') - @patch('ooinstall.openshift_ansible.load_system_facts') - def 
test_ansible_config_specified_in_installer_config(self, - load_facts_mock, run_ansible_mock): - - load_facts_mock.return_value = (MOCK_FACTS, 0) - run_ansible_mock.return_value = 0 - - ansible_config = os.path.join(self.work_dir, 'ansible.cfg') - config = SAMPLE_CONFIG % 'openshift-enterprise' - config = "%s\nansible_config: %s" % (config, ansible_config) - self._ansible_config_test(load_facts_mock, run_ansible_mock, - config, None, ansible_config) - - #pylint: disable=too-many-arguments - # This method allows for drastically simpler tests to write, and the args - # are all useful. - def _ansible_config_test(self, load_facts_mock, run_ansible_mock, - installer_config, ansible_config_cli=None, expected_result=None): - """ - Utility method for testing the ways you can specify the ansible config. - """ - - load_facts_mock.return_value = (MOCK_FACTS, 0) - run_ansible_mock.return_value = 0 - - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), installer_config) - - self.cli_args.extend(["-c", config_file]) - if ansible_config_cli: - self.cli_args.extend(["--ansible-config", ansible_config_cli]) - self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args) - self.assert_result(result, 0) - - # Test the env vars for facts playbook: - facts_env_vars = load_facts_mock.call_args[0][2] - if expected_result: - self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG']) - else: - # If user running test has rpm installed, this might be set to default: - self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or - facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG) - - # Test the env vars for main playbook: - env_vars = run_ansible_mock.call_args[0][2] - if expected_result: - self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG']) - else: - # If user running test has rpm installed, this might be set to default: - self.assertTrue('ANSIBLE_CONFIG' not in env_vars or - env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG) + # 2016-09-26 - tbielawa - COMMENTING OUT these tests FOR NOW while + # we wait to see if anyone notices that we took away their ability + # to set the ansible_config parameter in the command line options + # and in the installer config file. + # + # We have removed the ability to set the ansible config file + # manually so that our new quieter output mode is the default and + # only output mode. + # + # RE: https://trello.com/c/DSwwizwP - atomic-openshift-install + # should only output relevant information. 
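
The commented-out tests correspond to the ANSIBLE_CONFIG handling earlier in this patch: run_main_playbook now uses the quiet config alone, while run_uninstall_playbook and run_upgrade_playbook layer ansible_quiet_config over any ansible_config, so the installer's reduced output mode always wins when both are set. A minimal sketch of that layering, assuming a settings dict shaped like CFG.settings (paths are illustrative):

    import os

    def build_playbook_env(settings):
        # Mirrors the env-var layering in the playbook runners above; the
        # quiet config is applied last, so it overrides ansible_config.
        env = os.environ.copy()
        if 'ansible_log_path' in settings:
            env['ANSIBLE_LOG_PATH'] = settings['ansible_log_path']
        if 'ansible_config' in settings:
            env['ANSIBLE_CONFIG'] = settings['ansible_config']
        if 'ansible_quiet_config' in settings:
            env['ANSIBLE_CONFIG'] = settings['ansible_quiet_config']
        return env

    env = build_playbook_env({
        'ansible_config': '/etc/ansible/ansible.cfg',      # illustrative path
        'ansible_quiet_config': '/tmp/ansible-quiet.cfg',  # illustrative path
    })
    assert env['ANSIBLE_CONFIG'] == '/tmp/ansible-quiet.cfg'
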
+ + # @patch('ooinstall.openshift_ansible.run_ansible') + # @patch('ooinstall.openshift_ansible.load_system_facts') + # def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock): + # load_facts_mock.return_value = (MOCK_FACTS, 0) + # run_ansible_mock.return_value = 0 + + # config = SAMPLE_CONFIG % 'openshift-enterprise' + + # self._ansible_config_test(load_facts_mock, run_ansible_mock, + # config, None, None) + + # @patch('ooinstall.openshift_ansible.run_ansible') + # @patch('ooinstall.openshift_ansible.load_system_facts') + # def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock): + # load_facts_mock.return_value = (MOCK_FACTS, 0) + # run_ansible_mock.return_value = 0 + + # config = SAMPLE_CONFIG % 'openshift-enterprise' + # ansible_config = os.path.join(self.work_dir, 'ansible.cfg') + + # self._ansible_config_test(load_facts_mock, run_ansible_mock, + # config, ansible_config, ansible_config) + + # @patch('ooinstall.openshift_ansible.run_ansible') + # @patch('ooinstall.openshift_ansible.load_system_facts') + # def test_ansible_config_specified_in_installer_config(self, + # load_facts_mock, run_ansible_mock): + + # load_facts_mock.return_value = (MOCK_FACTS, 0) + # run_ansible_mock.return_value = 0 + + # ansible_config = os.path.join(self.work_dir, 'ansible.cfg') + # config = SAMPLE_CONFIG % 'openshift-enterprise' + # config = "%s\nansible_config: %s" % (config, ansible_config) + # self._ansible_config_test(load_facts_mock, run_ansible_mock, + # config, None, ansible_config) + + # #pylint: disable=too-many-arguments + # # This method allows for drastically simpler tests to write, and the args + # # are all useful. + # def _ansible_config_test(self, load_facts_mock, run_ansible_mock, + # installer_config, ansible_config_cli=None, expected_result=None): + # """ + # Utility method for testing the ways you can specify the ansible config. 
+ # """ + + # load_facts_mock.return_value = (MOCK_FACTS, 0) + # run_ansible_mock.return_value = 0 + + # config_file = self.write_config(os.path.join(self.work_dir, + # 'ooinstall.conf'), installer_config) + + # self.cli_args.extend(["-c", config_file]) + # if ansible_config_cli: + # self.cli_args.extend(["--ansible-config", ansible_config_cli]) + # self.cli_args.append("install") + # result = self.runner.invoke(cli.cli, self.cli_args) + # self.assert_result(result, 0) + + # # Test the env vars for facts playbook: + # facts_env_vars = load_facts_mock.call_args[0][2] + # if expected_result: + # self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG']) + # else: + # # If user running test has rpm installed, this might be set to default: + # self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or + # facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG) + + # # Test the env vars for main playbook: + # env_vars = run_ansible_mock.call_args[0][2] + # if expected_result: + # self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG']) + # else: + # # If user running test has rpm installed, this might be set to default: + # # + # # By default we will use the quiet config + # self.assertTrue('ANSIBLE_CONFIG' not in env_vars or + # env_vars['ANSIBLE_CONFIG'] == cli.QUIET_ANSIBLE_CONFIG) # unattended with bad config file and no installed hosts (without --force) @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -816,7 +831,7 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 4) - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') @@ -828,7 +843,7 @@ class AttendedCliTests(OOCliFixture): # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') - def test_add_nodes(self, load_facts_mock, run_playbook_mock): + def test_scaleup_hint(self, load_facts_mock, run_playbook_mock): # Modify the mock facts to return a version indicating OpenShift # is already installed on our master, and the first node. @@ -852,13 +867,12 @@ class AttendedCliTests(OOCliFixture): result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input) - self.assert_result(result, 0) - self._verify_load_facts(load_facts_mock) - self._verify_run_playbook(run_playbook_mock, 3, 2) + # This is testing the install workflow so we want to make sure we + # exit with the appropriate hint. 
+ self.assertTrue('scaleup' in result.output) + self.assert_result(result, 1) - written_config = read_yaml(self.config_file) - self._verify_config_hosts(written_config, 3) @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') @@ -883,30 +897,30 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, 3) - #interactive with config file and all installed hosts - @patch('ooinstall.openshift_ansible.run_main_playbook') - @patch('ooinstall.openshift_ansible.load_system_facts') - def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): - mock_facts = copy.deepcopy(MOCK_FACTS) - mock_facts['10.0.0.1']['common']['version'] = "3.0.0" - mock_facts['10.0.0.2']['common']['version'] = "3.0.0" - - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ], - add_nodes=[('10.0.0.2', False, False)], - ssh_user='root', - variant_num=1, - schedulable_masters_ok=True, - confirm_facts='y', - storage='10.0.0.1',) - - self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, - run_playbook_mock, - cli_input, - exp_hosts_len=2, - exp_hosts_to_run_on_len=2, - force=False) +# #interactive with config file and all installed hosts +# @patch('ooinstall.openshift_ansible.run_main_playbook') +# @patch('ooinstall.openshift_ansible.load_system_facts') +# def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): +# mock_facts = copy.deepcopy(MOCK_FACTS) +# mock_facts['10.0.0.1']['common']['version'] = "3.0.0" +# mock_facts['10.0.0.2']['common']['version'] = "3.0.0" +# +# cli_input = build_input(hosts=[ +# ('10.0.0.1', True, False), +# ], +# add_nodes=[('10.0.0.2', False, False)], +# ssh_user='root', +# variant_num=1, +# schedulable_masters_ok=True, +# confirm_facts='y', +# storage='10.0.0.1',) +# +# self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, +# run_playbook_mock, +# cli_input, +# exp_hosts_len=2, +# exp_hosts_to_run_on_len=2, +# force=False) #interactive multimaster: one more node than master @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -936,7 +950,7 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 6) - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') @@ -977,7 +991,7 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 5) - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=True') @@ -1069,7 +1083,7 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 1) - inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=True') @@ -1103,7 +1117,7 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 4) - inventory = 
ConfigParser.ConfigParser(allow_no_value=True) + inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') diff --git a/utils/test/fixture.py b/utils/test/fixture.py index a883e5c56..5200d275d 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -65,13 +65,13 @@ class OOCliFixture(OOInstallFixture): def assert_result(self, result, exit_code): if result.exit_code != exit_code: - print "Unexpected result from CLI execution" - print "Exit code: %s" % result.exit_code - print "Exception: %s" % result.exception - print result.exc_info + print("Unexpected result from CLI execution") + print("Exit code: %s" % result.exit_code) + print("Exception: %s" % result.exception) + print(result.exc_info) import traceback traceback.print_exception(*result.exc_info) - print "Output:\n%s" % result.output + print("Output:\n%s" % result.output) self.fail("Exception during CLI execution") def _verify_load_facts(self, load_facts_mock): @@ -138,8 +138,8 @@ class OOCliFixture(OOInstallFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, exp_hosts_len) - if "Uninstalled" in result.output: - # verify we exited on seeing uninstalled hosts + if "If you want to force reinstall" in result.output: + # verify we exited on seeing installed hosts self.assertEqual(result.exit_code, 1) else: self.assert_result(result, 0) @@ -156,7 +156,7 @@ class OOCliFixture(OOInstallFixture): #pylint: disable=too-many-arguments,too-many-branches,too-many-statements def build_input(ssh_user=None, hosts=None, variant_num=None, add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, - master_lb=None, storage=None): + master_lb=('', False), storage=None): """ Build an input string simulating a user entering values in an interactive attended install. 
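
One detail worth calling out in build_input: master_lb now defaults to ('', False) instead of None, so the load-balancer prompt is always answered, with the empty string simulating the user pressing Enter to skip it. The hunk that follows shows the new handling; a standalone sketch of the same logic:

    def master_lb_answers(master_lb=('', False)):
        # Mirrors the master_lb handling in build_input (next hunk).
        inputs = []
        # A list or tuple as the first element supplies several candidate
        # answers, e.g. one the installer rejects followed by a valid one:
        if isinstance(master_lb[0], (list, tuple)):
            inputs.extend(master_lb[0])
        else:
            inputs.append(master_lb[0])  # '' == press Enter to skip the prompt
        # Only answer the follow-up y/n confirmation when a host was entered:
        if master_lb[0]:
            inputs.append('y' if master_lb[1] else 'n')
        return inputs

    assert master_lb_answers() == ['']
    assert master_lb_answers(('lb.example.com', True)) == ['lb.example.com', 'y']
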
@@ -204,11 +204,11 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, i += 1 # You can pass a single master_lb or a list if you intend for one to get rejected: - if master_lb: - if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple): - inputs.extend(master_lb[0]) - else: - inputs.append(master_lb[0]) + if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple): + inputs.extend(master_lb[0]) + else: + inputs.append(master_lb[0]) + if master_lb[0]: inputs.append('y' if master_lb[1] else 'n') if storage: @@ -248,6 +248,7 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, inputs.extend([ confirm_facts, 'y', # lets do this + 'y', ]) return '\n'.join(inputs) diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py index b5068cc14..2b4fce512 100644 --- a/utils/test/oo_config_tests.py +++ b/utils/test/oo_config_tests.py @@ -8,7 +8,10 @@ import tempfile import shutil import yaml +from six.moves import cStringIO + from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError +import ooinstall.openshift_ansible SAMPLE_CONFIG = """ variant: openshift-enterprise @@ -224,3 +227,81 @@ class HostTests(OOInstallFixture): 'public_hostname': 'a.example.com', } self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props) + + def test_inventory_file_quotes_node_labels(self): + """Verify a host entry wraps openshift_node_labels value in double quotes""" + yaml_props = { + 'ip': '192.168.0.1', + 'hostname': 'a.example.com', + 'connect_to': 'a-private.example.com', + 'public_ip': '192.168.0.1', + 'public_hostname': 'a.example.com', + 'new_host': True, + 'roles': ['node'], + 'node_labels': { + 'region': 'infra' + }, + + } + + new_node = Host(**yaml_props) + inventory = cStringIO() + # This is what the 'write_host' function generates. write_host + # has no return value, it just writes directly to the file + # 'inventory' which in this test-case is a StringIO object + ooinstall.openshift_ansible.write_host( + new_node, + 'node', + inventory, + schedulable=True) + # read the value of what was written to the inventory "file" + legacy_inventory_line = inventory.getvalue() + + # Given the `yaml_props` above we should see a line like this: + # openshift_node_labels="{'region': 'infra'}" + node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash + node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash + + # The good line is present in the written inventory line + self.assertIn(node_labels_expected, legacy_inventory_line) + # An unquoted version is not present + self.assertNotIn(node_labels_bad, legacy_inventory_line) + + + # def test_new_write_inventory_same_as_legacy(self): + # """Verify the original write_host function produces the same output as the new method""" + # yaml_props = { + # 'ip': '192.168.0.1', + # 'hostname': 'a.example.com', + # 'connect_to': 'a-private.example.com', + # 'public_ip': '192.168.0.1', + # 'public_hostname': 'a.example.com', + # 'new_host': True, + # 'roles': ['node'], + # 'other_variables': { + # 'zzz': 'last', + # 'foo': 'bar', + # 'aaa': 'first', + # }, + # } + + # new_node = Host(**yaml_props) + # inventory = cStringIO() + + # # This is what the original 'write_host' function will + # # generate. 
write_host has no return value, it just writes + # # directly to the file 'inventory' which in this test-case is + # # a StringIO object + # ooinstall.openshift_ansible.write_host( + # new_node, + # 'node', + # inventory, + # schedulable=True) + # legacy_inventory_line = inventory.getvalue() + + # # This is what the new method in the Host class generates + # new_inventory_line = new_node.inventory_string('node', schedulable=True) + + # self.assertEqual( + # legacy_inventory_line, + # new_inventory_line) diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py new file mode 100644 index 000000000..5847fe37b --- /dev/null +++ b/utils/test/openshift_ansible_tests.py @@ -0,0 +1,71 @@ +import os +import unittest +import tempfile +import shutil +import yaml + +from six.moves import configparser + +from ooinstall import openshift_ansible +from ooinstall.oo_config import Host, OOConfig + + +BASE_CONFIG = """ +--- +variant: openshift-enterprise +variant_version: 3.3 +version: v2 +deployment: + ansible_ssh_user: cloud-user + hosts: [] + roles: + master: + node: +""" + + +class TestOpenShiftAnsible(unittest.TestCase): + + def setUp(self): + self.tempfiles = [] + self.work_dir = tempfile.mkdtemp(prefix='openshift_ansible_tests') + self.configfile = os.path.join(self.work_dir, 'ooinstall.config') + with open(self.configfile, 'w') as config_file: + config_file.write(BASE_CONFIG) + self.inventory = os.path.join(self.work_dir, 'hosts') + config = OOConfig(self.configfile) + config.settings['ansible_inventory_path'] = self.inventory + openshift_ansible.set_config(config) + + def tearDown(self): + shutil.rmtree(self.work_dir) + + def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False): + hosts = [] + for num in range(1, num_hosts + 1): + hosts.append(Host(connect_to=name_prefix + str(num), + roles=roles, new_host=new_host)) + return hosts + + def test_generate_inventory_new_nodes(self): + hosts = self.generate_hosts(1, 'master', roles=(['master', 'etcd'])) + hosts.extend(self.generate_hosts(1, 'node', roles=['node'])) + hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True)) + openshift_ansible.generate_inventory(hosts) + inventory = configparser.ConfigParser(allow_no_value=True) + inventory.read(self.inventory) + self.assertTrue(inventory.has_section('new_nodes')) + self.assertTrue(inventory.has_option('new_nodes', 'new_node1')) + + def test_write_inventory_vars_role_vars(self): + with open(self.inventory, 'w') as inv: + openshift_ansible.CFG.deployment.roles['master'].variables={'color': 'blue'} + openshift_ansible.CFG.deployment.roles['node'].variables={'color': 'green'} + openshift_ansible.write_inventory_vars(inv, None) + + inventory = configparser.ConfigParser(allow_no_value=True) + inventory.read(self.inventory) + self.assertTrue(inventory.has_section('masters:vars')) + self.assertEquals('blue', inventory.get('masters:vars', 'color')) + self.assertTrue(inventory.has_section('nodes:vars')) + self.assertEquals('green', inventory.get('nodes:vars', 'color')) diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py new file mode 100644 index 000000000..cbce64f7e --- /dev/null +++ b/utils/test/test_utils.py @@ -0,0 +1,99 @@ +""" +Unittests for ooinstall utils. +""" + +import six +import unittest +import logging +import sys +import copy +from ooinstall.utils import debug_env, is_valid_hostname +import mock + + +class TestUtils(unittest.TestCase): + """ + Parent unittest TestCase. 
+ """ + + def setUp(self): + self.debug_all_params = { + 'OPENSHIFT_FOO': 'bar', + 'ANSIBLE_FOO': 'bar', + 'OO_FOO': 'bar' + } + + self.expected = [ + mock.call('ANSIBLE_FOO: bar'), + mock.call('OPENSHIFT_FOO: bar'), + mock.call('OO_FOO: bar'), + ] + + + ###################################################################### + # Validate ooinstall.utils.debug_env functionality + + def test_utils_debug_env_all_debugged(self): + """Verify debug_env debugs specific env variables""" + + with mock.patch('ooinstall.utils.installer_log') as _il: + debug_env(self.debug_all_params) + + # Debug was called for each item we expect + self.assertEqual( + len(self.debug_all_params), + _il.debug.call_count) + + # Each item we expect was logged + six.assertCountEqual( + self, + self.expected, + _il.debug.call_args_list) + + def test_utils_debug_env_some_debugged(self): + """Verify debug_env skips non-wanted env variables""" + debug_some_params = copy.deepcopy(self.debug_all_params) + # This will not be logged by debug_env + debug_some_params['MG_FRBBR'] = "SKIPPED" + + with mock.patch('ooinstall.utils.installer_log') as _il: + debug_env(debug_some_params) + + # The actual number of debug calls was less than the + # number of items passed to debug_env + self.assertLess( + _il.debug.call_count, + len(debug_some_params)) + + six.assertCountEqual( + self, + self.expected, + _il.debug.call_args_list) + + ###################################################################### + def test_utils_is_valid_hostname_invalid(self): + """Verify is_valid_hostname can detect None or too-long hostnames""" + # A hostname that's empty, None, or more than 255 chars is invalid + empty_hostname = '' + res = is_valid_hostname(empty_hostname) + self.assertFalse(res) + + none_hostname = None + res = is_valid_hostname(none_hostname) + self.assertFalse(res) + + too_long_hostname = "a" * 256 + res = is_valid_hostname(too_long_hostname) + self.assertFalse(res) + + def test_utils_is_valid_hostname_ends_with_dot(self): + """Verify is_valid_hostname can parse hostnames with trailing periods""" + hostname = "foo.example.com." + res = is_valid_hostname(hostname) + self.assertTrue(res) + + def test_utils_is_valid_hostname_normal_hostname(self): + """Verify is_valid_hostname can parse regular hostnames""" + hostname = "foo.example.com" + res = is_valid_hostname(hostname) + self.assertTrue(res) diff --git a/utils/tox.ini b/utils/tox.ini new file mode 100644 index 000000000..1308f7505 --- /dev/null +++ b/utils/tox.ini @@ -0,0 +1,16 @@ +[tox] +minversion=2.3.1 +envlist = + py{27,35}-{flake8,unit,pylint} +skipsdist=True +skip_missing_interpreters=True + +[testenv] +usedevelop=True +deps = + -rtest-requirements.txt + py35-flake8: flake8-bugbear +commands = + flake8: python setup.py flake8 + unit: python setup.py nosetests + pylint: python setup.py lint diff --git a/utils/workflows/enterprise_deploy/openshift.sh b/utils/workflows/enterprise_deploy/openshift.sh deleted file mode 100644 index 040a9a84d..000000000 --- a/utils/workflows/enterprise_deploy/openshift.sh +++ /dev/null @@ -1,2 +0,0 @@ -# This file is not used for OpenShift 3.0. It's merely an artifact of the the -# installation framework originally used for OpenShift 2.x. |