36 files changed, 822 insertions(+), 779 deletions(-)
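The bulk of this commit replaces the repository's ad-hoc lint scripts under git/ with a tox-driven setup (tox.ini, setup.py, and test-requirements.txt are added below). As a rough sketch of the workflow the new configuration enables — the environment names and per-factor commands come straight from the tox.ini in this diff:

```
# Factors in tox.ini map to commands roughly as follows:
#   flake8   -> flake8
#   pylint   -> python setup.py lint
#   yamllint -> python setup.py yamllint
#   unit     -> nosetests
tox -l                          # list the generated environments
tox -e py35-ansible22-pylint    # run one env: pylint on Python 3.5 with ansible~=2.2
```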
diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..e1d918755 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,5 @@ +[run] +omit= + */lib/python*/site-packages/* + */lib/python*/* + /usr/* diff --git a/.gitignore b/.gitignore index ac249d5eb..9af271235 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,5 @@ ansible.cfg .tox .coverage *.egg-info +.eggs +cover diff --git a/git/.pylintrc b/.pylintrc index 411330fe7..a32bd3d68 100644 --- a/git/.pylintrc +++ b/.pylintrc @@ -1,5 +1,4 @@ [MASTER] - # Specify a configuration file. #rcfile= @@ -7,12 +6,9 @@ # pygtk.require(). #init-hook= -# Profiled execution. -#profile=no - # Add files or directories to the blacklist. They should be base names, not # paths. -ignore=CVS +ignore=CVS,setup.py # Pickle collected data for later comparisons. persistent=no @@ -21,14 +17,6 @@ persistent=no # usually to register additional checkers. load-plugins= -# Deprecated. It was used to include message's id in output. Use --msg-template -# instead. -#include-ids=no - -# Deprecated. It was used to include symbolic ids of messages in output. Use -# --msg-template instead. -#symbols=no - # Use multiple processes to speed up Pylint. jobs=1 @@ -58,7 +46,8 @@ confidence= # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. #enable= # Disable the message, report, category or checker with the given id(s). You @@ -70,8 +59,7 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -# w0511 - fixme - disabled because TODOs are acceptable -disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801,locally-disabled,file-ignored +disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating [REPORTS] @@ -96,20 +84,24 @@ reports=no # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (RP0004). -#comment=no - # Template used to display messages. This is a python new-style format string # used to format the message information. 
See doc for all details #msg-template= -[LOGGING] +[SIMILARITIES] -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=yes [BASIC] @@ -192,44 +184,23 @@ method-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match function or class names that do # not require a docstring. -no-docstring-rgx=__.*__ +no-docstring-rgx=^_ # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=-1 -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=0 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=yes - - -[VARIABLES] +[ELIF] -# Tells whether we should check for unused import in __init__ files. -init-import=no +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= +[MISCELLANEOUS] -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO [TYPECHECK] @@ -240,27 +211,30 @@ ignore-mixin-members=yes # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. ignored-modules= # List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). -ignored-classes=SQLObject - -# When zope mode is activated, add a predefined set of Zope acquired attributes -# to generated-members. -#zope=no +# (useful for classes with attributes dynamically set). This supports can work +# with qualified names. +ignored-classes= # List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E0201 when accessed. Python regular +# system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. -generated-members=REQUEST,acl_users,aq_parent +generated-members= [SPELLING] -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. +# Spelling dictionary name. 
Available dictionaries: en_ZW (myspell), en_NG +# (myspell), en_NA (myspell), en_NZ (myspell), en_PH (myspell), en_AG +# (myspell), en_BW (myspell), en_IE (myspell), en_ZM (myspell), en_DK +# (myspell), en_CA (myspell), en_GH (myspell), en_IN (myspell), en_BZ +# (myspell), en_MW (myspell), en_TT (myspell), en_JM (myspell), en_GB +# (myspell), en_ZA (myspell), en_SG (myspell), en_AU (myspell), en_US +# (myspell), en_BS (myspell), en_HK (myspell). spelling-dict= # List of comma separated words that should not be checked. @@ -274,12 +248,6 @@ spelling-private-dict-file= spelling-store-unknown-words=no -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - [FORMAT] # Maximum number of characters on a single line. @@ -292,23 +260,67 @@ ignore-long-lines=^\s*(# )?<?https?://\S+>?$ # else. single-line-if-stmt=no -# List of optional constructs for which whitespace checking is disabled +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. no-space-check=trailing-comma,dict-separator # Maximum number of lines in a module max-module-lines=1000 -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' -# Number of spaces of indent required inside a hanging or continued line. +# Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. expected-line-ending-format= +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_$|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + [DESIGN] # Maximum number of arguments for function / method @@ -342,21 +354,8 @@ min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. 
-valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 [IMPORTS] diff --git a/.travis.yml b/.travis.yml index 0e3a75df7..f0a228c23 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,8 +11,10 @@ python: install: - pip install -r requirements.txt + - pip install tox-travis script: # TODO(rhcarvalho): check syntax of other important entrypoint playbooks - ansible-playbook --syntax-check playbooks/byo/config.yml - - cd utils && make ci + - tox + - cd utils && tox diff --git a/git/.yamllint b/.yamllint index 573321a94..573321a94 100644 --- a/git/.yamllint +++ b/.yamllint diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1145da495..83c844e28 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,30 +66,55 @@ These are plugins used in playbooks and roles: └── test Contains tests. ``` -### Others - -``` -. -└── git Contains some helper scripts for repository maintenance. -``` - ## Building RPMs See the [RPM build instructions](BUILD.md). ## Running tests -We use [Nose](http://readthedocs.org/docs/nose/) as a test runner. Make sure it -is installed along with other test dependencies: +This section covers how to run tests for the root of this repo, running tests +for the oo-install wrapper is described in [utils/README.md](utils/README.md). + +We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run +tests. Alternatively, tests can be run using +[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in +parallel + ``` -pip install -r utils/test-requirements.txt +pip install tox detox ``` -Run the tests with: +List the test environments available: +``` +tox -l +``` + +Run all of the tests with: +``` +tox +``` + +Run all of the tests in parallel with detox: +``` +detox +``` + +Running a particular test environment (python 2.7 flake8 tests in this case): +``` +tox -e py27-ansible22-flake8 +``` + +Running a particular test environment in a clean virtualenv (python 3.5 pylint +tests in this case): +``` +tox -r -e py35-ansible22-pylint +``` +If you want to enter the virtualenv created by tox to do additional +testing/debugging (py27-flake8 env in this case): ``` -nosetests +source .tox/py27-ansible22-flake8/bin/activate ``` ## Submitting contributions diff --git a/git/parent.py b/git/parent.py deleted file mode 100755 index 92f57df3e..000000000 --- a/git/parent.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# flake8: noqa -# pylint: skip-file -''' - Script to determine if this commit has also - been merged through the stage branch -''' -# -# Usage: -# parent_check.py <branch> <commit_id> -# -# -import sys -import subprocess - -def run_cli_cmd(cmd, in_stdout=None, in_stderr=None): - '''Run a command and return its output''' - if not in_stderr: - proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False) - else: - proc = subprocess.check_output(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False) - stdout, stderr = proc.communicate() - if proc.returncode != 0: - return {"rc": proc.returncode, "error": stderr} - else: - return {"rc": proc.returncode, "result": stdout} - -def main(): - '''Check to ensure that the commit that is currently - being submitted is also in the stage branch. 
- - if it is, succeed - else, fail - ''' - branch = 'prod' - - if sys.argv[1] != branch: - sys.exit(0) - - # git co stg - results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg']) - - # git pull latest - results = run_cli_cmd(['/usr/bin/git', 'pull']) - - # setup on the <prod> branch in git - results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod']) - - results = run_cli_cmd(['/usr/bin/git', 'pull']) - # merge the passed in commit into my current <branch> - - commit_id = sys.argv[2] - results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id]) - - # get the differences from stg and <branch> - results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod']) - - # exit here with error code if the result coming back is an error - if results['rc'] != 0: - print results['error'] - sys.exit(results['rc']) - - count = 0 - # Each 'result' is a commit - # Walk through each commit and see if it is in stg - for commit in results['result'].split('\n'): - - # continue if it is already in stg - if not commit or commit.startswith('<'): - continue - - # remove the first char '>' - commit = commit[1:] - - # check if any remote branches contain $commit - results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None) - - # if this comes back empty, nothing contains it, we can skip it as - # we have probably created the merge commit here locally - if results['rc'] == 0 and len(results['result']) == 0: - continue - - # The results generally contain origin/pr/246/merge and origin/pr/246/head - # this is the pull request which would contain the commit in question. - # - # If the results do not contain origin/stg then stage does not contain - # the commit in question. Therefore we need to alert! - if 'origin/stg' not in results['result']: - print "\nFAILED: (These commits are not in stage.)\n" - print "\t%s" % commit - count += 1 - - # Exit with count of commits in #{branch} but not stg - sys.exit(count) - -if __name__ == '__main__': - main() diff --git a/git/pylint.sh b/git/pylint.sh deleted file mode 100755 index 3acf9cc8c..000000000 --- a/git/pylint.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -set -eu - -ANSIBLE_UPSTREAM_FILES=( - 'inventory/aws/hosts/ec2.py' - 'inventory/gce/hosts/gce.py' - 'inventory/libvirt/hosts/libvirt_generic.py' - 'inventory/openstack/hosts/nova.py' - 'lookup_plugins/sequence.py' - 'playbooks/gce/openshift-cluster/library/gce.py' - ) - -OLDREV=$1 -NEWREV=$2 -#TRG_BRANCH=$3 - -PYTHON=$(which python) - -set +e -PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$") -set -e - -FILES_TO_TEST="" - -for PY_FILE in $PY_DIFF; do - IGNORE_FILE=false - for UPSTREAM_FILE in "${ANSIBLE_UPSTREAM_FILES[@]}"; do - if [ "${PY_FILE}" == "${UPSTREAM_FILE}" ]; then - IGNORE_FILE=true - break - fi - done - - if [ "${IGNORE_FILE}" == true ]; then - echo "Skipping file ${PY_FILE} as an upstream Ansible file..." 
- continue - fi - - if [ -e "${PY_FILE}" ]; then - FILES_TO_TEST="${FILES_TO_TEST} ${PY_FILE}" - fi -done - -export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/ - -if [ "${FILES_TO_TEST}" != "" ]; then - echo "Testing files: ${FILES_TO_TEST}" - exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST} -else - exit 0 -fi diff --git a/git/yaml_validation.py b/git/yaml_validation.py deleted file mode 100755 index 6672876bb..000000000 --- a/git/yaml_validation.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -# flake8: noqa -# -# python yaml validator for a git commit -# -''' -python yaml validator for a git commit -''' -import shutil -import sys -import os -import tempfile -import subprocess -import yaml - -def get_changes(oldrev, newrev, tempdir): - '''Get a list of git changes from oldrev to newrev''' - proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev, - newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE) - stdout, _ = proc.communicate() - files = stdout.split('\n') - - # No file changes - if not files: - return [] - - cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir) - proc = subprocess.Popen(cmd, shell=True) - _, _ = proc.communicate() - - rfiles = [] - for dirpath, _, fnames in os.walk(tempdir): - for fname in fnames: - rfiles.append(os.path.join(dirpath, fname)) - - return rfiles - -def main(): - ''' - Perform yaml validation - ''' - results = [] - try: - tmpdir = tempfile.mkdtemp(prefix='jenkins-git-') - old, new, _ = sys.argv[1:] - - for file_mod in get_changes(old, new, tmpdir): - - print "+++++++ Received: %s" % file_mod - - # if the file extensions is not yml or yaml, move along. - if not file_mod.endswith('.yml') and not file_mod.endswith('.yaml'): - continue - - # We use symlinks in our repositories, ignore them. 
- if os.path.islink(file_mod): - continue - - try: - yaml.load(open(file_mod)) - results.append(True) - - except yaml.scanner.ScannerError as yerr: - print yerr - results.append(False) - finally: - shutil.rmtree(tmpdir) - - if not all(results): - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py index ac2f0430a..d63e07b64 100755 --- a/inventory/libvirt/hosts/libvirt_generic.py +++ b/inventory/libvirt/hosts/libvirt_generic.py @@ -61,11 +61,11 @@ class LibvirtInventory(object): self.parse_cli_args() if self.args.host: - print _json_format_dict(self.get_host_info(), self.args.pretty) + print(_json_format_dict(self.get_host_info(), self.args.pretty)) elif self.args.list: - print _json_format_dict(self.get_inventory(), self.args.pretty) + print(_json_format_dict(self.get_inventory(), self.args.pretty)) else: # default action with no options - print _json_format_dict(self.get_inventory(), self.args.pretty) + print(_json_format_dict(self.get_inventory(), self.args.pretty)) def read_settings(self): ''' Reads the settings from the libvirt.ini file ''' @@ -115,12 +115,12 @@ class LibvirtInventory(object): conn = libvirt.openReadOnly(self.libvirt_uri) if conn is None: - print "Failed to open connection to %s" % self.libvirt_uri + print("Failed to open connection to %s" % self.libvirt_uri) sys.exit(1) domains = conn.listAllDomains() if domains is None: - print "Failed to list domains for connection %s" % self.libvirt_uri + print("Failed to list domains for connection %s" % self.libvirt_uri) sys.exit(1) for domain in domains: diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 665ede1cb..955772486 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -15,6 +15,7 @@ BuildArch: noarch Requires: ansible >= 2.2.0.0-1 Requires: python2 +Requires: python-six Requires: openshift-ansible-docs = %{version}-%{release} %description diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index b9966e715..f0cfa7f55 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -75,6 +75,10 @@ - hosts: nodes become: yes + vars: + node_dirs: + - "/etc/origin" + - "/var/lib/origin" tasks: - name: unmask services command: systemctl unmask "{{ item }}" @@ -83,63 +87,66 @@ with_items: - firewalld - - name: Remove packages - package: name={{ item }} state=absent - when: not is_atomic | bool - with_items: - - atomic-enterprise - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - atomic-openshift - - atomic-openshift-clients - - atomic-openshift-excluder - - atomic-openshift-docker-excluder - - atomic-openshift-node - - atomic-openshift-sdn-ovs - - cockpit-bridge - - cockpit-docker - - cockpit-shell - - cockpit-ws - - kubernetes-client - - openshift - - openshift-node - - openshift-sdn - - openshift-sdn-ovs - - openvswitch - - origin - - origin-excluder - - origin-docker-excluder - - origin-clients - - origin-node - - origin-sdn-ovs - - tuned-profiles-atomic-enterprise-node - - tuned-profiles-atomic-openshift-node - - tuned-profiles-openshift-node - - tuned-profiles-origin-node - - - name: Remove flannel package - package: name=flannel state=absent - when: openshift_use_flannel | default(false) | bool and not is_atomic | bool - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - name: Remove br0 interface - shell: ovs-vsctl del-br br0 - changed_when: False - failed_when: False - - - 
name: Remove linux interfaces - shell: ip link del "{{ item }}" - changed_when: False - failed_when: False - with_items: - - lbr0 - - vlinuxbr - - vovsbr + - block: + - block: + - name: Remove packages + package: name={{ item }} state=absent + with_items: + - atomic-enterprise + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - atomic-openshift + - atomic-openshift-clients + - atomic-openshift-excluder + - atomic-openshift-docker-excluder + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - cockpit-bridge + - cockpit-docker + - cockpit-shell + - cockpit-ws + - kubernetes-client + - openshift + - openshift-node + - openshift-sdn + - openshift-sdn-ovs + - openvswitch + - origin + - origin-excluder + - origin-docker-excluder + - origin-clients + - origin-node + - origin-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - tuned-profiles-atomic-openshift-node + - tuned-profiles-openshift-node + - tuned-profiles-origin-node + + - name: Remove flannel package + package: name=flannel state=absent + when: openshift_use_flannel | default(false) | bool + when: "{{ not is_atomic | bool }}" + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - name: Remove br0 interface + shell: ovs-vsctl del-br br0 + changed_when: False + failed_when: False + + - name: Remove linux interfaces + shell: ip link del "{{ item }}" + changed_when: False + failed_when: False + with_items: + - lbr0 + - vlinuxbr + - vovsbr + when: "{{ openshift_remove_all | default(true) | bool }}" - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true changed_when: False @@ -176,28 +183,57 @@ failed_when: False with_items: "{{ exited_containers_to_delete.results }}" - - shell: docker images | egrep {{ item }} | awk '{ print $3 }' - changed_when: False - failed_when: False - register: images_to_delete + - block: + - block: + - shell: docker images | egrep {{ item }} | awk '{ print $3 }' + changed_when: False + failed_when: False + register: images_to_delete + with_items: + - registry\.access\..*redhat\.com/openshift3 + - registry\.access\..*redhat\.com/aep3 + - registry\.qe\.openshift\.com/.* + - registry\.access\..*redhat\.com/rhel7/etcd + - docker.io/openshift + + - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ images_to_delete.results }}" + when: "{{ openshift_uninstall_images | default(True) | bool }}" + + - name: remove sdn drop files + file: + path: /run/openshift-sdn + state: absent + + - name: Remove files owned by RPMs + file: path={{ item }} state=absent + with_items: + - /etc/sysconfig/openshift-node + - /etc/sysconfig/openvswitch + - /run/openshift-sdn + when: "{{ openshift_remove_all | default(True) | bool }}" + + - find: path={{ item }} file_type=file + register: files with_items: - - registry\.access\..*redhat\.com/openshift3 - - registry\.access\..*redhat\.com/aep3 - - registry\.qe\.openshift\.com/.* - - registry\.access\..*redhat\.com/rhel7/etcd - - docker.io/openshift - when: openshift_uninstall_images | default(True) | bool - - - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" - changed_when: False - failed_when: False - with_items: "{{ images_to_delete.results }}" - when: openshift_uninstall_images | default(True) | bool + - "{{ node_dirs }}" + + - find: path={{ item }} file_type=directory + register: directories + with_items: + - "{{ node_dirs }}" - - name: Remove sdn drop files - file: - path: 
/run/openshift-sdn - state: absent + - file: path={{ item.1.path }} state=absent + with_subelements: + - "{{ files.results | default([]) }}" + - files + + - file: path={{ item.1.path }} state=absent + with_subelements: + - "{{ directories.results | default([]) }}" + - files - name: Remove remaining files file: path={{ item }} state=absent @@ -209,13 +245,10 @@ - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh - /etc/openshift - /etc/openshift-sdn - - /etc/origin - /etc/sysconfig/atomic-enterprise-node - /etc/sysconfig/atomic-openshift-node - /etc/sysconfig/atomic-openshift-node-dep - - /etc/sysconfig/openshift-node - /etc/sysconfig/openshift-node-dep - - /etc/sysconfig/openvswitch - /etc/sysconfig/origin-node - /etc/sysconfig/origin-node - /etc/sysconfig/origin-node-dep @@ -227,10 +260,8 @@ - /etc/systemd/system/origin-node-dep.service - /etc/systemd/system/origin-node.service - /etc/systemd/system/origin-node.service.wants - - /run/openshift-sdn - /var/lib/atomic-enterprise - /var/lib/openshift - - /var/lib/origin - name: restart docker service: name=docker state=restarted @@ -238,9 +269,12 @@ - name: restart NetworkManager service: name=NetworkManager state=restarted - - hosts: masters become: yes + vars: + master_dirs: + - "/etc/origin" + - "/var/lib/origin" tasks: - name: unmask services command: systemctl unmask "{{ item }}" @@ -252,7 +286,7 @@ - name: Remove packages package: name={{ item }} state=absent - when: not is_atomic | bool + when: not is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - atomic-enterprise - atomic-enterprise-master @@ -283,6 +317,33 @@ - shell: systemctl daemon-reload changed_when: False + - name: Remove files owned by RPMs + file: path={{ item }} state=absent + when: openshift_remove_all | default(True) | bool + with_items: + - /etc/sysconfig/atomic-openshift-master + - /etc/sysconfig/openvswitch + + - find: path={{ item }} file_type=file + register: files + with_items: + - "{{ master_dirs }}" + + - find: path={{ item }} file_type=directory + register: directories + with_items: + - "{{ master_dirs }}" + + - file: path={{ item.1.path }} state=absent + with_subelements: + - "{{ files.results | default([]) }}" + - files + + - file: path={{ item.1.path }} state=absent + with_subelements: + - "{{ directories.results | default([]) }}" + - files + - name: Remove remaining files file: path={{ item }} state=absent with_items: @@ -292,7 +353,6 @@ - /etc/corosync - /etc/openshift - /etc/openshift-sdn - - /etc/origin - /etc/systemd/system/atomic-openshift-master.service - /etc/systemd/system/atomic-openshift-master-api.service - /etc/systemd/system/atomic-openshift-master-controllers.service @@ -303,14 +363,12 @@ - /etc/sysconfig/atomic-enterprise-master - /etc/sysconfig/atomic-enterprise-master-api - /etc/sysconfig/atomic-enterprise-master-controllers - - /etc/sysconfig/atomic-openshift-master - /etc/sysconfig/atomic-openshift-master-api - /etc/sysconfig/atomic-openshift-master-controllers - /etc/sysconfig/origin-master - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - /etc/sysconfig/openshift-master - - /etc/sysconfig/openvswitch - /etc/sysconfig/origin-master - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers @@ -318,7 +376,6 @@ - /usr/share/openshift/examples - /var/lib/atomic-enterprise - /var/lib/openshift - - /var/lib/origin - /var/lib/pacemaker - /var/lib/pcsd - /usr/lib/systemd/system/atomic-openshift-master-api.service @@ -339,6 +396,10 @@ - hosts: etcd become: yes + vars: + 
etcd_dirs: + - "/etc/etcd" + - "/var/lib/etcd" tasks: - name: unmask services command: systemctl unmask "{{ item }}" @@ -358,7 +419,7 @@ - name: Remove packages package: name={{ item }} state=absent - when: not is_atomic | bool + when: not is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - etcd - etcd3 @@ -369,13 +430,25 @@ - shell: systemctl daemon-reload changed_when: False - - name: Remove remaining files - file: path={{ item }} state=absent + - find: path={{ item }} file_type=file + register: files with_items: - - /etc/ansible/facts.d/openshift.fact - - /etc/etcd - - /etc/systemd/system/etcd_container.service - - /etc/profile.d/etcdctl.sh + - "{{ etcd_dirs }}" + + - find: path={{ item }} file_type=directory + register: directories + with_items: + - "{{ etcd_dirs }}" + + - file: path={{ item.1.path }} state=absent + with_subelements: + - "{{ files.results | default([]) }}" + - files + + - file: path={{ item.1.path }} state=absent + with_subelements: + - "{{ directories.results | default([]) }}" + - files # Intenationally using rm command over file module because if someone had mounted a filesystem # at /var/lib/etcd then the contents was not removed correctly @@ -385,6 +458,13 @@ warn: no failed_when: false + - name: Remove remaining files + file: path={{ item }} state=absent + with_items: + - /etc/ansible/facts.d/openshift.fact + - /etc/systemd/system/etcd_container.service + - /etc/profile.d/etcdctl.sh + - hosts: lb become: yes tasks: @@ -397,7 +477,7 @@ - name: Remove packages package: name={{ item }} state=absent - when: not is_atomic | bool + when: not is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - haproxy @@ -411,4 +491,4 @@ file: path={{ item }} state=absent with_items: - /etc/ansible/facts.d/openshift.fact - - /var/lib/haproxy + - /var/lib/haproxy/stats diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index cd2f2e6aa..7839b85e8 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -26,27 +26,6 @@ logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" roles: - - role: openshift_cli - - role: openshift_hosted_facts - - role: openshift_projects - # TODO: Move standard project definitions to openshift_hosted/vars/main.yml - # Vars are not accessible in meta/main.yml in ansible-1.9.x - openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}" - - role: openshift_serviceaccounts - openshift_serviceaccounts_names: - - router - openshift_serviceaccounts_namespace: default - openshift_serviceaccounts_sccs: - - hostnetwork - when: openshift.common.version_gte_3_2_or_1_2 - - role: openshift_serviceaccounts - openshift_serviceaccounts_names: - - router - - registry - openshift_serviceaccounts_namespace: default - openshift_serviceaccounts_sccs: - - privileged - when: not openshift.common.version_gte_3_2_or_1_2 - role: openshift_hosted - role: openshift_metrics when: openshift_hosted_metrics_deploy | default(false) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml 
b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml index 44ddf97ad..17f8fc6e9 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml @@ -20,7 +20,7 @@ - debug: var=docker_image_count.stdout - name: Remove all containers and images - script: nuke_images.sh docker + script: nuke_images.sh register: nuke_images_result when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check deleted file mode 100644 index e5c958ebb..000000000 --- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python -""" -Pre-upgrade checks that must be run on a master before proceeding with upgrade. -""" -# This is a script not a python module: -# pylint: disable=invalid-name - -# NOTE: This script should not require any python libs other than what is -# in the standard library. - -__license__ = "ASL 2.0" - -import json -import os -import subprocess -import re - -# The maximum length of container.ports.name -ALLOWED_LENGTH = 15 -# The valid structure of container.ports.name -ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$') -AT_LEAST_ONE_LETTER = re.compile('[a-z]') -# look at OS_PATH for the full path. Default ot 'oc' -OC_PATH = os.getenv('OC_PATH', 'oc') - - -def validate(value): - """ - validate verifies that value matches required conventions - - Rules of container.ports.name validation: - - * must be less that 16 chars - * at least one letter - * only a-z0-9- - * hyphens can not be leading or trailing or next to each other - - :Parameters: - - `value`: Value to validate - """ - if len(value) > ALLOWED_LENGTH: - return False - - if '--' in value: - return False - - # We search since it can be anywhere - if not AT_LEAST_ONE_LETTER.search(value): - return False - - # We match because it must start at the beginning - if not ALLOWED_CHARS.match(value): - return False - return True - - -def list_items(kind): - """ - list_items returns a list of items from the api - - :Parameters: - - `kind`: Kind of item to access - """ - response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind]) - items = json.loads(response) - return items.get("items", []) - - -def get(obj, *paths): - """ - Gets an object - - :Parameters: - - `obj`: A dictionary structure - - `path`: All other non-keyword arguments - """ - ret_obj = obj - for path in paths: - if ret_obj.get(path, None) is None: - return [] - ret_obj = ret_obj[path] - return ret_obj - - -# pylint: disable=too-many-arguments -def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid): - """ - Prints out results in human friendly way. - - :Parameters: - - `namespace`: Namespace of the resource - - `kind`: Kind of the resource - - `item_name`: Name of the resource - - `container_name`: Name of the container. May be "" when kind=Service. - - `port_name`: Name of the port - - `invalid_label`: The label of the invalid port. 
Port.name/targetPort - - `valid`: True if the port is valid - """ - if not valid: - if len(container_name) > 0: - print('%s/%s -n %s (Container="%s" %s="%s")' % ( - kind, item_name, namespace, container_name, invalid_label, port_name)) - else: - print('%s/%s -n %s (%s="%s")' % ( - kind, item_name, namespace, invalid_label, port_name)) - - -def print_validation_header(): - """ - Prints the error header. Should run on the first error to avoid - overwhelming the user. - """ - print """\ -At least one port name is invalid and must be corrected before upgrading. -Please update or remove any resources with invalid port names. - - Valid port names must: - - * be less that 16 characters - * have at least one letter - * contain only a-z0-9- - * not start or end with - - * not contain dashes next to each other ('--') -""" - - -def main(): - """ - main is the main entry point to this script - """ - try: - # the comma at the end suppresses the newline - print "Checking for oc ...", - subprocess.check_output([OC_PATH, 'whoami']) - print "found" - except: - print( - 'Unable to run "%s whoami"\n' - 'Please ensure OpenShift is running, and "oc" is on your system ' - 'path.\n' - 'You can override the path with the OC_PATH environment variable.' - % OC_PATH) - raise SystemExit(1) - - # Where the magic happens - first_error = True - for kind, path in [ - ('deploymentconfigs', ("spec", "template", "spec", "containers")), - ('replicationcontrollers', ("spec", "template", "spec", "containers")), - ('pods', ("spec", "containers"))]: - for item in list_items(kind): - namespace = item["metadata"]["namespace"] - item_name = item["metadata"]["name"] - for container in get(item, *path): - container_name = container["name"] - for port in get(container, "ports"): - port_name = port.get("name", None) - if not port_name: - # Unnamed ports are OK - continue - valid = validate(port_name) - if not valid and first_error: - first_error = False - print_validation_header() - pretty_print_errors( - namespace, kind, item_name, - container_name, "Port.name", port_name, valid) - - # Services follow a different flow - for item in list_items('services'): - namespace = item["metadata"]["namespace"] - item_name = item["metadata"]["name"] - for port in get(item, "spec", "ports"): - port_name = port.get("targetPort", None) - if isinstance(port_name, int) or port_name is None: - # Integer only or unnamed ports are OK - continue - valid = validate(port_name) - if not valid and first_error: - first_error = False - print_validation_header() - pretty_print_errors( - namespace, "services", item_name, "", - "targetPort", port_name, valid) - - # If we had at least 1 error then exit with 1 - if not first_error: - raise SystemExit(1) - - -if __name__ == '__main__': - main() - diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh deleted file mode 100644 index 7bf249742..000000000 --- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -if [ `which dnf 2> /dev/null` ]; then - installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) - available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) -else - installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null) - available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> 
/dev/null) -fi - -echo "---" -echo "curr_version: ${installed}" -echo "avail_version: ${available}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 8058d3377..21f3c80a1 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -133,9 +133,7 @@ | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" roles: - - role: openshift_master_facts - - role: openshift_hosted_facts - - role: openshift_master_certificates + - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) @@ -145,35 +143,12 @@ | oo_select_keys(groups['oo_masters_to_config'] | default([])) | oo_collect('openshift.common.all_hostnames') | oo_flatten | unique }}" - - role: openshift_etcd_client_certificates + openshift_master_hosts: "{{ groups.oo_masters_to_config }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" - when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config - - role: openshift_clock - - role: openshift_cloud_provider - - role: openshift_builddefaults - - role: os_firewall - os_firewall_allow: - - service: api server https - port: "{{ openshift.master.api_port }}/tcp" - - service: api controllers https - port: "{{ openshift.master.controllers_port }}/tcp" - - service: skydns tcp - port: "{{ openshift.master.dns_port }}/tcp" - - service: skydns udp - port: "{{ openshift.master.dns_port }}/udp" - - role: os_firewall - os_firewall_allow: - - service: etcd embedded - port: 4001/tcp - when: groups.oo_etcd_to_config | default([]) | length == 0 - - role: openshift_master - openshift_master_hosts: "{{ groups.oo_masters_to_config }}" - - role: nickhammond.logrotate - - role: nuage_master - when: openshift.common.use_nuage | bool + post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index e28da5713..b36c0eedf 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -60,30 +60,8 @@ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and openshift_generate_no_proxy_hosts | default(True) | bool }}" roles: - - role: openshift_common - - role: openshift_clock - - role: openshift_docker - - role: openshift_node_certificates - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - - role: openshift_cloud_provider - - role: openshift_node_dnsmasq - when: openshift.common.use_dnsmasq | bool - - role: os_firewall - os_firewall_allow: - - service: Kubernetes kubelet - port: 10250/tcp - - service: http - port: 80/tcp - - service: https - port: 443/tcp - - service: Openshift kubelet ReadOnlyPort - port: 10255/tcp - - service: Openshift kubelet ReadOnlyPort udp - port: 10255/udp - - service: OpenShift OVS sdn - port: 4789/udp - when: openshift.node.use_openshift_sdn | bool - role: openshift_node + openshift_ca_host: "{{ groups.oo_first_master.0 }}" - name: Configure nodes hosts: oo_nodes_to_config:!oo_containerized_master_nodes @@ -99,30 +77,8 @@ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and 
openshift_generate_no_proxy_hosts | default(True) | bool }}" roles: - - role: openshift_common - - role: openshift_clock - - role: openshift_docker - - role: openshift_node_certificates - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - - role: openshift_cloud_provider - - role: openshift_node_dnsmasq - when: openshift.common.use_dnsmasq | bool - - role: os_firewall - os_firewall_allow: - - service: Kubernetes kubelet - port: 10250/tcp - - service: http - port: 80/tcp - - service: https - port: 443/tcp - - service: Openshift kubelet ReadOnlyPort - port: 10255/tcp - - service: Openshift kubelet ReadOnlyPort udp - port: 10255/udp - - service: OpenShift OVS sdn - port: 4789/udp - when: openshift.node.use_openshift_sdn | bool - role: openshift_node + openshift_ca_host: "{{ groups.oo_first_master.0 }}" - name: Additional node config hosts: oo_nodes_to_config diff --git a/requirements.txt b/requirements.txt index e55ef5f0b..8f47033f8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ -ansible>=2.1 +ansible>=2.2 +six pyOpenSSL +PyYAML diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py index 7161b5277..a474b36b0 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py @@ -4,17 +4,13 @@ """For details on this module see DOCUMENTATION (below)""" -# router/registry cert grabbing -import subprocess -# etcd config file -import ConfigParser -# Expiration parsing import datetime -# File path stuff import os -# Config file parsing +import subprocess + +from six.moves import configparser + import yaml -# Certificate loading import OpenSSL.crypto DOCUMENTATION = ''' @@ -260,7 +256,10 @@ Return: # This is our module MAIN function after all, so there's bound to be a # lot of code bundled up into one block # -# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches +# Reason: These checks are disabled because the issue was introduced +# during a period where the pylint checks weren't enabled for this file +# Status: temporarily disabled pending future refactoring +# pylint: disable=too-many-locals,too-many-statements,too-many-branches def main(): """This module examines certificates (in various forms) which compose an OpenShift Container Platform cluster @@ -479,13 +478,17 @@ an OpenShift Container Platform cluster etcd_cert_params.append('dne') try: with open('/etc/etcd/etcd.conf', 'r') as fp: - etcd_config = ConfigParser.ConfigParser() + etcd_config = configparser.ConfigParser() + # Reason: This check is disabled because the issue was introduced + # during a period where the pylint checks weren't enabled for this file + # Status: temporarily disabled pending future refactoring + # pylint: disable=deprecated-method etcd_config.readfp(FakeSecHead(fp)) for param in etcd_cert_params: try: etcd_certs_to_check.add(etcd_config.get('ETCD', param)) - except ConfigParser.NoOptionError: + except configparser.NoOptionError: # That parameter does not exist, oh well... 
pass except IOError: diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index d7e3596fd..6baf9d016 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -7,13 +7,6 @@ """Ansible module for retrieving and setting openshift related facts""" -try: - # python2 - import ConfigParser -except ImportError: - # python3 - import configparser as ConfigParser - # pylint: disable=no-name-in-module, import-error, wrong-import-order import copy import errno @@ -26,8 +19,8 @@ import struct import socket from distutils.util import strtobool from distutils.version import LooseVersion -from six import string_types -from six import text_type +from six import string_types, text_type +from six.moves import configparser # ignore pylint errors related to the module_utils import # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import @@ -776,7 +769,7 @@ def set_etcd_facts_if_unset(facts): # Add a fake section for parsing: ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8') ini_fp = io.StringIO(ini_str) - config = ConfigParser.RawConfigParser() + config = configparser.RawConfigParser() config.readfp(ini_fp) etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): @@ -1256,6 +1249,13 @@ def is_service_running(service): return service_running +def rpm_rebuilddb(): + """ + Runs rpm --rebuilddb to ensure the db is in good shape. + """ + module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405 + + def get_version_output(binary, version_cmd): """ runs and returns the version output for a command """ cmd = [] @@ -1292,7 +1292,7 @@ def get_hosted_registry_insecure(): try: ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') ini_fp = io.StringIO(ini_str) - config = ConfigParser.RawConfigParser() + config = configparser.RawConfigParser() config.readfp(ini_fp) options = config.get('root', 'OPTIONS') if 'insecure-registry' in options: @@ -1561,15 +1561,15 @@ def get_local_facts_from_file(filename): local_facts = dict() try: # Handle conversion of INI style facts file to json style - ini_facts = ConfigParser.SafeConfigParser() + ini_facts = configparser.SafeConfigParser() ini_facts.read(filename) for section in ini_facts.sections(): local_facts[section] = dict() for key, value in ini_facts.items(section): local_facts[section][key] = value - except (ConfigParser.MissingSectionHeaderError, - ConfigParser.ParsingError): + except (configparser.MissingSectionHeaderError, + configparser.ParsingError): try: with open(filename, 'r') as facts_file: local_facts = json.load(facts_file) @@ -1966,6 +1966,11 @@ class OpenShiftFacts(object): if 'docker' in roles: docker = dict(disable_push_dockerhub=False, options='--log-driver=json-file --log-opt max-size=50m') + # NOTE: This is a workaround for a dnf output racecondition that can occur in + # some situations. 
See https://bugzilla.redhat.com/show_bug.cgi?id=918184 + if self.system_facts['ansible_pkg_mgr'] == 'dnf': + rpm_rebuilddb() + version_info = get_docker_version_info() if version_info is not None: docker['api_version'] = version_info['api_version'] diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index 74c50ae1d..ca5e88b15 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -11,4 +11,23 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- role: openshift_cli +- role: openshift_hosted_facts +- role: openshift_projects + openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}" +- role: openshift_serviceaccounts + openshift_serviceaccounts_names: + - router + openshift_serviceaccounts_namespace: default + openshift_serviceaccounts_sccs: + - hostnetwork + when: openshift.common.version_gte_3_2_or_1_2 +- role: openshift_serviceaccounts + openshift_serviceaccounts_names: + - router + - registry + openshift_serviceaccounts_namespace: default + openshift_serviceaccounts_sccs: + - privileged + when: not openshift.common.version_gte_3_2_or_1_2 diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index 7457e4378..3a595b2d1 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -11,4 +11,33 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- role: openshift_master_facts +- role: openshift_hosted_facts +- role: openshift_master_certificates +- role: openshift_etcd_client_certificates + etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" + etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" + etcd_cert_prefix: "master.etcd-" + when: groups.oo_etcd_to_config | default([]) | length != 0 +- role: openshift_clock +- role: openshift_cloud_provider +- role: openshift_builddefaults +- role: os_firewall + os_firewall_allow: + - service: api server https + port: "{{ openshift.master.api_port }}/tcp" + - service: api controllers https + port: "{{ openshift.master.controllers_port }}/tcp" + - service: skydns tcp + port: "{{ openshift.master.dns_port }}/tcp" + - service: skydns udp + port: "{{ openshift.master.dns_port }}/udp" +- role: os_firewall + os_firewall_allow: + - service: etcd embedded + port: 4001/tcp + when: groups.oo_etcd_to_config | default([]) | length == 0 +- role: nickhammond.logrotate +- role: nuage_master + when: openshift.common.use_nuage | bool diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index c39269f33..56dee2958 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -11,4 +11,26 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- role: openshift_common +- role: openshift_clock +- role: openshift_docker +- role: openshift_node_certificates +- role: openshift_cloud_provider +- role: openshift_node_dnsmasq + when: openshift.common.use_dnsmasq | bool +- role: os_firewall + os_firewall_allow: + - service: Kubernetes kubelet + port: 10250/tcp + - service: http + port: 80/tcp + - service: https + port: 443/tcp + - service: Openshift kubelet ReadOnlyPort + port: 10255/tcp + - service: Openshift kubelet ReadOnlyPort udp + port: 10255/udp + - service: OpenShift OVS sdn + port: 4789/udp + when: 
openshift.node.use_openshift_sdn | bool diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..d55df9d37 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,27 @@ +[bdist_wheel] +# This flag says that the code is written to work on both Python 2 and Python +# 3. If at all possible, it is good practice to do this. If you cannot, you +# will need to generate wheels for each Python version that you support. +universal=1 + +[nosetests] +tests=roles/openshift_master_facts/test/, test/ +verbosity=2 +with-coverage=1 +cover-html=1 +cover-inclusive=1 +cover-min-percentage=70 +cover-erase=1 +detailed-errors=1 +cover-branches=1 + +[yamllint] +excludes=.tox,utils,files + +[lint] +lint_disable=fixme,locally-disabled,file-ignored,duplicate-code + +[flake8] +exclude=.tox/*,setup.py,utils/*,inventory/* +max_line_length = 120 +ignore = E501,T003 diff --git a/setup.py b/setup.py new file mode 100644 index 000000000..e598c3502 --- /dev/null +++ b/setup.py @@ -0,0 +1,191 @@ +"""A setuptools based setup module. + +""" +from __future__ import print_function + +import os +import fnmatch +import re + +import yaml + +# Always prefer setuptools over distutils +from setuptools import setup, Command +from setuptools_lint.setuptools_command import PylintCommand +from six import string_types +from yamllint.config import YamlLintConfig +from yamllint.cli import Format +from yamllint import linter + +def find_files(base_dir, exclude_dirs, include_dirs, file_regex): + ''' find files matching file_regex ''' + found = [] + exclude_regex = '' + include_regex = '' + + if exclude_dirs is not None: + exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.' + + if include_dirs is not None: + include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.' 
+ + for root, dirs, files in os.walk(base_dir): + if exclude_dirs is not None: + # filter out excludes for dirs + dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)] + + if include_dirs is not None: + # filter for includes for dirs + dirs[:] = [d for d in dirs if re.match(include_regex, d)] + + matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None] + found.extend(matches) + + return found + + +class OpenShiftAnsibleYamlLint(Command): + ''' Command to run yamllint ''' + description = "Run yamllint tests" + user_options = [ + ('excludes=', 'e', 'directories to exclude'), + ('config-file=', 'c', 'config file to use'), + ('format=', 'f', 'format to use (standard, parsable)'), + ] + + def initialize_options(self): + ''' initialize_options ''' + # Reason: Defining these attributes as a part of initialize_options is + # consistent with upstream usage + # Status: permanently disabled + # pylint: disable=attribute-defined-outside-init + self.excludes = None + self.config_file = None + self.format = None + + def finalize_options(self): + ''' finalize_options ''' + # Reason: These attributes are defined in initialize_options and this + # usage is consistant with upstream usage + # Status: permanently disabled + # pylint: disable=attribute-defined-outside-init + if isinstance(self.excludes, string_types): + self.excludes = self.excludes.split(',') + if self.format is None: + self.format = 'standard' + assert (self.format in ['standard', 'parsable']), ( + 'unknown format {0}.'.format(self.format)) + if self.config_file is None: + self.config_file = '.yamllint' + assert os.path.isfile(self.config_file), ( + 'yamllint config file {0} does not exist.'.format(self.config_file)) + + def run(self): + ''' run command ''' + if self.excludes is not None: + print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False))) + + config = YamlLintConfig(file=self.config_file) + + has_errors = False + has_warnings = False + + if self.format == 'parsable': + format_method = Format.parsable + else: + format_method = Format.standard_color + + for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'): + first = True + with open(yaml_file, 'r') as contents: + for problem in linter.run(contents, config): + if first and self.format != 'parsable': + print('\n{0}:'.format(os.path.relpath(yaml_file))) + first = False + + print(format_method(problem, yaml_file)) + if problem.level == linter.PROBLEM_LEVELS['error']: + has_errors = True + elif problem.level == linter.PROBLEM_LEVELS['warning']: + has_warnings = True + + assert not has_errors, 'yamllint errors found' + assert not has_warnings, 'yamllint warnings found' + + +class OpenShiftAnsiblePylint(PylintCommand): + ''' Class to override the default behavior of PylintCommand ''' + + # Reason: This method needs to be an instance method to conform to the + # overridden method's signature + # Status: permanently disabled + # pylint: disable=no-self-use + def find_all_modules(self): + ''' find all python files to test ''' + exclude_dirs = ['.tox', 'utils', 'test', 'tests', 'git'] + modules = [] + for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'): + package = os.path.basename(match).replace('.py', '') + modules.append(('openshift_ansible', package, match)) + return modules + + def get_finalized_command(self, cmd): + ''' override get_finalized_command to ensure we use our + find_all_modules method ''' + if cmd == 'build_py': + return self + + # Reason: This method needs to be an instance 
+
+
+class OpenShiftAnsibleYamlLint(Command):
+    ''' Command to run yamllint '''
+    description = "Run yamllint tests"
+    user_options = [
+        ('excludes=', 'e', 'directories to exclude'),
+        ('config-file=', 'c', 'config file to use'),
+        ('format=', 'f', 'format to use (standard, parsable)'),
+    ]
+
+    def initialize_options(self):
+        ''' initialize_options '''
+        # Reason: Defining these attributes as a part of initialize_options is
+        # consistent with upstream usage
+        # Status: permanently disabled
+        # pylint: disable=attribute-defined-outside-init
+        self.excludes = None
+        self.config_file = None
+        self.format = None
+
+    def finalize_options(self):
+        ''' finalize_options '''
+        # Reason: These attributes are defined in initialize_options and this
+        # usage is consistent with upstream usage
+        # Status: permanently disabled
+        # pylint: disable=attribute-defined-outside-init
+        if isinstance(self.excludes, string_types):
+            self.excludes = self.excludes.split(',')
+        if self.format is None:
+            self.format = 'standard'
+        assert (self.format in ['standard', 'parsable']), (
+            'unknown format {0}.'.format(self.format))
+        if self.config_file is None:
+            self.config_file = '.yamllint'
+        assert os.path.isfile(self.config_file), (
+            'yamllint config file {0} does not exist.'.format(self.config_file))
+
+    def run(self):
+        ''' run command '''
+        if self.excludes is not None:
+            print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
+
+        config = YamlLintConfig(file=self.config_file)
+
+        has_errors = False
+        has_warnings = False
+
+        if self.format == 'parsable':
+            format_method = Format.parsable
+        else:
+            format_method = Format.standard_color
+
+        for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
+            first = True
+            with open(yaml_file, 'r') as contents:
+                for problem in linter.run(contents, config):
+                    if first and self.format != 'parsable':
+                        print('\n{0}:'.format(os.path.relpath(yaml_file)))
+                        first = False
+
+                    print(format_method(problem, yaml_file))
+                    if problem.level == linter.PROBLEM_LEVELS['error']:
+                        has_errors = True
+                    elif problem.level == linter.PROBLEM_LEVELS['warning']:
+                        has_warnings = True
+
+        assert not has_errors, 'yamllint errors found'
+        assert not has_warnings, 'yamllint warnings found'
+
+
+class OpenShiftAnsiblePylint(PylintCommand):
+    ''' Class to override the default behavior of PylintCommand '''
+
+    # Reason: This method needs to be an instance method to conform to the
+    # overridden method's signature
+    # Status: permanently disabled
+    # pylint: disable=no-self-use
+    def find_all_modules(self):
+        ''' find all python files to test '''
+        exclude_dirs = ['.tox', 'utils', 'test', 'tests', 'git']
+        modules = []
+        for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
+            package = os.path.basename(match).replace('.py', '')
+            modules.append(('openshift_ansible', package, match))
+        return modules
+
+    def get_finalized_command(self, cmd):
+        ''' override get_finalized_command to ensure we use our
+            find_all_modules method '''
+        if cmd == 'build_py':
+            return self
+
+    # Reason: This method needs to be an instance method to conform to the
+    # overridden method's signature
+    # Status: permanently disabled
+    # pylint: disable=no-self-use
+    def with_project_on_sys_path(self, func, func_args, func_kwargs):
+        ''' override behavior, since we don't need to build '''
+        return func(*func_args, **func_kwargs)
+
+
+class UnsupportedCommand(Command):
+    ''' Basic Command to override unsupported commands '''
+    user_options = []
+
+    # Reason: This method needs to be an instance method to conform to the
+    # overridden method's signature
+    # Status: permanently disabled
+    # pylint: disable=no-self-use
+    def initialize_options(self):
+        ''' initialize_options '''
+        pass
+
+    # Reason: This method needs to be an instance method to conform to the
+    # overridden method's signature
+    # Status: permanently disabled
+    # pylint: disable=no-self-use
+    def finalize_options(self):
+        ''' finalize_options '''
+        pass
+
+    # Reason: This method needs to be an instance method to conform to the
+    # overridden method's signature
+    # Status: permanently disabled
+    # pylint: disable=no-self-use
+    def run(self):
+        ''' run command '''
+        print("Unsupported command for openshift-ansible")
+
+
+setup(
+    name='openshift-ansible',
+    license="Apache 2.0",
+    cmdclass={
+        'install': UnsupportedCommand,
+        'develop': UnsupportedCommand,
+        'build': UnsupportedCommand,
+        'build_py': UnsupportedCommand,
+        'build_ext': UnsupportedCommand,
+        'egg_info': UnsupportedCommand,
+        'sdist': UnsupportedCommand,
+        'lint': OpenShiftAnsiblePylint,
+        'yamllint': OpenShiftAnsibleYamlLint,
+    },
+    packages=[],
+)
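The `cmdclass` mapping is the key move in this `setup.py`: packaging commands are remapped to `UnsupportedCommand` so they refuse to run, while `lint` and `yamllint` become first-class `setup.py` subcommands. A toy, self-contained version of the same pattern (the `echo` command is hypothetical, not part of this repository):

```
# Hypothetical stand-alone example of the cmdclass pattern used above: any
# distutils command name can be mapped to a custom Command subclass.
from setuptools import Command, setup


class EchoCommand(Command):
    ''' toy command, invoked with: python setup.py echo '''
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print('hello from a custom setuptools command')


setup(name='cmdclass-demo', packages=[], cmdclass={'echo': EchoCommand})
```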
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 000000000..2ee1e657d
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,11 @@
+six
+pyOpenSSL
+flake8
+flake8-mutable
+flake8-print
+pylint
+setuptools-lint
+PyYAML
+yamllint
+nose
+coverage
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..c0e7732c3
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+minversion=2.3.1
+envlist =
+    py{27,35}-ansible22-{pylint,unit,flake8}
+    yamllint
+skipsdist=True
+skip_missing_interpreters=True
+
+[testenv]
+deps =
+    -rtest-requirements.txt
+    py35-flake8: flake8-bugbear
+    ansible22: ansible~=2.2
+
+commands =
+    flake8: flake8
+    pylint: python setup.py lint
+    yamllint: python setup.py yamllint
+    unit: nosetests
diff --git a/utils/.pylintrc b/utils/.pylintrc
new file mode 120000
index 000000000..30b33b524
--- /dev/null
+++ b/utils/.pylintrc
@@ -0,0 +1 @@
+../.pylintrc
\ No newline at end of file
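The `envlist` above is factor-generated: tox expands each brace group into one environment per combination, with the standalone `yamllint` env appended. A quick sketch of the names that expansion produces:

```
# Sketch of tox's generative envlist expansion for the configuration above.
from itertools import product

pythons = ['py27', 'py35']
suites = ['pylint', 'unit', 'flake8']
envs = ['-'.join(parts) for parts in product(pythons, ['ansible22'], suites)]
envs.append('yamllint')
print(envs)
# ['py27-ansible22-pylint', 'py27-ansible22-unit', 'py27-ansible22-flake8',
#  'py35-ansible22-pylint', 'py35-ansible22-unit', 'py35-ansible22-flake8',
#  'yamllint']
```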
diff --git a/utils/Makefile b/utils/Makefile
index 2a37b922c..038c31fcf 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -46,7 +46,7 @@ clean:
 	@find . -type f \( -name "*~" -or -name "#*" \) -delete
 	@rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
 	@rm -fR $(VENV)
-
+	@rm -fR .tox
 
 # To force a rebuild of the docs run 'touch' on any *.in file under
 # docs/man/man1/
@@ -84,41 +84,27 @@ ci-unittests: $(VENV)
 	@echo "#############################################"
 	@echo "# Running Unit Tests in virtualenv"
 	@echo "#############################################"
-	. $(VENV)/bin/activate && tox -e py27-unit
-	. $(VENV)/bin/activate && tox -e py35-unit
+	. $(VENV)/bin/activate && detox -e py27-unit,py35-unit
 	@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
 
 ci-pylint: $(VENV)
 	@echo "#############################################"
 	@echo "# Running PyLint Tests in virtualenv"
 	@echo "#############################################"
-	. $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(PYFILES)
-
-ci-yamllint: $(VENV)
-	@echo "#############################################"
-	@echo "# Running yamllint Tests in virtualenv"
-	@echo "#############################################"
-	@. $(VENV)/bin/activate && yamllint -c ../git/.yamllint $(YAMLFILES)
-
-ci-list-deps: $(VENV)
-	@echo "#############################################"
-	@echo "# Listing all pip deps"
-	@echo "#############################################"
-	. $(VENV)/bin/activate && pip freeze
+	. $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
 
 ci-flake8: $(VENV)
 	@echo "#############################################"
 	@echo "# Running Flake8 Compliance Tests in virtualenv"
 	@echo "#############################################"
-	. $(VENV)/bin/activate && tox -e py27-flake8
-	. $(VENV)/bin/activate && tox -e py35-flake8
+	. $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
 
-ci-tox:
-	. $(VENV)/bin/activate && tox
+ci-tox: $(VENV)
+	. $(VENV)/bin/activate && detox
 
-ci: ci-list-deps ci-tox ci-pylint ci-yamllint
+ci: ci-tox
 	@echo
 	@echo "##################################################################################"
 	@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
 	@echo "To clean your test environment run 'make clean'"
-	@echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
+	@echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
diff --git a/utils/README.md b/utils/README.md
index 2abf2705e..c37ab41e6 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -6,6 +6,47 @@ Run the command:
 
 to run an array of unittests locally.
 
+Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/), which allows
+tests to run in parallel.
+
+
+```
+pip install tox detox
+```
+
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Run a particular test environment (Python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Run a particular test environment in a clean virtualenv (Python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (the py27-ansible22-flake8 env in this case):
+```
+source .tox/py27-ansible22-flake8/bin/activate
+```
+
 You will get errors if the log files already exist and can not be written to
 by the current user (`/tmp/ansible.log` and `/tmp/installer.txt`). *We're
 working on it.*
diff --git a/utils/setup.cfg b/utils/setup.cfg
index ea07eea9f..862dffd7b 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -5,7 +5,6 @@
 universal=1
 
 [nosetests]
-tests=../,../roles/openshift_master_facts/test/,test/
 verbosity=2
 with-coverage=1
 cover-html=1
@@ -19,3 +18,6 @@ cover-branches=1
 max-line-length=120
 exclude=test/*,setup.py,oo-installenv
 ignore=E501
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index b70bd1817..0bc9aa45e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -1124,6 +1124,20 @@ def scaleup(ctx, gen_inventory):
 
     click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')
 
+    # Scaleup requires manual data entry. Therefore, we do not support
+    # unattended operations.
+    if unattended:
+        msg = """
+---
+
+The 'scaleup' operation does not support unattended
+functionality. Re-run the installer without the '-u' or '--unattended'
+option to continue.
+"""
+        click.echo(msg)
+        sys.exit(1)
+
+    # Resume normal scaleup workflow
     print_installation_summary(installed_hosts,
                                oo_cfg.settings['variant_version'],
                                verbose=False,)
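The guard added to `scaleup` is an early exit on a mode flag, printed before any interactive prompting starts. A self-contained miniature of the same pattern with click (the `scaleup_demo` command is hypothetical, not the installer's real CLI):

```
# Hypothetical miniature of the early-exit guard above, using click.
import sys

import click


@click.command()
@click.option('-u', '--unattended', is_flag=True, default=False)
def scaleup_demo(unattended):
    ''' refuse to run interactively-driven work in unattended mode '''
    if unattended:
        click.echo("The 'scaleup' operation does not support unattended "
                   "functionality.")
        sys.exit(1)
    click.echo('proceeding with interactive scaleup...')


if __name__ == '__main__':
    scaleup_demo()
```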
+""" + click.echo(msg) + sys.exit(1) + + # Resume normal scaleup workflow print_installation_summary(installed_hosts, oo_cfg.settings['variant_version'], verbose=False,) diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index e5c5360c3..f6a7bde10 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -1,6 +1,7 @@ ansible configparser pylint +setuptools-lint nose coverage mock @@ -11,3 +12,4 @@ backports.functools_lru_cache pyOpenSSL yamllint tox +detox diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py new file mode 100644 index 000000000..fe3029ca1 --- /dev/null +++ b/utils/test/openshift_ansible_tests.py @@ -0,0 +1,72 @@ +import os +import unittest +import tempfile +import shutil +import yaml + +from six.moves import configparser + +from ooinstall import openshift_ansible +from ooinstall.oo_config import Host, OOConfig + + +BASE_CONFIG = """ +--- +variant: openshift-enterprise +variant_version: 3.3 +version: v2 +deployment: + ansible_ssh_user: cloud-user + hosts: [] + roles: + master: + node: +""" + + +class TestOpenShiftAnsible(unittest.TestCase): + + def setUp(self): + self.tempfiles = [] + self.work_dir = tempfile.mkdtemp(prefix='openshift_ansible_tests') + self.configfile = os.path.join(self.work_dir, 'ooinstall.config') + with open(self.configfile, 'w') as config_file: + config_file.write(BASE_CONFIG) + self.inventory = os.path.join(self.work_dir, 'hosts') + config = OOConfig(self.configfile) + config.settings['ansible_inventory_path'] = self.inventory + openshift_ansible.set_config(config) + + def tearDown(self): + shutil.rmtree(self.work_dir) + + def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False): + hosts = [] + for num in range(1, num_hosts + 1): + hosts.append(Host(connect_to=name_prefix + str(num), + roles=roles, new_host=new_host)) + return hosts + + def test_generate_inventory_new_nodes(self): + hosts = self.generate_hosts(1, 'master', roles=(['master', 'etcd'])) + hosts.extend(self.generate_hosts(1, 'node', roles=['node'])) + hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True)) + openshift_ansible.generate_inventory(hosts) + inventory = configparser.ConfigParser(allow_no_value=True) + inventory.read(self.inventory) + self.assertTrue(inventory.has_section('new_nodes')) + self.assertTrue(inventory.has_option('new_nodes', 'new_node1')) + + def test_write_inventory_vars_role_vars(self): + print(yaml.dump(openshift_ansible.CFG.deployment.roles)) + with open(self.inventory, 'w') as inv: + openshift_ansible.CFG.deployment.roles['master'].variables={'color': 'blue'} + openshift_ansible.CFG.deployment.roles['node'].variables={'color': 'green'} + openshift_ansible.write_inventory_vars(inv, None) + + inventory = configparser.ConfigParser(allow_no_value=True) + inventory.read(self.inventory) + self.assertTrue(inventory.has_section('masters:vars')) + self.assertEquals('blue', inventory.get('masters:vars', 'color')) + self.assertTrue(inventory.has_section('nodes:vars')) + self.assertEquals('green', inventory.get('nodes:vars', 'color')) diff --git a/utils/tox.ini b/utils/tox.ini index 747d79dfe..1308f7505 100644 --- a/utils/tox.ini +++ b/utils/tox.ini @@ -1,7 +1,7 @@ [tox] minversion=2.3.1 envlist = - py{27,35}-{flake8,unit} + py{27,35}-{flake8,unit,pylint} skipsdist=True skip_missing_interpreters=True @@ -10,8 +10,7 @@ usedevelop=True deps = -rtest-requirements.txt py35-flake8: flake8-bugbear - commands = - flake8: flake8 --config=setup.cfg ../ 
--exclude="../utils,.tox,../inventory" flake8: python setup.py flake8 unit: python setup.py nosetests + pylint: python setup.py lint |