Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/meta/main.yml | 3
-rw-r--r--  roles/docker/tasks/main.yml | 40
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rwxr-xr-x  roles/etcd_common/library/delegated_serial_command.py | 18
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 1
-rw-r--r--  roles/kube_nfs_volumes/library/partitionpool.py | 31
-rw-r--r--  roles/kube_nfs_volumes/meta/main.yml | 2
-rw-r--r--  roles/lib_utils/library/yedit.py | 766
-rw-r--r--  roles/lib_utils/src/ansible/yedit.py | 84
-rw-r--r--  roles/lib_utils/src/class/import.py | 11
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 520
-rw-r--r--  roles/lib_utils/src/doc/license | 16
-rw-r--r--  roles/lib_utils/src/doc/yedit | 132
-rwxr-xr-x  roles/lib_utils/src/generate.py | 45
-rw-r--r--  roles/lib_utils/src/generate_sources.yml | 7
-rw-r--r--  roles/lib_utils/src/test/integration/files/kube-manager.yaml | 39
-rwxr-xr-x  roles/lib_utils/src/test/integration/yedit_test.yml | 221
-rwxr-xr-x  roles/lib_utils/src/test/unit/yedit_test.py | 277
-rw-r--r--  roles/nuage_ca/meta/main.yml | 2
-rw-r--r--  roles/nuage_common/defaults/main.yaml | 1
-rw-r--r--  roles/nuage_master/defaults/main.yaml | 2
-rw-r--r--  roles/nuage_master/meta/main.yml | 14
-rw-r--r--  roles/nuage_master/tasks/certificates.yml | 8
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_master/vars/main.yaml | 17
-rw-r--r--  roles/nuage_node/meta/main.yml | 16
-rw-r--r--  roles/nuage_node/tasks/certificates.yml | 6
-rw-r--r--  roles/nuage_node/tasks/iptables.yml | 2
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 22
-rw-r--r--  roles/nuage_node/vars/main.yaml | 4
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 3
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 20
-rw-r--r--  roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py | 28
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 85
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 6
-rw-r--r--  roles/openshift_cloud_provider/tasks/aws.yml | 1
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml | 1
-rw-r--r--  roles/openshift_common/tasks/main.yml | 5
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 4
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/image-streams/dotnet_imagestreams.json | 36
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json | 187
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json | 167
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json | 29
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json | 29
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json | 28
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-streams/jboss-image-streams.json | 22
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-basic-s2i.json | 415
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-extensions-support-s2i.json | 763
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-secure-s2i.json | 642
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml | 479
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json | 25
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json | 22
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json | 415
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json | 763
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json | 642
-rw-r--r--  roles/openshift_expand_partition/meta/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 342
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 6
-rw-r--r--  roles/openshift_facts/vars/main.yml | 7
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 21
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 12
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 1
-rw-r--r--  roles/openshift_hosted_logging/tasks/cleanup_logging.yaml | 86
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 348
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml | 1
-rw-r--r--  roles/openshift_hosted_metrics/README.md (renamed from roles/openshift_metrics/README.md) | 10
-rw-r--r--  roles/openshift_hosted_metrics/defaults/main.yml (renamed from roles/openshift_metrics/defaults/main.yml) | 0
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml (renamed from roles/openshift_metrics/handlers/main.yml) | 0
-rw-r--r--  roles/openshift_hosted_metrics/meta/main.yaml (renamed from roles/openshift_metrics/meta/main.yaml) | 1
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml (renamed from roles/openshift_metrics/tasks/install.yml) | 33
-rw-r--r--  roles/openshift_hosted_metrics/tasks/main.yaml (renamed from roles/openshift_metrics/tasks/main.yaml) | 24
-rw-r--r--  roles/openshift_hosted_metrics/vars/main.yaml (renamed from roles/openshift_metrics/vars/main.yaml) | 7
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml | 3
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 53
-rw-r--r--  roles/openshift_master/meta/main.yml | 31
-rw-r--r--  roles/openshift_master/tasks/main.yml | 7
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 9
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 8
-rw-r--r--  roles/openshift_master/vars/main.yml | 11
-rw-r--r--  roles/openshift_master_facts/defaults/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 93
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py | 85
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 63
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 251
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py | 238
-rw-r--r--  roles/openshift_master_facts/vars/main.yml | 10
-rw-r--r--  roles/openshift_node/README.md | 4
-rw-r--r--  roles/openshift_node/handlers/main.yml | 6
-rw-r--r--  roles/openshift_node/meta/main.yml | 33
-rw-r--r--  roles/openshift_node/tasks/main.yml | 16
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 39
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 14
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 7
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 1
-rw-r--r--  roles/openshift_preflight/README.md | 52
-rwxr-xr-x  roles/openshift_preflight/base/library/aos_version.py | 100
-rwxr-xr-x  roles/openshift_preflight/base/library/check_yum_update.py | 116
-rw-r--r--  roles/openshift_preflight/common/meta/main.yml | 3
-rw-r--r--  roles/openshift_preflight/common/tasks/main.yml | 21
-rw-r--r--  roles/openshift_preflight/init/meta/main.yml | 3
-rw-r--r--  roles/openshift_preflight/init/tasks/main.yml | 4
-rw-r--r--  roles/openshift_preflight/masters/meta/main.yml | 3
-rw-r--r--  roles/openshift_preflight/masters/tasks/main.yml | 31
-rw-r--r--  roles/openshift_preflight/nodes/meta/main.yml | 3
-rw-r--r--  roles/openshift_preflight/nodes/tasks/main.yml | 41
-rw-r--r--  roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py | 96
-rw-r--r--  roles/openshift_preflight/verify_status/tasks/main.yml | 8
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 4
-rw-r--r--  roles/openshift_repos/vars/main.yml | 2
-rw-r--r--  roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml | 3
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 9
-rw-r--r--  roles/openshift_storage_nfs_lvm/defaults/main.yml | 7
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml | 5
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 | 6
-rw-r--r--  roles/openshift_version/tasks/main.yml | 2
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml | 5
-rw-r--r--  roles/os_firewall/README.md | 7
-rw-r--r--  roles/os_firewall/defaults/main.yml | 8
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py | 60
-rw-r--r--  roles/os_firewall/tasks/main.yml | 6
-rw-r--r--  roles/rhel_subscribe/meta/main.yml | 3
145 files changed, 8888 insertions, 939 deletions
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index c5c95c0d2..ad28cece9 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -10,5 +10,4 @@ galaxy_info:
versions:
- 7
dependencies:
- - role: os_firewall
- os_firewall_use_firewalld: False
+- role: os_firewall
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a2b18baa1..57da23e0a 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -43,16 +43,18 @@
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
-- name: Ensure docker.service.d directory exists
- file:
- path: "{{ docker_systemd_dir }}"
- state: directory
-
-# Extend the default Docker service unit file
-- name: Configure Docker service unit file
- template:
- dest: "{{ docker_systemd_dir }}/custom.conf"
- src: custom.conf.j2
+- block:
+ # Extend the default Docker service unit file when using iptables-services
+ - name: Ensure docker.service.d directory exists
+ file:
+ path: "{{ docker_systemd_dir }}"
+ state: directory
+
+ - name: Configure Docker service unit file
+ template:
+ dest: "{{ docker_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
+ when: not os_firewall_use_firewalld | default(True) | bool
- include: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
@@ -86,16 +88,16 @@
line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
with_items:
- - reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
- - reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
- - reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
notify:
- - restart docker
+ - restart docker
when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+ - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
- name: Set various Docker options
lineinfile:
@@ -109,7 +111,7 @@
{% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- - restart docker
+ - restart docker
- name: Start the Docker service
systemd:
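Note: the change above wraps the two docker.service.d tasks in a single Ansible `block` so that one `when` condition gates both of them. A minimal sketch of the same pattern follows; the paths are hardcoded here purely for illustration, whereas the role itself uses the `docker_systemd_dir` variable and the same firewalld condition:
- block:
    # Both tasks are skipped together when firewalld is in use
    - name: Ensure the drop-in directory exists
      file:
        path: /etc/systemd/system/docker.service.d
        state: directory
    - name: Install the drop-in unit
      template:
        src: custom.conf.j2
        dest: /etc/systemd/system/docker.service.d/custom.conf
  when: not os_firewall_use_firewalld | default(True) | bool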
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index cf957ede8..ae059b549 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf=docker.service
[Service]
EnvironmentFile=/etc/etcd/etcd.conf
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:z --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd_common/library/delegated_serial_command.py
index 84d4f97c2..0cab1ca88 100755
--- a/roles/etcd_common/library/delegated_serial_command.py
+++ b/roles/etcd_common/library/delegated_serial_command.py
@@ -24,12 +24,9 @@
''' delegated_serial_command '''
-import copy
-import sys
import datetime
+import errno
import glob
-import traceback
-import re
import shlex
import os
import fcntl
@@ -133,6 +130,7 @@ OPTIONS = {'chdir': None,
'lockfile': None,
'timeout': None}
+
def check_command(commandline):
''' Check provided command '''
arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
@@ -160,7 +158,7 @@ def check_command(commandline):
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
def main():
''' Main module function '''
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
_uses_shell=dict(type='bool', default=False),
command=dict(required=True),
@@ -220,9 +218,9 @@ def main():
)
if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
path = os.path.expanduser(removes)
if not glob.glob(path):
module.exit_json(
@@ -268,7 +266,9 @@ def main():
iterated=iterated
)
+
# import module snippets
-from ansible.module_utils.basic import *
+# pylint: disable=wrong-import-position
+from ansible.module_utils.basic import * # noqa: F402,F403
main()
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index b1279aa88..ddf8230ec 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -8,4 +8,3 @@ etcd_conf_dir: "{{ openshift.common.config_base }}/master"
etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
-
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
index 9bd3228c1..1857433c7 100644
--- a/roles/kube_nfs_volumes/library/partitionpool.py
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -3,6 +3,8 @@
Ansible module for partitioning.
"""
+from __future__ import print_function
+
# There is no pyparted on our Jenkins worker
# pylint: disable=import-error
import parted
@@ -52,7 +54,7 @@ options:
partitions. On 1 TiB disk, 10 partitions will be created.
- Example 2: size=100G:1,10G:1 says that ratio of space occupied by 100 GiB
- partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
+ partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
will be split into five 100 GiB partition and 500 GiB will be split into fifty
10GiB partitions.
- size=100G:1,10G:1 = 5x 100 GiB and 50x 10 GiB partitions (on 1 TiB disk).
@@ -73,7 +75,7 @@ options:
and eight 50 GiB partitions (again, 400 GiB).
- size=200G:1,100G:1,50G:1 = 1x 200 GiB, 4x 100 GiB and 8x 50 GiB partitions
(on 1 TiB disk).
-
+
force:
description:
- If True, it will always overwite partition table on the disk and create new one.
@@ -81,6 +83,7 @@ options:
"""
+
# It's not class, it's more a simple struct with almost no functionality.
# pylint: disable=too-few-public-methods
class PartitionSpec(object):
@@ -98,6 +101,7 @@ class PartitionSpec(object):
""" Set count of parititions of this specification. """
self.count = count
+
def assign_space(total_size, specs):
"""
Satisfy all the PartitionSpecs according to their weight.
@@ -113,6 +117,7 @@ def assign_space(total_size, specs):
total_size -= num_blocks * spec.size
total_weight -= spec.weight
+
def partition(diskname, specs, force=False, check_mode=False):
"""
Create requested partitions.
@@ -128,7 +133,7 @@ def partition(diskname, specs, force=False, check_mode=False):
disk = None
if disk and len(disk.partitions) > 0 and not force:
- print "skipping", diskname
+ print("skipping", diskname)
return 0
# create new partition table, wiping all existing data
@@ -161,16 +166,17 @@ def partition(diskname, specs, force=False, check_mode=False):
pass
return count
+
def parse_spec(text):
""" Parse string with partition specification. """
tokens = text.split(",")
specs = []
for token in tokens:
- if not ":" in token:
+ if ":" not in token:
token += ":1"
(sizespec, weight) = token.split(':')
- weight = float(weight) # throws exception with reasonable error string
+ weight = float(weight) # throws exception with reasonable error string
units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
unit = units.get(sizespec[-1].lower(), None)
@@ -184,6 +190,7 @@ def parse_spec(text):
specs.append(spec)
return specs
+
def get_partitions(diskpath):
""" Return array of partition names for given disk """
dev = parted.getDevice(diskpath)
@@ -198,7 +205,7 @@ def get_partitions(diskpath):
def main():
""" Ansible module main method. """
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
disks=dict(required=True, type='str'),
force=dict(required=False, default="no", type='bool'),
@@ -215,7 +222,7 @@ def main():
try:
specs = parse_spec(sizes)
- except ValueError, ex:
+ except ValueError as ex:
err = "Error parsing sizes=" + sizes + ": " + str(ex)
module.fail_json(msg=err)
@@ -224,17 +231,17 @@ def main():
for disk in disks.split(","):
try:
changed_count += partition(disk, specs, force, module.check_mode)
- except Exception, ex:
+ except Exception as ex:
err = "Error creating partitions on " + disk + ": " + str(ex)
raise
- #module.fail_json(msg=err)
+ # module.fail_json(msg=err)
partitions += get_partitions(disk)
module.exit_json(changed=(changed_count > 0), ansible_facts={"partition_pool": partitions})
+
# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-order, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: E402,F403
main()
-
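Note: the module documentation above defines the `size=<size>:<weight>` specification, e.g. `size=100G:1,10G:1` gives both sizes equal weight, so a 1 TiB disk yields five 100 GiB and fifty 10 GiB partitions. A hedged sketch of invoking the module from a play follows; `disks` and `force` come from the argument_spec shown in the diff, `sizes` is inferred from the `parse_spec(sizes)` call (its argument_spec entry falls outside the hunk), and the device paths are purely illustrative:
- name: Create the partition pool
  partitionpool:
    disks: /dev/sdb,/dev/sdc
    sizes: 100G:1,10G:1
    force: false
  register: partition_result
As the exit_json call above shows, the module reports its result through an ansible_facts entry named `partition_pool` listing the created partitions.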
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
index be6ca6b88..7ed028138 100644
--- a/roles/kube_nfs_volumes/meta/main.yml
+++ b/roles/kube_nfs_volumes/meta/main.yml
@@ -13,5 +13,5 @@ galaxy_info:
versions:
- all
categories:
- - cloud
+ - cloud
dependencies: []
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
new file mode 100644
index 000000000..fb545c7c8
--- /dev/null
+++ b/roles/lib_utils/library/yedit.py
@@ -0,0 +1,766 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=wrong-import-order
+import json
+import os
+import re
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+import shutil
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: yedit
+short_description: Create, modify, and idempotently manage yaml files.
+description:
+ - Modify yaml files programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list yaml
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ debug:
+ description:
+ - Turn on debug information.
+ required: false
+ default: false
+ aliases: []
+ src:
+ description:
+ - The file that is the target of the modifications.
+ required: false
+ default: None
+ aliases: []
+ content:
+ description:
+ - Content represents the yaml content you desire to work with. This
+ - could be the file contents to write or the in-memory data to modify.
+ required: false
+ default: None
+ aliases: []
+ content_type:
+ description:
+ - The python type of the content parameter.
+ required: false
+ default: 'dict'
+ aliases: []
+ key:
+ description:
+ - The path to the value you wish to modify. Empty string means the top of
+ - the document.
+ required: false
+ default: ''
+ aliases: []
+ value:
+ description:
+ - The incoming value of parameter 'key'.
+ required: false
+ default:
+ aliases: []
+ value_type:
+ description:
+ - The python type of the incoming value.
+ required: false
+ default: ''
+ aliases: []
+ update:
+ description:
+ - Whether the update should be performed on a dict/hash or list/array
+ - object.
+ required: false
+ default: false
+ aliases: []
+ append:
+ description:
+ - Whether to append to an array/list. When the key does not exist or is
+ - null, a new array is created. When the key is of a non-list type,
+ - nothing is done.
+ required: false
+ default: false
+ aliases: []
+ index:
+ description:
+ - Used in conjunction with the update parameter. This will update a
+ - specific index in an array/list.
+ required: false
+ default: false
+ aliases: []
+ curr_value:
+ description:
+ - Used in conjunction with the update parameter. This is the current
+ - value of 'key' in the yaml file.
+ required: false
+ default: false
+ aliases: []
+ curr_value_format:
+ description:
+ - Format of the incoming current value.
+ choices: ["yaml", "json", "str"]
+ required: false
+ default: false
+ aliases: []
+ backup:
+ description:
+ - Whether to make a backup copy of the current file when performing an
+ - edit.
+ required: false
+ default: true
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Simple insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: test
+ value: somevalue
+ state: present
+# Results:
+# test: somevalue
+
+# Multilevel insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: a#b#c
+ value: d
+ state: present
+# Results:
+# a:
+# b:
+# c: d
+'''
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for yaml_dict '''
+ return self._separator
+
+ @separator.setter
+ def separator(self):
+ ''' getter method for yaml_dict '''
+ return self._separator
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+
+def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+
+def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+
+# pylint: disable=too-many-branches
+def main():
+ ''' ansible oc module for secrets '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ src=dict(default=None, type='str'),
+ content=dict(default=None),
+ content_type=dict(default='dict', choices=['dict']),
+ key=dict(default='', type='str'),
+ value=dict(),
+ value_type=dict(default='', type='str'),
+ update=dict(default=False, type='bool'),
+ append=dict(default=False, type='bool'),
+ index=dict(default=None, type='int'),
+ curr_value=dict(default=None, type='str'),
+ curr_value_format=dict(default='yaml',
+ choices=['yaml', 'json', 'str'],
+ type='str'),
+ backup=dict(default=True, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
+ required_one_of=[["content", "src"]],
+ )
+
+ rval = Yedit.run_ansible(module)
+ if 'failed' in rval and rval['failed']:
+ module.fail_json(msg=rval['msg'])
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
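Note: the EXAMPLES block in the new yedit module only covers simple key/value inserts. Based on the options it documents (src, key, value, append, state, separator, backup), a hedged usage sketch for the list and delete cases might look like the following; the file name and key paths are illustrative:
# Append a value to the list at registries.insecure
# (per the documented behaviour, the list is created if the key is absent)
- name: append an insecure registry
  yedit:
    src: somefile.yml
    key: registries.insecure
    value: myregistry.example.com:5000
    append: true
# Remove a key entirely
- name: delete the old key
  yedit:
    src: somefile.yml
    key: registries.insecure
    state: absent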
diff --git a/roles/lib_utils/src/ansible/yedit.py b/roles/lib_utils/src/ansible/yedit.py
new file mode 100644
index 000000000..a80cd520c
--- /dev/null
+++ b/roles/lib_utils/src/ansible/yedit.py
@@ -0,0 +1,84 @@
+# flake8: noqa
+# pylint: skip-file
+
+
+def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+
+def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+
+# pylint: disable=too-many-branches
+def main():
+ ''' ansible oc module for secrets '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ src=dict(default=None, type='str'),
+ content=dict(default=None),
+ content_type=dict(default='dict', choices=['dict']),
+ key=dict(default='', type='str'),
+ value=dict(),
+ value_type=dict(default='', type='str'),
+ update=dict(default=False, type='bool'),
+ append=dict(default=False, type='bool'),
+ index=dict(default=None, type='int'),
+ curr_value=dict(default=None, type='str'),
+ curr_value_format=dict(default='yaml',
+ choices=['yaml', 'json', 'str'],
+ type='str'),
+ backup=dict(default=True, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
+ required_one_of=[["content", "src"]],
+ )
+
+ rval = Yedit.run_ansible(module)
+ if 'failed' in rval and rval['failed']:
+ module.fail_json(msg=rval['msg'])
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/src/class/import.py b/roles/lib_utils/src/class/import.py
new file mode 100644
index 000000000..249e07228
--- /dev/null
+++ b/roles/lib_utils/src/class/import.py
@@ -0,0 +1,11 @@
+# flake8: noqa
+# pylint: skip-file
+
+# pylint: disable=wrong-import-order
+import json
+import os
+import re
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+import shutil
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
new file mode 100644
index 000000000..e110bc11e
--- /dev/null
+++ b/roles/lib_utils/src/class/yedit.py
@@ -0,0 +1,520 @@
+# flake8: noqa
+# pylint: skip-file
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for yaml_dict '''
+ return self._separator
+
+ @separator.setter
+ def separator(self):
+ ''' getter method for yaml_dict '''
+ return self._separator
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
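
For context, a minimal sketch (not part of the generated module) of how the (changed, result) tuples returned by put/update/append above are typically consumed; the file name and content here are hypothetical and assume the generated yedit library is importable:

    from yedit import Yedit

    yed = Yedit('sample.yml', separator='.')   # 'sample.yml' is a hypothetical target
    yed.yaml_dict = {'a': {'b': 'c'}}

    changed, result = yed.put('a.b', 'd')      # True: the value is replaced in memory
    changed, result = yed.put('a.b', 'd')      # False: the second call is a no-op
    if changed:
        yed.write()                            # persist only when something changed
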
diff --git a/roles/lib_utils/src/doc/license b/roles/lib_utils/src/doc/license
new file mode 100644
index 000000000..717bb7f17
--- /dev/null
+++ b/roles/lib_utils/src/doc/license
@@ -0,0 +1,16 @@
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/roles/lib_utils/src/doc/yedit b/roles/lib_utils/src/doc/yedit
new file mode 100644
index 000000000..e367a389e
--- /dev/null
+++ b/roles/lib_utils/src/doc/yedit
@@ -0,0 +1,132 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: yedit
+short_description: Create, modify, and idempotently manage yaml files.
+description:
+ - Modify yaml files programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list yaml
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ debug:
+ description:
+ - Turn on debug information.
+ required: false
+ default: false
+ aliases: []
+ src:
+ description:
+ - The file that is the target of the modifications.
+ required: false
+ default: None
+ aliases: []
+ content:
+ description:
+ - Content represents the yaml content you desire to work with. This
+    - could be the file contents to write or the in-memory data to modify.
+ required: false
+ default: None
+ aliases: []
+ content_type:
+ description:
+ - The python type of the content parameter.
+ required: false
+ default: 'dict'
+ aliases: []
+ key:
+ description:
+    - The path to the value you wish to modify. Empty string means the top of
+ - the document.
+ required: false
+ default: ''
+ aliases: []
+ value:
+ description:
+ - The incoming value of parameter 'key'.
+ required: false
+ default:
+ aliases: []
+ value_type:
+ description:
+ - The python type of the incoming value.
+ required: false
+ default: ''
+ aliases: []
+ update:
+ description:
+ - Whether the update should be performed on a dict/hash or list/array
+ - object.
+ required: false
+ default: false
+ aliases: []
+ append:
+ description:
+ - Whether to append to an array/list. When the key does not exist or is
+ - null, a new array is created. When the key is of a non-list type,
+ - nothing is done.
+ required: false
+ default: false
+ aliases: []
+ index:
+ description:
+ - Used in conjunction with the update parameter. This will update a
+ - specific index in an array/list.
+ required: false
+ default: false
+ aliases: []
+ curr_value:
+ description:
+ - Used in conjunction with the update parameter. This is the current
+ - value of 'key' in the yaml file.
+ required: false
+ default: false
+ aliases: []
+ curr_value_format:
+ description:
+ - Format of the incoming current value.
+ choices: ["yaml", "json", "str"]
+ required: false
+ default: false
+ aliases: []
+ backup:
+ description:
+ - Whether to make a backup copy of the current file when performing an
+ - edit.
+ required: false
+ default: true
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Simple insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: test
+ value: somevalue
+ state: present
+# Results:
+# test: somevalue
+
+# Multilevel insert of key, value
+- name: insert multilevel key, value
+ yedit:
+ src: somefile.yml
+ key: a#b#c
+ value: d
+ state: present
+# Results:
+# a:
+# b:
+# c: d
+'''
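
The interplay of the update, index, and curr_value options documented above is easiest to see against a list. A minimal sketch using the Yedit class directly, mirroring the unit tests added later in this change (the content is illustrative):

    from yedit import Yedit

    yed = Yedit(content={'x': {'y': {'z': [1, 2, 3]}}}, separator=':')

    yed.update('x:y:z', [5, 6])                # no index/curr_value: appended -> [1, 2, 3, [5, 6]]

    yed.put('x:y:z', [1, 2, 3])                # reset for the next example
    yed.update('x:y:z', [5, 6], index=2)       # replace by position -> [1, 2, [5, 6]]

    yed.put('x:y:z', [1, 2, 3])                # reset again
    yed.update('x:y:z', [5, 6], curr_value=3)  # replace by matching current value -> [1, 2, [5, 6]]
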
diff --git a/roles/lib_utils/src/generate.py b/roles/lib_utils/src/generate.py
new file mode 100755
index 000000000..f4b46aa91
--- /dev/null
+++ b/roles/lib_utils/src/generate.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+'''
+    Generate the openshift-ansible/roles/lib_utils/library/ modules.
+'''
+
+import os
+import yaml
+
+# pylint: disable=anomalous-backslash-in-string
+GEN_STR = "#!/usr/bin/env python\n" + \
+ "# pylint: disable=missing-docstring\n" + \
+ "# ___ ___ _ _ ___ ___ _ _____ ___ ___\n" + \
+ "# / __| __| \| | __| _ \ /_\_ _| __| \\\n" + \
+ "# | (_ | _|| .` | _|| / / _ \| | | _|| |) |\n" + \
+ "# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____\n" + \
+ "# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|\n" + \
+ "# | |) | (_) | | .` | (_) || | | _|| |) | | | |\n" + \
+ "# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|\n"
+
+OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'generate_sources.yml') # noqa: E501
+
+
+def main():
+ ''' combine the necessary files to create the ansible module '''
+
+ library = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+ sources = yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+ for fname, parts in sources.items():
+ with open(os.path.join(library, fname), 'w') as afd:
+ afd.seek(0)
+ afd.write(GEN_STR)
+ for fpart in parts:
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+                    # the first couple of lines of each part are lint markers; skip them
+                    for idx, line in enumerate(pfd):
+                        if idx in [0, 1] and ('flake8: noqa' in line or
+                                              'pylint: skip-file' in line):
+ continue
+
+ afd.write(line)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/src/generate_sources.yml b/roles/lib_utils/src/generate_sources.yml
new file mode 100644
index 000000000..83b21de1b
--- /dev/null
+++ b/roles/lib_utils/src/generate_sources.yml
@@ -0,0 +1,7 @@
+---
+yedit.py:
+- doc/license
+- class/import.py
+- doc/yedit
+- class/yedit.py
+- ansible/yedit.py
diff --git a/roles/lib_utils/src/test/integration/files/kube-manager.yaml b/roles/lib_utils/src/test/integration/files/kube-manager.yaml
new file mode 100644
index 000000000..6f4b9e6dc
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/files/kube-manager.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kube-controller-manager
+ namespace: kube-system
+spec:
+ hostNetwork: true
+ containers:
+ - name: kube-controller-manager
+ image: openshift/kube:v1.0.0
+ command:
+ - /hyperkube
+ - controller-manager
+ - --master=http://127.0.0.1:8080
+ - --leader-elect=true
+ - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
+ - --root-ca-file=/etc/kubernetes/ssl/ca.pem
+ livenessProbe:
+ httpGet:
+ host: 127.0.0.1
+ path: /healthz
+ port: 10252
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ volumeMounts:
+ - mountPath: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ readOnly: true
+ - mountPath: /etc/ssl/certs
+ name: ssl-certs-host
+ readOnly: true
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: ssl-certs-host
diff --git a/roles/lib_utils/src/test/integration/yedit_test.yml b/roles/lib_utils/src/test/integration/yedit_test.yml
new file mode 100755
index 000000000..1760a7466
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/yedit_test.yml
@@ -0,0 +1,221 @@
+#!/usr/bin/ansible-playbook
+# Yedit test so that we can quickly determine if features are working
+# Ensure that the kube-manager.yaml file exists
+#
+# ./yedit_test.yml -M ../../library
+#
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ test_file: kube-manager-test.yaml
+ test: test
+ strategy: debug
+
+ post_tasks:
+ - name: copy the kube-manager.yaml file so that we have a pristine copy each time
+ copy:
+ src: kube-manager.yaml
+ dest: "./{{ test_file }}"
+ changed_when: False
+
+ ####### add key to top level #####
+ - name: add a key at the top level
+ yedit:
+ src: "{{ test_file }}"
+ key: yedittest
+ value: yedittest
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: yedittest
+ register: results
+
+ - name: Assert that key is at top level
+ assert:
+ that: results.result == 'yedittest'
+ msg: 'Test: add a key to top level failed. yedittest != [{{ results.result }}]'
+ ###### end add key to top level #####
+
+ ###### modify multilevel key, value #####
+ - name: modify multilevel key, value
+ yedit:
+ src: "{{ test_file }}"
+ key: metadata-namespace
+ value: openshift-is-awesome
+ separator: '-'
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: metadata-namespace
+ separator: '-'
+ register: results
+
+ - name: Assert that key is as expected
+ assert:
+ that: results.result == 'openshift-is-awesome'
+ msg: 'Test: multilevel key, value modification: openshift-is-awesome != [{{ results.result }}]'
+ ###### end modify multilevel key, value #####
+
+ ###### test a string boolean #####
+ - name: test a string boolean
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].volumeMounts[1].readOnly
+ value: 'true'
+ value_type: str
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].volumeMounts[1].readOnly
+ register: results
+
+ - name: Assert that key is a string
+ assert:
+ that: results.result == "true"
+ msg: "Test: boolean str: 'true' != [{{ results.result }}]"
+
+ - name: Assert that key is not bool
+ assert:
+ that: results.result != true
+ msg: "Test: boolean str: true != [{{ results.result }}]"
+ ###### end test boolean string #####
+
+ ###### test array append #####
+ - name: test array append
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].command
+ value: --my-new-parameter=openshift
+ append: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].command
+ register: results
+
+ - name: Assert that the last element in array is our value
+ assert:
+ that: results.result[-1] == "--my-new-parameter=openshift"
+ msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]"
+ ###### end test array append #####
+
+ ###### test non-existing array append #####
+ - name: test array append to non-existing key
+ yedit:
+ src: "{{ test_file }}"
+ key: nonexistingkey
+ value: --my-new-parameter=openshift
+ append: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: nonexistingkey
+ register: results
+
+ - name: Assert that the last element in array is our value
+ assert:
+ that: results.result[-1] == "--my-new-parameter=openshift"
+ msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]"
+ ###### end test non-existing array append #####
+
+ ###### test array update modify #####
+ - name: test array update modify
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].command
+ value: --root-ca-file=/etc/k8s/ssl/my.pem
+ curr_value: --root-ca-file=/etc/kubernetes/ssl/ca.pem
+ curr_value_format: str
+ update: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].command
+ register: results
+
+ - name: Assert that the element in array is our value
+ assert:
+ that: results.result[5] == "--root-ca-file=/etc/k8s/ssl/my.pem"
+ msg: "Test: '--root-ca-file=/etc/k8s/ssl/my.pem' != [{{ results.result[5] }}]"
+  ###### end test array update modify #####
+
+ ###### test dict create #####
+ - name: test dict create
+ yedit:
+ src: "{{ test_file }}"
+ key: a.b.c
+ value: d
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: a.b.c
+ register: results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == "d"
+ msg: "Test: 'd' != [{{ results.result }}]"
+ ###### end test dict create #####
+
+ ###### test create dict value #####
+ - name: test create dict value
+ yedit:
+ src: "{{ test_file }}"
+ key: e.f.g
+ value:
+ h:
+ i:
+ j: k
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: e.f.g.h.i.j
+ register: results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == "k"
+ msg: "Test: 'k' != [{{ results.result }}]"
+  ###### end test create dict value #####
+
+ ###### test create list value #####
+ - name: test create list value
+ yedit:
+ src: "{{ test_file }}"
+ key: z.x.y
+ value:
+ - 1
+ - 2
+ - 3
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: z#x#y
+ separator: '#'
+ register: results
+ - debug: var=results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == [1, 2, 3]
+ msg: "Test: '[1, 2, 3]' != [{{ results.result }}]"
+###### end test create list value #####
diff --git a/roles/lib_utils/src/test/unit/yedit_test.py b/roles/lib_utils/src/test/unit/yedit_test.py
new file mode 100755
index 000000000..2793c5c1a
--- /dev/null
+++ b/roles/lib_utils/src/test/unit/yedit_test.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for yedit
+'''
+# To run
+# python -m unittest yedit_test
+#
+# .............................
+# ----------------------------------------------------------------------
+# Ran 29 tests in 0.133s
+# OK
+
+import os
+import sys
+import unittest
+
+# Disable the invalid-name check so that the test
+# variable names can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place yedit in our path
+yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, yedit_path)
+
+from yedit import Yedit # noqa: E402
+
+# pylint: disable=too-many-public-methods
+# Silly pylint, moar tests!
+
+
+class YeditTest(unittest.TestCase):
+ '''
+ Test class for yedit
+ '''
+ data = {'a': 'a',
+ 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
+ } # noqa: E124
+
+ filename = 'yedit_test.yml'
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ yed = Yedit(YeditTest.filename)
+ yed.yaml_dict = YeditTest.data
+ yed.write()
+
+ def test_load(self):
+ ''' Testing a get '''
+ yed = Yedit('yedit_test.yml')
+ self.assertEqual(yed.yaml_dict, self.data)
+
+ def test_write(self):
+ ''' Testing a simple write '''
+ yed = Yedit('yedit_test.yml')
+ yed.put('key1', 1)
+ yed.write()
+ self.assertTrue('key1' in yed.yaml_dict)
+ self.assertEqual(yed.yaml_dict['key1'], 1)
+
+ def test_write_x_y_z(self):
+ '''Testing a write of multilayer key'''
+ yed = Yedit('yedit_test.yml')
+ yed.put('x.y.z', 'modified')
+ yed.write()
+ yed.load()
+ self.assertEqual(yed.get('x.y.z'), 'modified')
+
+ def test_delete_a(self):
+ '''Testing a simple delete '''
+ yed = Yedit('yedit_test.yml')
+ yed.delete('a')
+ yed.write()
+ yed.load()
+ self.assertTrue('a' not in yed.yaml_dict)
+
+ def test_delete_b_c(self):
+ '''Testing delete of layered key '''
+ yed = Yedit('yedit_test.yml', separator=':')
+ yed.delete('b:c')
+ yed.write()
+ yed.load()
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertFalse('c' in yed.yaml_dict['b'])
+
+ def test_create(self):
+ '''Testing a create '''
+ os.unlink(YeditTest.filename)
+ yed = Yedit('yedit_test.yml')
+ yed.create('foo', 'bar')
+ yed.write()
+ yed.load()
+ self.assertTrue('foo' in yed.yaml_dict)
+ self.assertTrue(yed.yaml_dict['foo'] == 'bar')
+
+ def test_create_content(self):
+ '''Testing a create with content '''
+ content = {"foo": "bar"}
+ yed = Yedit("yedit_test.yml", content)
+ yed.write()
+ yed.load()
+ self.assertTrue('foo' in yed.yaml_dict)
+        self.assertEqual(yed.yaml_dict['foo'], 'bar')
+
+ def test_array_insert(self):
+        '''Testing a put at a list index'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[0]') == 'inject')
+
+ def test_array_insert_first_index(self):
+        '''Testing that the next element is unchanged after an indexed put'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[1]') == 'f')
+
+ def test_array_insert_second_index(self):
+        '''Testing that the last element is unchanged after an indexed put'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[2]') == 'g')
+
+ def test_dict_array_dict_access(self):
+        '''Testing access of a dict nested inside a list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')
+
+ def test_dict_array_dict_replace(self):
+        '''Testing replacement of a value in a dict nested inside a list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ yed.put('b:c:d[0]:[0]:x:y', 'testing')
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertTrue('c' in yed.yaml_dict['b'])
+ self.assertTrue('d' in yed.yaml_dict['b']['c'])
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
+ self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
+ self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing') # noqa: E501
+
+ def test_dict_array_dict_remove(self):
+ '''Testing multilevel delete'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ yed.delete('b:c:d[0]:[0]:x:y')
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertTrue('c' in yed.yaml_dict['b'])
+ self.assertTrue('d' in yed.yaml_dict['b']['c'])
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
+ self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
+
+ def test_key_exists_in_dict(self):
+ '''Testing exist in dict'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.exists('b:c', 'd'))
+
+ def test_key_exists_in_list(self):
+ '''Testing exist in list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
+ self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))
+
+ def test_update_to_list_with_index(self):
+ '''Testing update to list with index'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6], index=2)
+ self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_update_to_list_with_curr_value(self):
+        '''Testing update to list with curr_value'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6], curr_value=3)
+ self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_update_to_list(self):
+ '''Testing update to list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6])
+ self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_append_twice_to_list(self):
+ '''Testing append to list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.append('x:y:z', [5, 6])
+ yed.append('x:y:z', [5, 6])
+ self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
+ self.assertTrue(2 == yed.get('x:y:z').count([5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_add_item_to_dict(self):
+ '''Testing update to dict'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', {'a': 1, 'b': 2})
+ yed.update('x:y:z', {'c': 3, 'd': 4})
+ self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
+ self.assertTrue(yed.exists('x:y:z', {'c': 3}))
+
+ def test_first_level_dict_with_none_value(self):
+ '''test dict value with none value'''
+ yed = Yedit(content={'a': None}, separator=":")
+ yed.put('a:b:c', 'test')
+ self.assertTrue(yed.get('a:b:c') == 'test')
+        self.assertEqual(yed.get('a:b'), {'c': 'test'})
+
+ def test_adding_yaml_variable(self):
+        '''test adding a yaml variable as a value'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('z:y', '{{test}}')
+ self.assertTrue(yed.get('z:y') == '{{test}}')
+
+ def test_keys_with_underscore(self):
+        '''test keys that contain underscores'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('z_:y_y', {'test': '{{test}}'})
+ self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})
+
+ def test_first_level_array_update(self):
+ '''test update on top level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
+ yed.update('', {'c': 4})
+ self.assertTrue({'c': 4} in yed.get(''))
+
+ def test_first_level_array_delete(self):
+ '''test remove top level key'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
+ yed.delete('')
+ self.assertTrue({'b': 3} not in yed.get(''))
+
+ def test_first_level_array_get(self):
+        '''test get on a top-level list'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
+ yed.get('')
+ self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)
+
+ def test_pop_list_item(self):
+        '''test popping an item from a list'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
+ yed.pop('', {'b': 2})
+ self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)
+
+ def test_pop_list_item_2(self):
+        '''test popping an item from a list of ints'''
+ z = range(10)
+ yed = Yedit(content=z, separator=':')
+ yed.pop('', 5)
+ z.pop(5)
+ self.assertTrue(z == yed.yaml_dict)
+
+ def test_pop_dict_key(self):
+        '''test popping a key from a dict'''
+ yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
+ yed.pop('a#b', 'c')
+ self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)
+
+ def tearDown(self):
+ '''TearDown method'''
+ os.unlink(YeditTest.filename)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml
index 2b06613f3..36838debc 100644
--- a/roles/nuage_ca/meta/main.yml
+++ b/roles/nuage_ca/meta/main.yml
@@ -1,6 +1,6 @@
---
galaxy_info:
- author: Vishal Patil
+ author: Vishal Patil
description:
company: Nuage Networks
license: Apache License, Version 2.0
diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml
index 16dac8720..a7803c0ee 100644
--- a/roles/nuage_common/defaults/main.yaml
+++ b/roles/nuage_common/defaults/main.yaml
@@ -1,3 +1,4 @@
+---
nuage_ca_master: "{{ groups.oo_first_master.0 }}"
nuage_ca_master_crt_dir: /usr/share/nuage-openshift-certificates
diff --git a/roles/nuage_master/defaults/main.yaml b/roles/nuage_master/defaults/main.yaml
index cf670a9e1..c90f4f443 100644
--- a/roles/nuage_master/defaults/main.yaml
+++ b/roles/nuage_master/defaults/main.yaml
@@ -1,4 +1,4 @@
---
nuage_master_cspadminpasswd: ""
nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin
+nuage_master_adminuserpasswd: admin
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index b2a47ef71..a8a9bd3b4 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,10 +13,10 @@ galaxy_info:
- cloud
- system
dependencies:
- - role: nuage_ca
- - role: nuage_common
- - role: openshift_etcd_client_certificates
- - role: os_firewall
- os_firewall_allow:
- - service: openshift-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_ca
+- role: nuage_common
+- role: openshift_etcd_client_certificates
+- role: os_firewall
+ os_firewall_allow:
+ - service: openshift-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml
index 0a2f375cd..c16616e1c 100644
--- a/roles/nuage_master/tasks/certificates.yml
+++ b/roles/nuage_master/tasks/certificates.yml
@@ -1,11 +1,11 @@
---
- name: Create a directory to hold the certificates
file: path="{{ nuage_mon_rest_server_crt_dir }}" state=directory
- delegate_to: "{{ nuage_ca_master }}"
+ delegate_to: "{{ nuage_ca_master }}"
- name: Create the key
command: >
- openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
+ openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
delegate_to: "{{ nuage_ca_master }}"
- name: Create the req file
@@ -30,7 +30,7 @@
shell: "cd {{ nuage_mon_rest_server_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
delegate_to: "{{ nuage_ca_master }}"
-- name: Create a temp directory for the certificates
+- name: Create a temp directory for the certificates
local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
register: mktemp
@@ -42,7 +42,7 @@
unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_master_crt_dir }}
- name: Delete the certificates after copy
- file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
+ file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
delegate_to: "{{ nuage_ca_master }}"
- name: Delete the temp directory
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index b8eaede3b..d211d30e8 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,13 +1,13 @@
---
- name: Create directory /usr/share/nuage-openshift-monitor
become: yes
- file: path=/usr/share/nuage-openshift-monitor state=directory
+ file: path=/usr/share/nuage-openshift-monitor state=directory
- name: Create the log directory
become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
-- name: Install Nuage Openshift Monitor
+- name: Install Nuage Openshift Monitor
become: yes
yum: name={{ nuage_openshift_rpm }} state=present
@@ -17,12 +17,12 @@
become: yes
fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
with_items:
- - ca.crt
- - nuage.crt
- - nuage.key
- - nuage.kubeconfig
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
-- include: certificates.yml
+- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
become: yes
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index b395eba99..dba399a03 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -1,3 +1,4 @@
+---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
@@ -6,7 +7,7 @@ ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
cert_output_dir: /usr/share/nuage-openshift-monitor
kube_config: /usr/share/nuage-openshift-monitor/nuage.kubeconfig
-kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
+kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
nuage_mon_rest_server_url: "0.0.0.0:{{ nuage_mon_rest_server_port }}"
nuage_mon_rest_server_logdir: "{{ nuage_openshift_monitor_log_dir | default('/var/log/nuage-openshift-monitor') }}"
@@ -14,18 +15,18 @@ nuage_mon_log_level: "{{ nuage_openshift_monitor_log_level | default('3') }}"
nuage_mon_rest_server_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.key"
-nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
+nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(openshift.common.hostname) }}"
-nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
+nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
nuage_service_account: system:serviceaccount:default:nuage
nuage_service_account_config:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: nuage
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: nuage
nuage_tasks:
- - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
+ - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index f96318611..3e2a5e0c9 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -13,11 +13,11 @@ galaxy_info:
- cloud
- system
dependencies:
- - role: nuage_common
- - role: nuage_ca
- - role: os_firewall
- os_firewall_allow:
- - service: vxlan
- port: 4789/udp
- - service: nuage-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_common
+- role: nuage_ca
+- role: os_firewall
+ os_firewall_allow:
+ - service: vxlan
+ port: 4789/udp
+ - service: nuage-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_node/tasks/certificates.yml b/roles/nuage_node/tasks/certificates.yml
index 7fcd4274d..d1c8bf59a 100644
--- a/roles/nuage_node/tasks/certificates.yml
+++ b/roles/nuage_node/tasks/certificates.yml
@@ -5,7 +5,7 @@
- name: Create the key
command: >
- openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
+ openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
delegate_to: "{{ nuage_ca_master }}"
- name: Create the req file
@@ -30,7 +30,7 @@
shell: "cd {{ nuage_plugin_rest_client_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
delegate_to: "{{ nuage_ca_master }}"
-- name: Create a temp directory for the certificates
+- name: Create a temp directory for the certificates
local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
register: mktemp
@@ -42,7 +42,7 @@
unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_plugin_crt_dir }}
- name: Delete the certificates after copy
- file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
+ file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
delegate_to: "{{ nuage_ca_master }}"
- name: Delete the temp directory
diff --git a/roles/nuage_node/tasks/iptables.yml b/roles/nuage_node/tasks/iptables.yml
index 52935f075..8e2c29620 100644
--- a/roles/nuage_node/tasks/iptables.yml
+++ b/roles/nuage_node/tasks/iptables.yml
@@ -5,7 +5,7 @@
always_run: yes
- name: Allow traffic from overlay to underlay
- command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
+ command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
when: "'nuage-overlay-underlay' not in iptablesrules.stdout"
notify:
- save iptable rules
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 2ec4be2c2..d82dd36a4 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -2,16 +2,16 @@
- name: Install Nuage VRS
become: yes
yum: name={{ vrs_rpm }} state=present
-
-- name: Set the uplink interface
+
+- name: Set the uplink interface
become: yes
lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
-- name: Set the Active Controller
+- name: Set the Active Controller
become: yes
lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
-- name: Set the Standby Controller
+- name: Set the Standby Controller
become: yes
lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
when: vsc_standby_ip is defined
@@ -24,18 +24,18 @@
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
with_items:
- - ca.crt
- - nuage.crt
- - nuage.key
- - nuage.kubeconfig
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
- include: certificates.yml
-- name: Set the vsp-openshift.yaml
+- name: Set the vsp-openshift.yaml
become: yes
- template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
+ template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
notify:
- restart vrs
- - restart node
+ - restart node
- include: iptables.yml
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 86486259f..7b789152f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -17,6 +17,6 @@ plugin_log_level: "{{ nuage_plugin_log_level | default('err') }}"
nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
-nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
+nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
-nuage_plugin_crt_dir : /usr/share/vsp-openshift
+nuage_plugin_crt_dir: /usr/share/vsp-openshift
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index 6a4e919e8..1f44b29b9 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -15,10 +15,9 @@
no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
-
+
- name: Set builddefaults config structure
openshift_facts:
role: builddefaults
local_facts:
config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
-
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index d44438332..a88470bdd 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -9,7 +9,7 @@ include:
* Master/Node Service Certificates
* Router/Registry Service Certificates from etcd secrets
* Master/Node/Router/Registry/Admin `kubeconfig`s
-* Etcd certificates
+* Etcd certificates (including embedded)
This role pairs well with the redeploy certificates playbook:
@@ -111,12 +111,16 @@ There are two top-level keys in the saved JSON results, `data` and
`summary`.
The `data` key is a hash where the keys are the names of each host
-examined and the values are the check results for each respective
-host.
+examined and the values are the check results for the certificates
+identified on each respective host.
-The `summary` key is a hash that summarizes the number of certificates
-expiring within the configured warning window and the number of
-already expired certificates.
+The `summary` key is a hash that summarizes the total number of
+certificates:
+
+* examined on the entire cluster
+* OK
+* expiring within the configured warning window
+* already expired
The example below is abbreviated to save space:
@@ -193,7 +197,9 @@ The example below is abbreviated to save space:
},
"summary": {
"warning": 6,
- "expired": 0
+ "expired": 0,
+ "total": 7,
+ "ok": 1
}
}
```
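
For example, a small hypothetical sketch of consuming the saved JSON report described above (the path is illustrative; use whatever results path the role was configured with):

    import json

    # illustrative path; point this at the configured json results file
    with open('/tmp/cert-expiry-report.json') as report:
        results = json.load(report)

    summary = results['summary']
    print("checked %s certificates: %s ok, %s expiring within the warning window, %s expired"
          % (summary['total'], summary['ok'], summary['warning'], summary['expired']))

    # per-host check results are keyed by hostname under 'data'
    for host in sorted(results['data']):
        print(host)
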
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 2e2430ee6..5f102e960 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -5,29 +5,6 @@
Custom filters for use in openshift-ansible
"""
-from ansible import errors
-from collections import Mapping
-from distutils.util import strtobool
-from distutils.version import LooseVersion
-from operator import itemgetter
-import OpenSSL.crypto
-import os
-import pdb
-import pkg_resources
-import re
-import json
-import yaml
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from urlparse import urlparse
-
-try:
- # ansible-2.2
- # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
- # ansible.module_utils._text.to_text should be used instead.
- from ansible.module_utils._text import to_text
-except ImportError:
- # ansible-2.1
- from ansible.utils.unicode import to_unicode as to_text
# Disabling too-many-public-methods, since filter methods are necessarily
# public
@@ -74,13 +51,16 @@ Example playbook usage:
total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts])
total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts])
+ total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts])
+ total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts])
json_result['summary']['warning'] = total_warnings
json_result['summary']['expired'] = total_expired
+ json_result['summary']['ok'] = total_ok
+ json_result['summary']['total'] = total_total
return json_result
-
def filters(self):
""" returns a mapping of filters to methods """
return {
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 2cdb87dc1..a474b36b0 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -4,17 +4,13 @@
"""For details on this module see DOCUMENTATION (below)"""
-# router/registry cert grabbing
-import subprocess
-# etcd config file
-import ConfigParser
-# Expiration parsing
import datetime
-# File path stuff
import os
-# Config file parsing
+import subprocess
+
+from six.moves import configparser
+
import yaml
-# Certificate loading
import OpenSSL.crypto
DOCUMENTATION = '''
@@ -260,7 +256,10 @@ Return:
# This is our module MAIN function after all, so there's bound to be a
# lot of code bundled up into one block
#
-# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches
+# Reason: These checks are disabled because the issue was introduced
+# during a period where the pylint checks weren't enabled for this file
+# Status: temporarily disabled pending future refactoring
+# pylint: disable=too-many-locals,too-many-statements,too-many-branches
def main():
"""This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
@@ -371,7 +370,7 @@ an OpenShift Container Platform cluster
######################################################################
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
- for _, v in cert_meta.iteritems():
+ for _, v in cert_meta.items():
with open(v, 'r') as fp:
cert = fp.read()
cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
@@ -467,7 +466,11 @@ an OpenShift Container Platform cluster
######################################################################
# Check etcd certs
+ #
+ # Two things to check: 'external' etcd, and embedded etcd.
######################################################################
+ # FIRST: The 'external' etcd
+ #
# Some values may be duplicated, make this a set for now so we
# unique them all
etcd_certs_to_check = set([])
@@ -475,13 +478,17 @@ an OpenShift Container Platform cluster
etcd_cert_params.append('dne')
try:
with open('/etc/etcd/etcd.conf', 'r') as fp:
- etcd_config = ConfigParser.ConfigParser()
+ etcd_config = configparser.ConfigParser()
+ # Reason: This check is disabled because the issue was introduced
+ # during a period where the pylint checks weren't enabled for this file
+ # Status: temporarily disabled pending future refactoring
+ # pylint: disable=deprecated-method
etcd_config.readfp(FakeSecHead(fp))
for param in etcd_cert_params:
try:
etcd_certs_to_check.add(etcd_config.get('ETCD', param))
- except ConfigParser.NoOptionError:
+ except configparser.NoOptionError:
# That parameter does not exist, oh well...
pass
except IOError:
@@ -506,6 +513,43 @@ an OpenShift Container Platform cluster
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
######################################################################
+ # Now the embedded etcd
+ ######################################################################
+ try:
+ with open('/etc/origin/master/master-config.yaml', 'r') as fp:
+ cfg = yaml.load(fp)
+ except IOError:
+ # Not present
+ pass
+ else:
+ if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None:
+ # This is embedded
+ etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile']
+ else:
+ # Not embedded
+ etcd_crt_name = None
+
+ if etcd_crt_name is not None:
+ # etcd_crt_name is relative to the location of the
+ # master-config.yaml file
+ cfg_path = os.path.dirname(fp.name)
+ etcd_cert = os.path.join(cfg_path, etcd_crt_name)
+ with open(etcd_cert, 'r') as etcd_fp:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining) = load_and_handle_cert(etcd_fp.read(), now)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': etcd_fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
# /Check etcd certs
######################################################################
@@ -523,7 +567,7 @@ an OpenShift Container Platform cluster
######################################################################
# First the router certs
try:
- router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(),
+ router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
stdout=subprocess.PIPE)
router_ds = yaml.load(router_secrets_raw.communicate()[0])
router_c = router_ds['data']['tls.crt']
@@ -552,7 +596,7 @@ an OpenShift Container Platform cluster
######################################################################
# Now for registry
try:
- registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(),
+ registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
stdout=subprocess.PIPE)
registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
registry_c = registry_ds['data']['registry.crt']
@@ -613,9 +657,13 @@ an OpenShift Container Platform cluster
# will be at the front of the list and certificates which will
# expire later are at the end. Router and registry certs should be
# limited to just 1 result, so don't bother sorting those.
- check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
+ def cert_key(item):
+ ''' return the days_remaining key '''
+ return item['days_remaining']
+
+ check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
+ check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
+ check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
# This module will never change anything, but we might want to
# change the return code parameter if there is some catastrophic
@@ -628,10 +676,11 @@ an OpenShift Container Platform cluster
changed=False
)
+
######################################################################
# It's just the way we do things in Ansible. So disable this warning
#
# pylint: disable=wrong-import-position,import-error
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule # noqa: E402
if __name__ == '__main__':
main()
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 9ff738d14..4ed3e1f01 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -10,7 +10,7 @@ import shutil
import os.path
# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: F403
DOCUMENTATION = '''
@@ -40,7 +40,7 @@ class BinarySyncer(object):
self.bin_dir = '/usr/local/bin'
self.image = image
self.tag = tag
- self.temp_dir = None # TBD
+ self.temp_dir = None # TBD
def sync(self):
container_name = "openshift-cli-%s" % random.randint(1, 100000)
@@ -110,7 +110,7 @@ class BinarySyncer(object):
def main():
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
image=dict(required=True),
tag=dict(required=True),
diff --git a/roles/openshift_cloud_provider/tasks/aws.yml b/roles/openshift_cloud_provider/tasks/aws.yml
index 127a5b392..5fa8773f5 100644
--- a/roles/openshift_cloud_provider/tasks/aws.yml
+++ b/roles/openshift_cloud_provider/tasks/aws.yml
@@ -1,3 +1,4 @@
+---
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 14ad8ba94..ee4048911 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,3 +1,4 @@
+---
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index c9a44b3f5..0a476ac26 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -4,11 +4,11 @@
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
- fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
- fail:
- msg: Nuage sdn can not be used with flannel
+ msg: Nuage sdn can not be used with flannel
when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
- fail:
@@ -46,4 +46,3 @@
command: >
hostnamectl set-hostname {{ openshift.common.hostname }}
when: openshift_set_hostname | default(set_hostname_default) | bool
-
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index c690c5243..613c237a3 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -9,7 +9,7 @@
additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
- log_driver: "{{ openshift_docker_log_driver | default(None) }}"
+ log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
options: "{{ openshift_docker_options | default(None) }}"
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index e843049f9..fc4b56bbf 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -12,8 +12,8 @@ examples_base: "{{ openshift.common.config_base if openshift.common.is_container
image_streams_base: "{{ examples_base }}/image-streams"
centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json"
rhel_image_streams:
- - "{{ image_streams_base}}/image-streams-rhel7.json"
- - "{{ image_streams_base}}/dotnet_imagestreams.json"
+ - "{{ image_streams_base}}/image-streams-rhel7.json"
+ - "{{ image_streams_base}}/dotnet_imagestreams.json"
db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index d8c45dbc6..b139cc599 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.3
+XPAAS_VERSION=ose-v1.3.5
ORIGIN_VERSION=${1:-v1.4}
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json
index 64b004ff4..8e43bfbc3 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json
@@ -4,11 +4,16 @@
"metadata": {
"name": "mariadb-ephemeral",
"annotations": {
- "description": "MariaDB database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "openshift.io/display-name": "MariaDB (Ephemeral)",
+ "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mariadb",
"tags": "database,mariadb"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
+ "labels": {
+ "template": "mariadb-persistent-template"
+ },
"objects": [
{
"kind": "Service",
@@ -177,8 +182,5 @@
"value": "sampledb",
"required": true
}
- ],
- "labels": {
- "template": "mariadb-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json
index 0d5b39e81..bc85277a9 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json
@@ -4,11 +4,16 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "description": "MariaDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "openshift.io/display-name": "MariaDB (Persistent)",
+ "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
+ "labels": {
+ "template": "mariadb-persistent-template"
+ },
"objects": [
{
"kind": "Service",
@@ -201,8 +206,5 @@
"value": "1Gi",
"required": true
}
- ],
- "labels": {
- "template": "mariadb-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json
index 5ed92b3ad..605601ef2 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json
@@ -5,11 +5,16 @@
"name": "mongodb-ephemeral",
"creationTimestamp": null,
"annotations": {
- "description": "MongoDB database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "openshift.io/display-name": "MongoDB (Ephemeral)",
+ "description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mongodb",
"tags": "database,mongodb"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-ephemeral-template"
+ },
"objects": [
{
"kind": "Service",
@@ -217,9 +222,5 @@
"value": "3.2",
"required": true
}
- ],
- "labels": {
- "template": "mongodb-ephemeral-template"
- },
- "message": "You can connect to the database using MongoDB connection URL mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}"
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json
index 00d550d7d..d2a0d01f0 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json
@@ -5,11 +5,16 @@
"name": "mongodb-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "openshift.io/display-name": "MongoDB (Persistent)",
+ "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-persistent-template"
+ },
"objects": [
{
"kind": "Service",
@@ -241,9 +246,5 @@
"value": "3.2",
"required": true
}
- ],
- "labels": {
- "template": "mongodb-persistent-template"
- },
- "message": "You can connect to the database using MongoDB connection URL mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}"
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json
index a7c731243..0cea42f8b 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json
@@ -4,11 +4,16 @@
"metadata": {
"name": "mysql-ephemeral",
"annotations": {
- "description": "MySQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "openshift.io/display-name": "MySQL (Ephemeral)",
+ "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.",
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ },
"objects": [
{
"kind": "Service",
@@ -205,8 +210,5 @@
"value": "5.6",
"required": true
}
- ],
- "labels": {
- "template": "mysql-ephemeral-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json
index 05add25e2..fc7cd7d09 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json
@@ -4,11 +4,16 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "openshift.io/display-name": "MySQL (Persistent)",
+ "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.",
+ "labels": {
+ "template": "mysql-persistent-template"
+ },
"objects": [
{
"kind": "Service",
@@ -208,8 +213,5 @@
"value": "5.6",
"required": true
}
- ],
- "labels": {
- "template": "mysql-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json
index 1562204e5..505224b62 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json
@@ -5,11 +5,16 @@
"name": "postgresql-ephemeral",
"creationTimestamp": null,
"annotations": {
- "description": "PostgreSQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "openshift.io/display-name": "PostgreSQL (Ephemeral)",
+ "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-postgresql",
"tags": "database,postgresql"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-ephemeral-template"
+ },
"objects": [
{
"kind": "Service",
@@ -205,8 +210,5 @@
"value": "9.5",
"required": true
}
- ],
- "labels": {
- "template": "postgresql-ephemeral-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json
index fd2b6a0fb..7ff49782b 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json
@@ -5,11 +5,16 @@
"name": "postgresql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql"
}
},
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-persistent-template"
+ },
"objects": [
{
"kind": "Service",
@@ -229,8 +234,5 @@
"value": "9.5",
"required": true
}
- ],
- "labels": {
- "template": "postgresql-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.3/image-streams/dotnet_imagestreams.json
index 6cbf81591..0d5ac21d8 100644
--- a/roles/openshift_examples/files/examples/v1.3/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v1.3/image-streams/dotnet_imagestreams.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dotnet-image-streams",
"annotations": {
- "description": "ImageStream definitions for .Net Core on RHEL"
+ "description": "ImageStream definitions for .NET Core on RHEL"
}
},
"items": [
@@ -12,29 +12,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "dotnet"
+ "name": "dotnet",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": ".Net Core 1.0 S2I image.",
+ "openshift.io/display-name": ".NET Core (Latest)",
+ "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
"iconClass": "icon-dotnet",
- "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
+ "tags": "builder,.net,dotnet,dotnetcore",
"supports":"dotnet",
"sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.0/test/asp-net-hello-world"
+ "sampleContextDir": "1.1/test/asp-net-hello-world"
},
"from": {
"kind": "ImageStreamTag",
- "name": "1.0"
+ "name": "1.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 1.1",
+ "description": "Build and run .NET Core 1.1 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore11",
+ "supports":"dotnet:1.1,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
+ "sampleContextDir": "1.1/test/asp-net-hello-world",
+ "version": "1.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnetcore-11-rhel7:1.1"
}
},
{
"name": "1.0",
"annotations": {
- "description": ".Net Core 1.0 S2I image.",
+ "openshift.io/display-name": ".NET Core 1.0",
+ "description": "Build and run .NET Core 1.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.",
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
"supports":"dotnet:1.0,dotnet",
diff --git a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
index 386f16d26..edaac73ca 100644
--- a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
@@ -7,14 +7,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "ruby"
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Ruby applications",
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby",
@@ -28,7 +32,8 @@
{
"name": "2.0",
"annotations": {
- "description": "Build and run Ruby 2.0 applications",
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.0,ruby",
@@ -43,7 +48,8 @@
{
"name": "2.2",
"annotations": {
- "description": "Build and run Ruby 2.2 applications",
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.2,ruby",
@@ -58,7 +64,8 @@
{
"name": "2.3",
"annotations": {
- "description": "Build and run Ruby 2.3 applications",
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.3,ruby",
@@ -77,14 +84,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs"
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run NodeJS applications",
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs",
@@ -98,9 +109,10 @@
{
"name": "0.10",
"annotations": {
- "description": "Build and run NodeJS 0.10 applications",
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
"iconClass": "icon-nodejs",
- "tags": "builder,nodejs",
+ "tags": "hidden,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
"version": "0.10",
"sampleRepo": "https://github.com/openshift/nodejs-ex.git"
@@ -113,7 +125,8 @@
{
"name": "4",
"annotations": {
- "description": "Build and run NodeJS 4 applications",
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs:4,nodejs",
@@ -132,14 +145,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "perl"
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Perl applications",
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl",
@@ -153,7 +170,8 @@
{
"name": "5.16",
"annotations": {
- "description": "Build and run Perl 5.16 applications",
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.16,perl",
@@ -168,7 +186,8 @@
{
"name": "5.20",
"annotations": {
- "description": "Build and run Perl 5.20 applications",
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.20,perl",
@@ -188,14 +207,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "php"
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run PHP applications",
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php",
@@ -209,7 +232,8 @@
{
"name": "5.5",
"annotations": {
- "description": "Build and run PHP 5.5 applications",
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.5,php",
@@ -224,7 +248,8 @@
{
"name": "5.6",
"annotations": {
- "description": "Build and run PHP 5.6 applications",
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.6,php",
@@ -243,14 +268,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "python"
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Python applications",
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -264,7 +293,8 @@
{
"name": "3.3",
"annotations": {
- "description": "Build and run Python 3.3 applications",
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.3,python",
@@ -279,7 +309,8 @@
{
"name": "2.7",
"annotations": {
- "description": "Build and run Python 2.7 applications",
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:2.7,python",
@@ -294,7 +325,8 @@
{
"name": "3.4",
"annotations": {
- "description": "Build and run Python 3.4 applications",
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.4,python",
@@ -309,7 +341,8 @@
{
"name": "3.5",
"annotations": {
- "description": "Build and run Python 3.5 applications",
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.5,python",
@@ -328,14 +361,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "wildfly"
+ "name": "wildfly",
+ "annotations": {
+ "openshift.io/display-name": "WildFly"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Java applications on Wildfly",
+ "openshift.io/display-name": "WildFly (Latest)",
+ "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
@@ -349,7 +386,8 @@
{
"name": "8.1",
"annotations": {
- "description": "Build and run Java applications on Wildfly 8.1",
+ "openshift.io/display-name": "WildFly 8.1",
+ "description": "Build and run WildFly 8.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
@@ -364,7 +402,8 @@
{
"name": "9.0",
"annotations": {
- "description": "Build and run Java applications on Wildfly 9.0",
+ "openshift.io/display-name": "WildFly 9.0",
+ "description": "Build and run WildFly 9.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
@@ -379,7 +418,8 @@
{
"name": "10.0",
"annotations": {
- "description": "Build and run Java applications on Wildfly 10.0",
+ "openshift.io/display-name": "WildFly 10.0",
+ "description": "Build and run WildFly 10.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
@@ -394,7 +434,8 @@
{
"name": "10.1",
"annotations": {
- "description": "Build and run Java applications on Wildfly 10.1",
+ "openshift.io/display-name": "WildFly 10.1",
+ "description": "Build and run WildFly 10.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
@@ -413,14 +454,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "mysql"
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a MySQL database",
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
"iconClass": "icon-mysql-database",
"tags": "mysql"
},
@@ -432,7 +477,8 @@
{
"name": "5.5",
"annotations": {
- "description": "Provides a MySQL v5.5 database",
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
"version": "5.5"
@@ -445,7 +491,8 @@
{
"name": "5.6",
"annotations": {
- "description": "Provides a MySQL v5.6 database",
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
"version": "5.6"
@@ -462,14 +509,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "mariadb"
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a MariaDB database",
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mariadb",
"tags": "mariadb"
},
@@ -481,7 +532,8 @@
{
"name": "10.1",
"annotations": {
- "description": "Provides a MariaDB v10.1 database",
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
"iconClass": "icon-mariadb",
"tags": "mariadb",
"version": "10.1"
@@ -498,14 +550,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "postgresql"
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a PostgreSQL database",
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
"iconClass": "icon-postgresql",
"tags": "postgresql"
},
@@ -517,7 +573,8 @@
{
"name": "9.2",
"annotations": {
- "description": "Provides a PostgreSQL v9.2 database",
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
"version": "9.2"
@@ -530,7 +587,8 @@
{
"name": "9.4",
"annotations": {
- "description": "Provides a PostgreSQL v9.4 database",
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
"version": "9.4"
@@ -543,7 +601,8 @@
{
"name": "9.5",
"annotations": {
- "description": "Provides a PostgreSQL v9.5 database",
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
"version": "9.5"
@@ -560,14 +619,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "mongodb"
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a MongoDB database",
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mongodb",
"tags": "mongodb"
},
@@ -579,7 +642,8 @@
{
"name": "2.4",
"annotations": {
- "description": "Provides a MongoDB v2.4 database",
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
"version": "2.4"
@@ -592,7 +656,8 @@
{
"name": "2.6",
"annotations": {
- "description": "Provides a MongoDB v2.6 database",
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
"version": "2.6"
@@ -605,7 +670,8 @@
{
"name": "3.2",
"annotations": {
- "description": "Provides a MongoDB v3.2 database",
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
"version": "3.2"
@@ -622,26 +688,31 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jenkins"
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a Jenkins server",
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
"iconClass": "icon-jenkins",
"tags": "jenkins"
},
"from": {
"kind": "ImageStreamTag",
- "name": "1"
+ "name": "2"
}
},
{
"name": "1",
"annotations": {
- "description": "Provides a Jenkins server",
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
"tags": "jenkins",
"version": "1.x"
@@ -650,6 +721,20 @@
"kind": "DockerImage",
"name": "openshift/jenkins-1-centos7:latest"
}
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins v2.x server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/jenkins-2-centos7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json
index 56c63263b..88ee79a84 100644
--- a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json
@@ -7,14 +7,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "ruby"
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Ruby applications",
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby",
@@ -28,7 +32,8 @@
{
"name": "2.0",
"annotations": {
- "description": "Build and run Ruby 2.0 applications",
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.0,ruby",
@@ -43,7 +48,8 @@
{
"name": "2.2",
"annotations": {
- "description": "Build and run Ruby 2.2 applications",
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.2,ruby",
@@ -58,7 +64,8 @@
{
"name": "2.3",
"annotations": {
- "description": "Build and run Ruby 2.3 applications",
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.3,ruby",
@@ -77,14 +84,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "nodejs"
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run NodeJS applications",
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs",
@@ -98,9 +109,10 @@
{
"name": "0.10",
"annotations": {
- "description": "Build and run NodeJS 0.10 applications",
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
"iconClass": "icon-nodejs",
- "tags": "builder,nodejs",
+ "tags": "hidden,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
"version": "0.10",
"sampleRepo": "https://github.com/openshift/nodejs-ex.git"
@@ -113,7 +125,8 @@
{
"name": "4",
"annotations": {
- "description": "Build and run NodeJS 4.x applications",
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs:4,nodejs",
@@ -132,14 +145,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "perl"
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Perl applications",
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl",
@@ -153,7 +170,8 @@
{
"name": "5.16",
"annotations": {
- "description": "Build and run Perl 5.16 applications",
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.16,perl",
@@ -168,7 +186,8 @@
{
"name": "5.20",
"annotations": {
- "description": "Build and run Perl 5.20 applications",
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.20,perl",
@@ -188,14 +207,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "php"
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run PHP applications",
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php",
@@ -209,7 +232,8 @@
{
"name": "5.5",
"annotations": {
- "description": "Build and run PHP 5.5 applications",
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.5,php",
@@ -224,7 +248,8 @@
{
"name": "5.6",
"annotations": {
- "description": "Build and run PHP 5.6 applications",
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.6,php",
@@ -243,14 +268,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "python"
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Build and run Python applications",
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -264,7 +293,8 @@
{
"name": "3.3",
"annotations": {
- "description": "Build and run Python 3.3 applications",
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.3,python",
@@ -279,7 +309,8 @@
{
"name": "2.7",
"annotations": {
- "description": "Build and run Python 2.7 applications",
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:2.7,python",
@@ -294,7 +325,8 @@
{
"name": "3.4",
"annotations": {
- "description": "Build and run Python 3.4 applications",
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.4,python",
@@ -309,7 +341,8 @@
{
"name": "3.5",
"annotations": {
- "description": "Build and run Python 3.5 applications",
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.5,python",
@@ -328,14 +361,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "mysql"
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a MySQL database",
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
"iconClass": "icon-mysql-database",
"tags": "mysql"
},
@@ -347,7 +384,8 @@
{
"name": "5.5",
"annotations": {
- "description": "Provides a MySQL v5.5 database",
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
"version": "5.5"
@@ -360,7 +398,8 @@
{
"name": "5.6",
"annotations": {
- "description": "Provides a MySQL v5.6 database",
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
"version": "5.6"
@@ -377,14 +416,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "mariadb"
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a MariaDB database",
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mariadb",
"tags": "mariadb"
},
@@ -396,7 +439,8 @@
{
"name": "10.1",
"annotations": {
- "description": "Provides a MariaDB v10.1 database",
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
"iconClass": "icon-mariadb",
"tags": "mariadb",
"version": "10.1"
@@ -413,14 +457,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "postgresql"
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a PostgreSQL database",
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
"iconClass": "icon-postgresql",
"tags": "postgresql"
},
@@ -432,7 +480,8 @@
{
"name": "9.2",
"annotations": {
- "description": "Provides a PostgreSQL v9.2 database",
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
"version": "9.2"
@@ -445,7 +494,8 @@
{
"name": "9.4",
"annotations": {
- "description": "Provides a PostgreSQL v9.4 database",
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
"version": "9.4"
@@ -458,7 +508,8 @@
{
"name": "9.5",
"annotations": {
- "description": "Provides a PostgreSQL v9.5 database",
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
"version": "9.5"
@@ -475,14 +526,18 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "mongodb"
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a MongoDB database",
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mongodb",
"tags": "mongodb"
},
@@ -494,7 +549,8 @@
{
"name": "2.4",
"annotations": {
- "description": "Provides a MongoDB v2.4 database",
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
"version": "2.4"
@@ -507,7 +563,8 @@
{
"name": "2.6",
"annotations": {
- "description": "Provides a MongoDB v2.6 database",
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
"version": "2.6"
@@ -520,7 +577,8 @@
{
"name": "3.2",
"annotations": {
- "description": "Provides a MongoDB v3.2 database",
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
"version": "3.2"
@@ -537,26 +595,31 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
- "name": "jenkins"
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": {
- "description": "Provides a Jenkins server",
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
"iconClass": "icon-jenkins",
"tags": "jenkins"
},
"from": {
"kind": "ImageStreamTag",
- "name": "1"
+ "name": "2"
}
},
{
"name": "1",
"annotations": {
- "description": "Provides a Jenkins server",
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
"tags": "jenkins",
"version": "1.x"
@@ -565,6 +628,20 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest"
}
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins 2.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
index ab4982690..354978891 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
@@ -4,11 +4,13 @@
"metadata": {
"name": "cakephp-mysql-example",
"annotations": {
- "description": "An example CakePHP application with a MySQL database",
- "tags": "quickstart,php,cakephp,mysql",
+ "openshift.io/display-name": "CakePHP + MySQL (Ephemeral)",
+ "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,php,cakephp",
"iconClass": "icon-php"
}
},
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
"template": "cakephp-mysql-example"
},
@@ -19,7 +21,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Exposes and load balances the application pods"
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
index cc7920b7d..9fc5be5e0 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
@@ -4,11 +4,13 @@
"metadata": {
"name": "dancer-mysql-example",
"annotations": {
- "description": "An example Dancer application with a MySQL database",
- "tags": "quickstart,perl,dancer,mysql",
+ "openshift.io/display-name": "Dancer + MySQL (Ephemeral)",
+ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,perl,dancer",
"iconClass": "icon-perl"
}
},
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
"template": "dancer-mysql-example"
},
@@ -19,7 +21,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Exposes and load balances the application pods"
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
index 7d1dea11b..590d5fd4f 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
@@ -4,11 +4,13 @@
"metadata": {
"name": "django-psql-example",
"annotations": {
- "description": "An example Django application with a PostgreSQL database",
- "tags": "quickstart,python,django,postgresql",
+ "openshift.io/display-name": "Django + PostgreSQL (Ephemeral)",
+ "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,python,django",
"iconClass": "icon-python"
}
},
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
"template": "django-psql-example"
},
@@ -19,7 +21,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Exposes and load balances the application pods"
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
index 880f0b34e..fc7423840 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
@@ -5,12 +5,13 @@
"name": "jenkins-ephemeral",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, without persistent storage.\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "openshift.io/display-name": "Jenkins (Ephemeral)",
+ "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
},
- "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
"objects": [
{
"kind": "Route",
@@ -89,6 +90,7 @@
"livenessProbe": {
"timeoutSeconds": 3,
"initialDelaySeconds": 120,
+            "failureThreshold": 30,
"httpGet": {
"path": "/login",
"port": 8080
@@ -96,8 +98,12 @@
},
"env": [
{
- "name": "JENKINS_PASSWORD",
- "value": "${JENKINS_PASSWORD}"
+ "name": "OPENSHIFT_ENABLE_OAUTH",
+ "value": "${ENABLE_OAUTH}"
+ },
+ {
+ "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
+ "value": "true"
},
{
"name": "KUBERNETES_MASTER",
@@ -150,7 +156,10 @@
"kind": "ServiceAccount",
"apiVersion": "v1",
"metadata": {
- "name": "${JENKINS_SERVICE_NAME}"
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}"
+ }
}
},
{
@@ -236,12 +245,10 @@
"value": "jenkins-jnlp"
},
{
- "name": "JENKINS_PASSWORD",
- "displayName": "Jenkins Password",
- "description": "Password for the Jenkins 'admin' user.",
- "generate": "expression",
- "from": "[a-zA-Z0-9]{16}",
- "required": true
+ "name": "ENABLE_OAUTH",
+ "displayName": "Enable OAuth in Jenkins",
+ "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
+ "value": "true"
},
{
"name": "MEMORY_LIMIT",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
index 3291f3594..acf59ee94 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
@@ -5,12 +5,13 @@
"name": "jenkins-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, with persistent storage.\nYou must have persistent volumes available in your cluster to use this template.",
+ "openshift.io/display-name": "Jenkins (Persistent)",
+ "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
},
- "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
"objects": [
{
"kind": "Route",
@@ -106,6 +107,7 @@
"livenessProbe": {
"timeoutSeconds": 3,
"initialDelaySeconds": 120,
+            "failureThreshold": 30,
"httpGet": {
"path": "/login",
"port": 8080
@@ -113,8 +115,12 @@
},
"env": [
{
- "name": "JENKINS_PASSWORD",
- "value": "${JENKINS_PASSWORD}"
+ "name": "OPENSHIFT_ENABLE_OAUTH",
+ "value": "${ENABLE_OAUTH}"
+ },
+ {
+ "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
+ "value": "true"
},
{
"name": "KUBERNETES_MASTER",
@@ -167,7 +173,10 @@
"kind": "ServiceAccount",
"apiVersion": "v1",
"metadata": {
- "name": "${JENKINS_SERVICE_NAME}"
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}"
+ }
}
},
{
@@ -253,12 +262,10 @@
"value": "jenkins-jnlp"
},
{
- "name": "JENKINS_PASSWORD",
- "displayName": "Jenkins Password",
- "description": "Password for the Jenkins 'admin' user.",
- "generate": "expression",
- "from": "[a-zA-Z0-9]{16}",
- "required": true
+ "name": "ENABLE_OAUTH",
+ "displayName": "Enable OAuth in Jenkins",
+ "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
+ "value": "true"
},
{
"name": "MEMORY_LIMIT",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
index 6ab4a1781..d4b4add18 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
@@ -4,11 +4,13 @@
"metadata": {
"name": "nodejs-mongodb-example",
"annotations": {
- "description": "An example Node.js application with a MongoDB database",
- "tags": "quickstart,nodejs,mongodb",
+ "openshift.io/display-name": "Node.js + MongoDB (Ephemeral)",
+ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,nodejs",
"iconClass": "icon-nodejs"
}
},
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
"template": "nodejs-mongodb-example"
},
@@ -19,7 +21,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Exposes and load balances the application pods"
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
index 50d60f2bb..baed15d8a 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
@@ -4,11 +4,13 @@
"metadata": {
"name": "rails-postgresql-example",
"annotations": {
- "description": "An example Rails application with a PostgreSQL database",
- "tags": "quickstart,ruby,rails,postgresql",
+ "openshift.io/display-name": "Rails + PostgreSQL (Ephemeral)",
+ "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby"
}
},
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
"template": "rails-postgresql-example"
},
@@ -19,7 +21,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Exposes and load balances the application pods"
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
index 65060cc2c..ed0e94bed 100644
--- a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
@@ -20,23 +20,13 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 1.0 Java S2I images.",
+ "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
- },
- {
- "name": "2.0",
- "annotations": {
- "description": "JBoss Fuse Integration Services 2.0 Java S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,jboss-fuse,java,xpaas",
- "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
- "version": "2.0"
- }
- }
+ }
]
}
},
@@ -52,23 +42,13 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.",
+ "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,karaf,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
- },
- {
- "name": "2.0",
- "annotations": {
- "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.",
- "iconClass": "icon-jboss",
- "tags": "builder,jboss-fuse,java,karaf,xpaas",
- "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
- "version": "2.0"
- }
- }
+ }
]
}
}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/jboss-image-streams.json
index 4edc97f41..a7cb12867 100644
--- a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/jboss-image-streams.json
@@ -283,6 +283,28 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "jboss-datavirt63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,java,jboss,xpaas",
+ "supports":"datavirt:6.3,java:8,xpaas:1.4",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jboss-amq-62"
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-basic-s2i.json
new file mode 100644
index 000000000..7d64dac98
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-basic-s2i.json
@@ -0,0 +1,415 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-basic-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the secret named ${CONFIGURATION_NAME} containing the datasource configuration details required by the deployed VDB(s).",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-extensions-support-s2i.json
new file mode 100644
index 000000000..1e7c03b99
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -0,0 +1,763 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-extensions-support-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-extensions-support-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with source code for the extensions image. The image should have all modules, etc., placed in the \"/extensions/\" directory in the image. If the contents are in a different directory, the sourcePath for the ImageSource in the BuildConfig must be modified.",
+ "displayName": "Extensions Git Repository URL",
+ "name": "EXTENSIONS_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your extensions repository if you are not using the default branch.",
+ "displayName": "Extensions Git Reference",
+ "name": "EXTENSIONS_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your extensions repository.",
+ "displayName": "Extensions Context Directory",
+ "name": "EXTENSIONS_DIR",
+ "value": "datavirt/derby-driver-image",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to the Dockerfile in your extensions directory.",
+ "displayName": "Extensions Dockerfile",
+ "name": "EXTENSIONS_DOCKERFILE",
+ "value": "Dockerfile",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${EXTENSIONS_REPOSITORY_URL}",
+ "ref": "${EXTENSIONS_REPOSITORY_REF}"
+ },
+ "contextDir": "${EXTENSIONS_DIR}"
+ },
+ "strategy": {
+ "type": "Docker",
+ "dockerStrategy": {
+ "dockerfilePath": "${EXTENSIONS_DOCKERFILE}"
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ },
+ "env": [
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "QS_DB_TYPE",
+ "value": "derby",
+                                        "description": "Used solely by the quickstart and set here to ensure the template can be instantiated with its default parameter values, i.e. so it works out of the box."
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-secure-s2i.json
new file mode 100644
index 000000000..07f926ff3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/datavirt63-secure-s2i.json
@@ -0,0 +1,642 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-secure-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-secure-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
new file mode 100644
index 000000000..14bdd1dca
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cloudforms
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes-app
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
new file mode 100644
index 000000000..709d8d976
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nfs-pv01
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
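
Both persistent-volume examples above point at a placeholder NFS export (server 10.19.0.216, paths under /opt/nfs); those values are site-specific and have to be adjusted before the files are usable. A minimal sketch of registering them with the oc client, assuming the adjusted files are available locally and the NFS exports already exist (judging by the names, the volumes-app export is meant to back the application claim and the other the database claim in the CloudForms template below):

    oc create -f cfme-pv-example.yaml
    oc create -f cfme-pv-app-example.yaml
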
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
new file mode 100644
index 000000000..c8e3d4083
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
@@ -0,0 +1,479 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: "CloudForms appliance with persistent storage"
+ tags: "instant-app,cloudforms,cfme"
+ iconClass: "icon-rails"
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances CloudForms pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-app
+ annotations:
+ description: "Keeps track of changes in the CloudForms app image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${DATABASE_SERVICE_NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the CloudForms appliance"
+ spec:
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ volumes:
+ -
+ name: "cfme-app-volume"
+ persistentVolumeClaim:
+ claimName: ${NAME}
+ containers:
+ - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
+ imagePullPolicy: IfNotPresent
+ name: cloudforms
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "cfme-app-volume"
+ mountPath: "/persistent"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${MEMORY_APPLICATION_MIN}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/rh/cfme-container-scripts/sync-pv-data
+ replicas: 1
+ selector:
+ name: ${NAME}
+ triggers:
+ - type: "ConfigChange"
+ - type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "cloudforms"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
+ strategy:
+ type: "Recreate"
+ recreateParams:
+ timeoutSeconds: 1200
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ limits:
+ memory: "${MEMORY_MEMCACHED_LIMIT}"
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "cfme-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: ${DATABASE_SERVICE_NAME}
+ containers:
+ -
+ name: "postgresql"
+ image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "cfme-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ limits:
+ memory: "${MEMORY_POSTGRESQL_LIMIT}"
+
+parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: cloudforms
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ value: "smartvm"
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "64MB"
+ -
+ name: "MEMORY_APPLICATION_MIN"
+ displayName: "Application Memory Minimum"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "4096Mi"
+ -
+ name: "MEMORY_POSTGRESQL_LIMIT"
+ displayName: "PostgreSQL Memory Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can use."
+ value: "2048Mi"
+ -
+ name: "MEMORY_MEMCACHED_LIMIT"
+ displayName: "Memcached Memory Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can use."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "30"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "1Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "1Gi"
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
index a65d35c2e..0d5ac21d8 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dotnet-image-streams",
"annotations": {
- "description": "ImageStream definitions for .Net Core on RHEL"
+ "description": "ImageStream definitions for .NET Core on RHEL"
}
},
"items": [
@@ -23,16 +23,33 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": ".NET Core (Latest)",
- "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
+ "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore",
"supports":"dotnet",
"sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.0/test/asp-net-hello-world"
+ "sampleContextDir": "1.1/test/asp-net-hello-world"
},
"from": {
"kind": "ImageStreamTag",
- "name": "1.0"
+ "name": "1.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 1.1",
+ "description": "Build and run .NET Core 1.1 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore11",
+ "supports":"dotnet:1.1,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
+ "sampleContextDir": "1.1/test/asp-net-hello-world",
+ "version": "1.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnetcore-11-rhel7:1.1"
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
index a645de7e2..edaac73ca 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
@@ -110,9 +110,9 @@
"name": "0.10",
"annotations": {
"openshift.io/display-name": "Node.js 0.10",
- "description": "Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
"iconClass": "icon-nodejs",
- "tags": "builder,nodejs",
+ "tags": "hidden,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
"version": "0.10",
"sampleRepo": "https://github.com/openshift/nodejs-ex.git"
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
index 9b9cd236f..88ee79a84 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
@@ -110,9 +110,9 @@
"name": "0.10",
"annotations": {
"openshift.io/display-name": "Node.js 0.10",
- "description": "Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
"iconClass": "icon-nodejs",
- "tags": "builder,nodejs",
+ "tags": "hidden,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
"version": "0.10",
"sampleRepo": "https://github.com/openshift/nodejs-ex.git"
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
index 4edc97f41..a7cb12867 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
@@ -283,6 +283,28 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "jboss-datavirt63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,java,jboss,xpaas",
+ "supports":"datavirt:6.3,java:8,xpaas:1.4",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jboss-amq-62"
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json
new file mode 100644
index 000000000..7d64dac98
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-basic-s2i.json
@@ -0,0 +1,415 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-basic-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the secret named ${CONFIGURATION_NAME} containing the datasource configuration details required by the deployed VDB(s).",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
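
As the template message above states, the deployment will not start until the "datavirt-service-account" service account and the "datavirt-app-config" secret (mounted at /etc/datavirt-environment and read through ENV_FILES) exist in the project. A minimal sketch using the default parameter values; the datasources.properties file name is illustrative, and linking the secret to the service account is shown only as one way to satisfy the "allow usage of the secret" requirement:

    oc create serviceaccount datavirt-service-account
    oc create secret generic datavirt-app-config --from-file=datasources.properties
    oc secrets link datavirt-service-account datavirt-app-config
    oc new-app --template=datavirt63-basic-s2i
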
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json
new file mode 100644
index 000000000..1e7c03b99
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -0,0 +1,763 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-extensions-support-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-extensions-support-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with source code for the extensions image. The image should have all modules, etc., placed in the \"/extensions/\" directory in the image. If the contents are in a different directory, the sourcePath for the ImageSource in the BuildConfig must be modified.",
+ "displayName": "Extensions Git Repository URL",
+ "name": "EXTENSIONS_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your extensions repository if you are not using the default branch.",
+ "displayName": "Extensions Git Reference",
+ "name": "EXTENSIONS_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your extensions repository.",
+ "displayName": "Extensions Context Directory",
+ "name": "EXTENSIONS_DIR",
+ "value": "datavirt/derby-driver-image",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to the Dockerfile in your extensions directory.",
+ "displayName": "Extensions Dockerfile",
+ "name": "EXTENSIONS_DOCKERFILE",
+ "value": "Dockerfile",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${EXTENSIONS_REPOSITORY_URL}",
+ "ref": "${EXTENSIONS_REPOSITORY_REF}"
+ },
+ "contextDir": "${EXTENSIONS_DIR}"
+ },
+ "strategy": {
+ "type": "Docker",
+ "dockerStrategy": {
+ "dockerfilePath": "${EXTENSIONS_DOCKERFILE}"
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ },
+ "env": [
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "QS_DB_TYPE",
+ "value": "derby",
+                                        "description": "Used solely by the quickstart and set here to ensure the template can be instantiated with its default parameter values, i.e. so it works out of the box."
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
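
Compared to the basic template, this variant chains two builds (the ${APPLICATION_NAME}-ext Docker build publishes an image whose /extensions directory is copied into the S2I build through the image-source paths and CUSTOM_INSTALL_DIRECTORIES) and serves HTTPS and JDBC over passthrough routes backed by keystores delivered as secrets. Since HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET both default to "datavirt-app-secret", a single secret holding both keystores satisfies the defaults. A rough sketch of preparing it; the keytool invocations are illustrative and the passwords match the template defaults:

    keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks -storepass mykeystorepass -keypass mykeystorepass -dname "CN=datavirt-app"
    keytool -genseckey -alias secret-key -storetype JCEKS -keyalg AES -keysize 128 -keystore jgroups.jceks -storepass password -keypass password
    oc create secret generic datavirt-app-secret --from-file=keystore.jks --from-file=jgroups.jceks
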
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json
new file mode 100644
index 000000000..07f926ff3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/datavirt63-secure-s2i.json
@@ -0,0 +1,642 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-secure-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-secure-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
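
The template above wires everything through ${NAME} parameter references, and parameters such as JGROUPS_CLUSTER_PASSWORD declare "generate": "expression" with a pattern like [a-zA-Z0-9]{8} so a value is generated when none is supplied. The minimal Python sketch below illustrates that substitution model only; it is not the OpenShift template processor, and the local file name and the simplified pattern handling are assumptions made for illustration.

    import json
    import random
    import re
    import string


    def resolve_parameters(template):
        """Collect parameter values, generating simple [a-zA-Z0-9]{N} expressions."""
        values = {}
        for param in template.get("parameters", []):
            value = param.get("value", "")
            if param.get("generate") == "expression":
                match = re.fullmatch(r"\[a-zA-Z0-9\]\{(\d+)\}", param.get("from", ""))
                length = int(match.group(1)) if match else 8
                value = "".join(random.choice(string.ascii_letters + string.digits)
                                for _ in range(length))
            values[param["name"]] = value
        return values


    def substitute(node, values):
        """Recursively replace ${NAME} references in strings with parameter values."""
        if isinstance(node, str):
            return re.sub(r"\$\{(\w+)\}",
                          lambda m: values.get(m.group(1), m.group(0)), node)
        if isinstance(node, list):
            return [substitute(item, values) for item in node]
        if isinstance(node, dict):
            return {key: substitute(val, values) for key, val in node.items()}
        return node


    # "datavirt-template.json" is a hypothetical local copy of the template above.
    with open("datavirt-template.json") as handle:
        template = json.load(handle)

    params = resolve_parameters(template)
    params["APPLICATION_NAME"] = "datavirt-app"  # override, as `oc process -p` would
    print(json.dumps(substitute(template["objects"], params)[0]["metadata"], indent=2))
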
diff --git a/roles/openshift_expand_partition/meta/main.yml b/roles/openshift_expand_partition/meta/main.yml
index a596d6c63..dea6b6ee0 100644
--- a/roles/openshift_expand_partition/meta/main.yml
+++ b/roles/openshift_expand_partition/meta/main.yml
@@ -13,6 +13,6 @@ galaxy_info:
versions:
- all
categories:
- - openshift
- - cloud
+ - openshift
+ - cloud
dependencies: []
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ad4b1e47b..616b41c7b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -7,27 +7,37 @@
"""Ansible module for retrieving and setting openshift related facts"""
-try:
- # python2
- import ConfigParser
-except ImportError:
- # python3
- import configparser as ConfigParser
-
+# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
+import errno
+import json
+import re
import io
import os
import yaml
-from distutils.util import strtobool
-from distutils.version import LooseVersion
import struct
import socket
+from distutils.util import strtobool
+from distutils.version import LooseVersion
+from six import string_types, text_type
+from six.moves import configparser
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import * # noqa: F403
+from ansible.module_utils.facts import * # noqa: F403
+from ansible.module_utils.urls import * # noqa: F403
+from ansible.module_utils.six import iteritems, itervalues
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+from ansible.module_utils._text import to_native
+
+HAVE_DBUS = False
-HAVE_DBUS=False
try:
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
- HAVE_DBUS=True
+ HAVE_DBUS = True
except ImportError:
pass
@@ -58,6 +68,7 @@ def migrate_docker_facts(facts):
}
if 'docker' not in facts:
facts['docker'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -71,11 +82,12 @@ def migrate_docker_facts(facts):
# log_options was originally meant to be a comma separated string, but
# we now prefer an actual list, with backward compatibility:
if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], basestring):
+ isinstance(facts['docker']['log_options'], string_types):
facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
return facts
+
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
@@ -86,6 +98,7 @@ def migrate_common_facts(facts):
}
if 'common' not in facts:
facts['common'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -93,6 +106,7 @@ def migrate_common_facts(facts):
facts['common'][param] = facts[role].pop(param)
return facts
+
def migrate_node_facts(facts):
""" Migrate facts from various roles into node """
params = {
@@ -100,6 +114,7 @@ def migrate_node_facts(facts):
}
if 'node' not in facts:
facts['node'] = {}
+ # pylint: disable=consider-iterating-dictionary
for role in params.keys():
if role in facts:
for param in params[role]:
@@ -125,7 +140,9 @@ def migrate_hosted_facts(facts):
facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
return facts
+
def migrate_admission_plugin_facts(facts):
+ """ Apply migrations for admission plugin facts """
if 'master' in facts:
if 'kube_admission_plugin_config' in facts['master']:
if 'admission_plugin_config' not in facts['master']:
@@ -139,6 +156,7 @@ def migrate_admission_plugin_facts(facts):
facts['master'].pop('kube_admission_plugin_config', None)
return facts
+
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
@@ -149,6 +167,7 @@ def migrate_local_facts(facts):
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
+
def first_ip(network):
""" Return the first IPv4 address in network
@@ -157,13 +176,14 @@ def first_ip(network):
Returns:
str: first IPv4 address
"""
- atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
- itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
+ itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
(address, netmask) = network.split('/')
netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
return itoa((atoi(address) & netmask_i) + 1)
+
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -201,11 +221,8 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames
- if (i is not None and isinstance(i, basestring)
- and re.match(ip_regex, i))]
- hosts = [i for i in hostnames
- if i is not None and i != '' and i not in ips]
+ ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
+ hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
for host in host_list:
@@ -225,11 +242,11 @@ def query_metadata(metadata_url, headers=None, expect_json=False):
Returns:
dict or list: metadata request result
"""
- result, info = fetch_url(module, metadata_url, headers=headers)
+ result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
if info['status'] != 200:
raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
- return module.from_json(to_native(result.read()))
+ return module.from_json(to_native(result.read())) # noqa: F405
else:
return [to_native(line.strip()) for line in result.readlines()]
@@ -341,7 +358,7 @@ def normalize_aws_facts(metadata, facts):
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
- if isinstance(ips, basestring):
+ if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
@@ -390,7 +407,7 @@ def normalize_openstack_facts(metadata, facts):
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
- for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
+ for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
('public_hostname', 'public-hostname', 'public-ipv4')]:
try:
if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
@@ -431,6 +448,7 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
+
def set_flannel_facts_if_unset(facts):
""" Set flannel facts if not already present in facts dict
dict: the facts dict updated with the flannel facts if
@@ -448,6 +466,7 @@ def set_flannel_facts_if_unset(facts):
facts['common']['use_flannel'] = use_flannel
return facts
+
def set_nuage_facts_if_unset(facts):
""" Set nuage facts if not already present in facts dict
dict: the facts dict updated with the nuage facts if
@@ -465,6 +484,7 @@ def set_nuage_facts_if_unset(facts):
facts['common']['use_nuage'] = use_nuage
return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -482,6 +502,7 @@ def set_node_schedulability(facts):
facts['node']['schedulable'] = True
return facts
+
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -518,6 +539,7 @@ def set_selectors(facts):
return facts
+
def set_dnsmasq_facts_if_unset(facts):
""" Set dnsmasq facts if not already present in facts
Args:
@@ -537,6 +559,7 @@ def set_dnsmasq_facts_if_unset(facts):
return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -564,6 +587,7 @@ def set_project_cfg_facts_if_unset(facts):
return facts
+
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -590,6 +614,7 @@ def set_identity_providers_if_unset(facts):
return facts
+
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
@@ -649,7 +674,6 @@ def set_url_facts_if_unset(facts):
host,
ports[prefix]))
-
r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
facts['master'].setdefault('loopback_cluster_name', r_lhn)
@@ -665,6 +689,7 @@ def set_url_facts_if_unset(facts):
return facts
+
def set_aggregate_facts(facts):
""" Set aggregate facts
@@ -742,9 +767,9 @@ def set_etcd_facts_if_unset(facts):
# Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
try:
# Add a fake section for parsing:
- ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
@@ -760,6 +785,7 @@ def set_etcd_facts_if_unset(facts):
return facts
+
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
@@ -840,6 +866,21 @@ def set_deployment_facts_if_unset(facts):
return facts
+
+def set_evacuate_or_drain_option(facts):
+ """OCP before 1.5/3.5 used '--evacuate'. As of 1.5/3.5 OCP uses
+'--drain'. Let's make that a fact for easy reference later.
+ """
+ if facts['common']['version_gte_3_5_or_1_5']:
+ # New-style
+ facts['common']['evacuate_or_drain'] = '--drain'
+ else:
+ # Old-style
+ facts['common']['evacuate_or_drain'] = '--evacuate'
+
+ return facts
+
+
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -851,33 +892,42 @@ def set_version_facts_if_unset(facts):
"""
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
- version = get_openshift_version(facts)
- if version:
- facts['common']['version'] = version
+ openshift_version = get_openshift_version(facts)
+ if openshift_version:
+ version = LooseVersion(openshift_version)
+ facts['common']['version'] = openshift_version
+ facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
if deployment_type == 'origin':
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
- version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('1.4.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
else:
- version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
- version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
- version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
- version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
- version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('3.4.0')
+ version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
+ version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
+ version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
+ version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
version_gte_3_4_or_1_4 = False
+ version_gte_3_5_or_1_5 = False
+ version_gte_3_6_or_1_6 = False
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
-
+ facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
+ facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
if version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
@@ -894,6 +944,7 @@ def set_version_facts_if_unset(facts):
return facts
+
def set_manageiq_facts_if_unset(facts):
""" Set manageiq facts. This currently includes common.use_manageiq.
@@ -914,6 +965,7 @@ def set_manageiq_facts_if_unset(facts):
return facts
+
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -924,6 +976,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
+ # pylint: disable=too-many-branches
if 'common' in facts:
use_sdn = facts['common']['use_openshift_sdn']
if not (use_sdn == '' or isinstance(use_sdn, bool)):
@@ -973,7 +1026,9 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
+
def set_nodename(facts):
+ """ set nodename """
if 'node' in facts and 'common' in facts:
if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
@@ -981,6 +1036,7 @@ def set_nodename(facts):
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
+
def migrate_oauth_template_facts(facts):
"""
Migrate an old oauth template fact to a newer format if it's present.
@@ -1000,6 +1056,7 @@ def migrate_oauth_template_facts(facts):
facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
return facts
+
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -1022,6 +1079,7 @@ def format_url(use_ssl, hostname, port, path=''):
url = urlunparse((scheme, netloc, path, '', '', ''))
return url
+
def get_current_config(facts):
""" Get current openshift config
@@ -1051,10 +1109,9 @@ def get_current_config(facts):
)
kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
- if (os.path.isfile('/usr/bin/openshift')
- and os.path.isfile(kubeconfig_path)):
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
try:
- _, output, _ = module.run_command(
+ _, output, _ = module.run_command( # noqa: F405
["/usr/bin/openshift", "ex", "config", "view", "-o",
"json", "--kubeconfig=%s" % kubeconfig_path],
check_rc=False
@@ -1085,6 +1142,7 @@ def get_current_config(facts):
return current_config
+
def build_kubelet_args(facts):
"""Build node kubelet_args
@@ -1129,6 +1187,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
#
# map() seems to be returning an itertools.imap object
# instead of a list. We cast it to a list ourselves.
+ # pylint: disable=unnecessary-lambda
labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
if labels_str != '':
kubelet_args['node-labels'] = labels_str
@@ -1139,6 +1198,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
return facts
+
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1160,6 +1220,7 @@ def build_controller_args(facts):
facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
return facts
+
def build_api_server_args(facts):
""" Build master api_server_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1181,13 +1242,14 @@ def build_api_server_args(facts):
facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
return facts
+
def is_service_running(service):
""" Queries systemd through dbus to see if the service is running """
service_running = False
- bus = SystemBus()
- systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
- manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
try:
+ bus = SystemBus()
+ systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
+ manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
@@ -1196,10 +1258,20 @@ def is_service_running(service):
if service_load_state == 'loaded' and service_active_state == 'active':
service_running = True
except DBusException:
+ # TODO: do not swallow exception, as it may be hiding useful debugging
+ # information.
pass
return service_running
+
+def rpm_rebuilddb():
+ """
+ Runs rpm --rebuilddb to ensure the db is in good shape.
+ """
+ module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
+
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
@@ -1210,9 +1282,10 @@ def get_version_output(binary, version_cmd):
cmd.append(item)
if os.path.isfile(cmd[0]):
- _, output, _ = module.run_command(cmd)
+ _, output, _ = module.run_command(cmd) # noqa: F405
return output
+
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
@@ -1225,6 +1298,7 @@ def get_docker_version_info():
}
return result
+
def get_hosted_registry_insecure():
""" Parses OPTIONS from /etc/sysconfig/docker to determine if the
registry is currently insecure.
@@ -1232,17 +1306,18 @@ def get_hosted_registry_insecure():
hosted_registry_insecure = None
if os.path.exists('/etc/sysconfig/docker'):
try:
- ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
hosted_registry_insecure = True
- except:
+ except Exception: # pylint: disable=broad-except
pass
return hosted_registry_insecure
+
def get_openshift_version(facts):
""" Get current version of openshift on the host.
@@ -1259,12 +1334,13 @@ def get_openshift_version(facts):
# No need to run this method repeatedly on a system if we already know the
# version
+ # TODO: We need a way to force reload this after upgrading bits.
if 'common' in facts:
if 'version' in facts['common'] and facts['common']['version'] is not None:
return chomp_commit_offset(facts['common']['version'])
if os.path.isfile('/usr/bin/openshift'):
- _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
elif 'common' in facts and 'is_containerized' in facts['common']:
version = get_container_openshift_version(facts)
@@ -1273,7 +1349,7 @@ def get_openshift_version(facts):
# This can be very slow and may get re-run multiple times, so we only use this
# if other methods failed to find a version.
if not version and os.path.isfile('/usr/local/bin/openshift'):
- _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
+ _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
return chomp_commit_offset(version)
@@ -1363,9 +1439,10 @@ def apply_provider_facts(facts, provider_facts):
facts['provider'] = provider_facts
return facts
+
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
-# pylint: disable=too-many-branches
+# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
""" Recursively merge facts dicts
@@ -1397,7 +1474,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], basestring):
+ if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1434,16 +1511,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
# The master count (int) can only increase unless it
# has been passed as a protected fact to overwrite.
- if key == 'master_count':
+                if key == 'master_count' and new[key] is not None and new[key] != '':
if int(value) <= int(new[key]):
facts[key] = copy.deepcopy(new[key])
else:
- module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') # noqa: F405
# ha (bool) can not change unless it has been passed
# as a protected fact to overwrite.
if key == 'ha':
if safe_get_bool(value) != safe_get_bool(new[key]):
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
+ # pylint: disable=line-too-long
+ module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
else:
facts[key] = value
# No other condition has been met. Overwrite the old fact
@@ -1457,12 +1536,13 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
for key in new_keys:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if key in inventory_json_facts and isinstance(new[key], basestring):
+ if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
return facts
+
def save_local_facts(filename, facts):
""" Save local facts
@@ -1478,7 +1558,7 @@ def save_local_facts(filename, facts):
if exception.errno != errno.EEXIST: # but it is okay if it is already there
raise # pass any other exceptions up the chain
with open(filename, 'w') as fact_file:
- fact_file.write(module.jsonify(facts))
+ fact_file.write(module.jsonify(facts)) # noqa: F405
os.chmod(filename, 0o600)
except (IOError, OSError) as ex:
raise OpenShiftFactsFileWriteError(
@@ -1497,15 +1577,15 @@ def get_local_facts_from_file(filename):
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
- ini_facts = ConfigParser.SafeConfigParser()
+ ini_facts = configparser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
- except (ConfigParser.MissingSectionHeaderError,
- ConfigParser.ParsingError):
+ except (configparser.MissingSectionHeaderError,
+ configparser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
@@ -1514,6 +1594,7 @@ def get_local_facts_from_file(filename):
return local_facts
+
def sort_unique(alist):
""" Sorts and de-dupes a list
@@ -1531,6 +1612,7 @@ def sort_unique(alist):
return out
+
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1541,6 +1623,7 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
+
def set_proxy_facts(facts):
""" Set global proxy facts and promote defaults from http_proxy, https_proxy,
no_proxy to the more specific builddefaults and builddefaults_git vars.
@@ -1556,13 +1639,11 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and \
- isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
- if 'generate_no_proxy_hosts' in common and \
- safe_get_bool(common['generate_no_proxy_hosts']):
+ if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
@@ -1580,7 +1661,7 @@ def set_proxy_facts(facts):
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
# make no_proxy into a list if it's not
- if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
+ if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], string_types):
builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
@@ -1591,15 +1672,16 @@ def set_proxy_facts(facts):
# If we're actually defining a proxy config then create admission_plugin_config
# if it doesn't exist, then merge builddefaults[config] structure
# into admission_plugin_config
- if 'admission_plugin_config' not in facts['master']:
- facts['master']['admission_plugin_config'] = dict()
- if 'config' in builddefaults and ('http_proxy' in builddefaults or \
- 'https_proxy' in builddefaults):
+ if 'config' in builddefaults and ('http_proxy' in builddefaults or
+ 'https_proxy' in builddefaults):
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
facts['master']['admission_plugin_config'].update(builddefaults['config'])
facts['builddefaults'] = builddefaults
return facts
+
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1616,7 +1698,7 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift3/node'
ovs_image = 'openshift3/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd3'
+ etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift3/ose-pod'
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
@@ -1626,7 +1708,7 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'aep3_beta/node'
ovs_image = 'aep3_beta/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd3'
+ etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'aep3_beta/aep-pod'
router_image = 'aep3_beta/aep-haproxy-router'
registry_image = 'aep3_beta/aep-docker-registry'
@@ -1636,7 +1718,7 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift/node'
ovs_image = 'openshift/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd3'
+ etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift/origin-pod'
router_image = 'openshift/origin-haproxy-router'
registry_image = 'openshift/origin-docker-registry'
@@ -1671,6 +1753,7 @@ def set_container_facts_if_unset(facts):
return facts
+
def set_installed_variant_rpm_facts(facts):
""" Set RPM facts of installed variant
Args:
@@ -1685,7 +1768,7 @@ def set_installed_variant_rpm_facts(facts):
['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
['tuned-profiles-%s-node' % base_rpm]
for rpm in variant_rpms:
- exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
+ exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405
if exit_code == 0:
installed_rpms.append(rpm)
@@ -1693,7 +1776,6 @@ def set_installed_variant_rpm_facts(facts):
return facts
-
class OpenShiftFactsInternalError(Exception):
"""Origin Facts Error"""
pass
@@ -1761,12 +1843,12 @@ class OpenShiftFacts(object):
try:
# ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
- self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
+ self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
for (k, v) in self.system_facts.items():
self.system_facts["ansible_%s" % k.replace('-', '_')] = v
except UnboundLocalError:
# ansible-2.2
- self.system_facts = get_all_facts(module)['ansible_facts']
+ self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite,
@@ -1799,7 +1881,6 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
roles = local_facts.keys()
-
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
deployment_type = local_facts['common']['deployment_type']
else:
@@ -1833,6 +1914,7 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_evacuate_or_drain_option(facts)
facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
@@ -1854,7 +1936,7 @@ class OpenShiftFacts(object):
"""
defaults = {}
ip_addr = self.system_facts['ansible_default_ipv4']['address']
- exit_code, output, _ = module.run_command(['hostname', '-f'])
+ exit_code, output, _ = module.run_command(['hostname', '-f']) # noqa: F405
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
@@ -1873,22 +1955,6 @@ class OpenShiftFacts(object):
debug_level=2)
if 'master' in roles:
- scheduler_predicates = [
- {"name": "MatchNodeSelector"},
- {"name": "PodFitsResources"},
- {"name": "PodFitsPorts"},
- {"name": "NoDiskConflict"},
- {"name": "NoVolumeZoneConflict"},
- {"name": "MaxEBSVolumeCount"},
- {"name": "MaxGCEPDVolumeCount"},
- {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
- ]
- scheduler_priorities = [
- {"name": "LeastRequestedPriority", "weight": 1},
- {"name": "SelectorSpreadPriority", "weight": 1},
- {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
- ]
-
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
controllers_port='8444',
console_use_ssl=True,
@@ -1905,8 +1971,6 @@ class OpenShiftFacts(object):
access_token_max_seconds=86400,
auth_token_max_seconds=500,
oauth_grant_method='auto',
- scheduler_predicates=scheduler_predicates,
- scheduler_priorities=scheduler_priorities,
dynamic_provisioning_enabled=True,
max_requests_inflight=500)
@@ -1919,6 +1983,11 @@ class OpenShiftFacts(object):
if 'docker' in roles:
docker = dict(disable_push_dockerhub=False,
options='--log-driver=json-file --log-opt max-size=50m')
+            # NOTE: This is a workaround for a dnf output race condition that can occur in
+ # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
+ if self.system_facts['ansible_pkg_mgr'] == 'dnf':
+ rpm_rebuilddb()
+
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
@@ -1930,7 +1999,7 @@ class OpenShiftFacts(object):
defaults['docker'] = docker
if 'clock' in roles:
- exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+ exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony']) # noqa: F405
chrony_installed = bool(exit_code == 0)
defaults['clock'] = dict(
enabled=True,
@@ -1956,7 +2025,9 @@ class OpenShiftFacts(object):
options='*(rw,root_squash)'
),
host=None,
- access_modes=['ReadWriteOnce'],
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
create_pv=True,
create_pvc=False
)
@@ -1973,7 +2044,9 @@ class OpenShiftFacts(object):
options='*(rw,root_squash)'
),
host=None,
- access_modes=['ReadWriteOnce'],
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
create_pv=True,
create_pvc=False
)
@@ -1989,7 +2062,9 @@ class OpenShiftFacts(object):
directory='/exports',
options='*(rw,root_squash)'),
host=None,
- access_modes=['ReadWriteMany'],
+ access=dict(
+ modes=['ReadWriteMany']
+ ),
create_pv=True,
create_pvc=True
)
@@ -2015,7 +2090,7 @@ class OpenShiftFacts(object):
# TODO: this is not exposed through module_utils/facts.py in ansible,
# need to create PR for ansible to expose it
- bios_vendor = get_file_content(
+ bios_vendor = get_file_content( # noqa: F405
'/sys/devices/virtual/dmi/id/bios_vendor'
)
if bios_vendor == 'Google':
@@ -2030,8 +2105,7 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
- elif (virt_type == 'xen' and virt_role == 'guest'
- and re.match(r'.*\.amazon$', product_version)):
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
@@ -2092,7 +2166,7 @@ class OpenShiftFacts(object):
# Determine if any of the provided variable structures match the fact.
matching_structure = None
- if openshift_env_structures != None:
+ if openshift_env_structures is not None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
@@ -2116,7 +2190,7 @@ class OpenShiftFacts(object):
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
- #pylint: disable=too-many-branches, too-many-locals
+ # pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
additive_facts_to_overwrite=None,
openshift_env=None,
@@ -2144,7 +2218,7 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env != None:
+ if openshift_env != {} and openshift_env is not None:
for fact, value in iteritems(openshift_env):
oo_env_facts = dict()
current_level = oo_env_facts
@@ -2173,16 +2247,16 @@ class OpenShiftFacts(object):
if 'docker' in new_local_facts:
# remove duplicate and empty strings from registry lists
- for cat in ['additional', 'blocked', 'insecure']:
+ for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], basestring):
+ isinstance(new_local_facts['docker']['log_options'], string_types):
new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -2190,7 +2264,7 @@ class OpenShiftFacts(object):
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
changed = True
- if not module.check_mode:
+ if not module.check_mode: # noqa: F405
save_local_facts(self.filename, new_local_facts)
self.changed = changed
@@ -2223,10 +2297,10 @@ class OpenShiftFacts(object):
invalid_facts = self.validate_master_facts(facts, invalid_facts)
if invalid_facts:
msg = 'Invalid facts detected:\n'
+ # pylint: disable=consider-iterating-dictionary
for key in invalid_facts.keys():
msg += '{0}: {1}\n'.format(key, invalid_facts[key])
- module.fail_json(msg=msg,
- changed=self.changed)
+ module.fail_json(msg=msg, changed=self.changed) # noqa: F405
# disabling pylint errors for line-too-long since we're dealing
# with best effort reduction of error messages here.
@@ -2278,13 +2352,14 @@ class OpenShiftFacts(object):
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
+
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
@@ -2299,18 +2374,18 @@ def main():
)
if not HAVE_DBUS:
- module.fail_json(msg="This module requires dbus python bindings")
+ module.fail_json(msg="This module requires dbus python bindings") # noqa: F405
- module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
- module.params['gather_timeout'] = 10
- module.params['filter'] = '*'
+ module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
+ module.params['gather_timeout'] = 10 # noqa: F405
+ module.params['filter'] = '*' # noqa: F405
- role = module.params['role']
- local_facts = module.params['local_facts']
- additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
- openshift_env = module.params['openshift_env']
- openshift_env_structures = module.params['openshift_env_structures']
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
+ role = module.params['role'] # noqa: F405
+ local_facts = module.params['local_facts'] # noqa: F405
+ additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
+ openshift_env = module.params['openshift_env'] # noqa: F405
+ openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
+ protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
@@ -2322,24 +2397,15 @@ def main():
openshift_env_structures,
protected_facts_to_overwrite)
- file_params = module.params.copy()
+ file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
- file_args = module.load_file_common_arguments(file_params)
- changed = module.set_fs_attributes_if_different(file_args,
+ file_args = module.load_file_common_arguments(file_params) # noqa: F405
+ changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
openshift_facts.changed)
- return module.exit_json(changed=changed,
+ return module.exit_json(changed=changed, # noqa: F405
ansible_facts=openshift_facts.facts)
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.facts import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.six import iteritems, itervalues
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import b
if __name__ == '__main__':
main()
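
The openshift_facts.py changes above parse the detected version once into a LooseVersion, derive short_version from its first two components, and compute the version_gte_* booleans from plain comparisons against per-deployment-type thresholds. The standalone sketch below reproduces only that comparison pattern; the sample version strings are made up for illustration.

    from distutils.version import LooseVersion


    def version_gates(openshift_version, deployment_type):
        """Simplified sketch of the comparison pattern in set_version_facts_if_unset."""
        version = LooseVersion(openshift_version)
        short_version = '.'.join(str(part) for part in version.version[0:2])
        if deployment_type == 'origin':
            thresholds = {'version_gte_3_5_or_1_5': '1.5.0',
                          'version_gte_3_6_or_1_6': '1.6.0'}
        else:
            thresholds = {'version_gte_3_5_or_1_5': '3.5.0',
                          'version_gte_3_6_or_1_6': '3.6.0'}
        gates = {name: version >= LooseVersion(minimum)
                 for name, minimum in thresholds.items()}
        return short_version, gates


    print(version_gates('3.4.1.2', 'openshift-enterprise'))
    # ('3.4', {'version_gte_3_5_or_1_5': False, 'version_gte_3_6_or_1_6': False})
    print(version_gates('1.5.0', 'origin'))
    # ('1.5', {'version_gte_3_5_or_1_5': True, 'version_gte_3_6_or_1_6': False})
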
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 70cf49dd4..b7b521f1a 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -10,11 +10,9 @@
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-- name: Ensure PyYaml and yum-utils are installed
+- name: Ensure various deps are installed
package: name={{ item }} state=present
- with_items:
- - PyYAML
- - yum-utils
+ with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
- name: Gather Cluster facts and set is_containerized if needed
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
new file mode 100644
index 000000000..9c3110ff6
--- /dev/null
+++ b/roles/openshift_facts/vars/main.yml
@@ -0,0 +1,7 @@
+---
+required_packages:
+ - iproute
+ - python-dbus
+ - python-six
+ - PyYAML
+ - yum-utils
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 74c50ae1d..ca5e88b15 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -11,4 +11,23 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_cli
+- role: openshift_hosted_facts
+- role: openshift_projects
+ openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
+- role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - hostnetwork
+ when: openshift.common.version_gte_3_2_or_1_2
+- role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ when: not openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index d2f6ba5f6..d87a3847c 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -29,14 +29,14 @@
changed_when: false
- set_fact:
- docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+ docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
- name: Create registry certificates if they do not exist
command: >
{{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert=/etc/origin/master/ca.crt
- --signer-key=/etc/origin/master/ca.key
- --signer-serial=/etc/origin/master/ca.serial.txt
+ --signer-cert={{ openshift_master_config_dir }}/ca.crt
+ --signer-key={{ openshift_master_config_dir }}/ca.key
+ --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
--hostnames="{{ docker_registry_service_ip.stdout }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift_master_config_dir }}/registry.crt
--key={{ openshift_master_config_dir }}/registry.key
@@ -65,12 +65,12 @@
- name: Determine if registry-certificates secret volume attached
command: >
{{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.volumes[*].secret.secretName}'
+ -o jsonpath='{.spec.template.spec.volumes[?(@.secret)].secret.secretName}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_volumes
changed_when: false
- failed_when: "'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
+ failed_when: "docker_registry_volumes.stdout != '' and 'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
- name: Attach registry-certificates secret volume
command: >
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 7b1b3f6ff..e56a68e27 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,3 +1,4 @@
+---
- fail:
msg: >
Object Storage Provider: {{ openshift.hosted.registry.storage.provider }}
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
index 8754616d9..70b0d67a4 100644
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
@@ -1,59 +1,59 @@
---
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
- - name: "Checking for logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
- register: logging_project
- failed_when: "'FAILED' in logging_project.stderr"
+- name: "Checking for logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
+ register: logging_project
+ failed_when: "'FAILED' in logging_project.stderr"
- - name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+- name: "Changing projects"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
- - name: "Cleanup any previous logging infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
- with_items:
- - kibana
- - fluentd
- - elasticsearch
- ignore_errors: yes
+- name: "Cleanup any previous logging infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
+ with_items:
+ - kibana
+ - fluentd
+ - elasticsearch
+ ignore_errors: yes
- - name: "Cleanup existing support infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
- ignore_errors: yes
+- name: "Cleanup existing support infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
+ ignore_errors: yes
- - name: "Cleanup existing secrets"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
- ignore_errors: yes
- register: clean_result
- failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
+- name: "Cleanup existing secrets"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
+ ignore_errors: yes
+ register: clean_result
+ failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
- - name: "Cleanup existing logging deployers"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
+- name: "Cleanup existing logging deployers"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
- - name: "Cleanup logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
+- name: "Cleanup logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
- - name: "Remove deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
- register: delete_output
- failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
+- name: "Remove deployer template"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
+ register: delete_output
+ failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
- - debug: msg="Success!"
+- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index 625af9acd..513a74c69 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -1,175 +1,175 @@
---
- - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
- when: target_registry is defined and target_registry
-
- - fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
- when: "openshift_hosted_logging_hostname is not defined or
- openshift_hosted_logging_elasticsearch_cluster_size is not defined or
- openshift_hosted_logging_master_public_url is not defined"
-
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
- - name: "Check for logging project already exists"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
- register: logging_project_result
- ignore_errors: True
-
- - name: "Create logging project"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- when: logging_project_result.stdout == ""
-
- - name: "Changing projects"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
- - name: "Creating logging deployer secret"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
- register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
-
- - name: "Create templates for logging accounts and the deployer"
- command: >
- {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
- -f {{ hosted_base }}/logging-deployer.yaml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n logging
- register: logging_import_template
- failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
- changed_when: "'created' in logging_import_template.stdout"
-
- - name: "Process the logging accounts template"
- shell: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
- register: process_deployer_accounts
- failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
- - name: "Set permissions for logging-deployer service account"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
- register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
-
- - name: "Set permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
-
- - name: "Set additional permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
- add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
-
- - name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user rolebinding-reader \
- system:serviceaccount:logging:aggregated-logging-elasticsearch
- register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
-
- - name: "Create ConfigMap for deployer parameters"
- command: >
- {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
- register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
-
- - name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
- register: process_deployer
- failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
- - name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 15
-
- - name: "Process imagestream template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: process_is
- failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
- - name: "Set insecured registry"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
- - name: "Wait for imagestreams to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
- retries: 20
- delay: 5
-
- - name: "Wait for component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es
- - kibana
- - curator
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - name: "Wait for ops component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es-ops
- - kibana-ops
- - curator-ops
- when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - name: "Wait for fluentd DaemonSet to exist"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 5
-
- - name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
- - name: "Wait for fluentd to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - debug:
- msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
+ when: target_registry is defined and target_registry
+
+- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
+ when: "openshift_hosted_logging_hostname is not defined or
+ openshift_hosted_logging_elasticsearch_cluster_size is not defined or
+ openshift_hosted_logging_master_public_url is not defined"
+
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+
+- name: "Check for logging project already exists"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
+ register: logging_project_result
+ ignore_errors: True
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+ when: logging_project_result.stdout == ""
+
+- name: "Changing projects"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
+
+- name: "Creating logging deployer secret"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
+ register: secret_output
+ failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+
+- name: "Create templates for logging accounts and the deployer"
+ command: >
+ {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f {{ hosted_base }}/logging-deployer.yaml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ -n logging
+ register: logging_import_template
+ failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
+ changed_when: "'created' in logging_import_template.stdout"
+
+- name: "Process the logging accounts template"
+ shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
+ register: process_deployer_accounts
+ failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
+
+- name: "Set permissions for logging-deployer service account"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
+ register: permiss_output
+ failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+
+- name: "Set permissions for fluentd"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+
+- name: "Set additional permissions for fluentd"
+ command: >
+ {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
+ add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+
+- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-cluster-role-to-user rolebinding-reader \
+ system:serviceaccount:logging:aggregated-logging-elasticsearch
+ register: rolebinding_reader_output
+  failed_when: "rolebinding_reader_output.rc == 1 and 'exists' not in rolebinding_reader_output.stderr"
+
+- name: "Create ConfigMap for deployer parameters"
+ command: >
+ {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
+ register: deployer_configmap_output
+ failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+
+- name: "Process the deployer template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
+ register: process_deployer
+ failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
+
+- name: "Wait for image pull and deployer pod"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 15
+
+- name: "Process imagestream template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: process_is
+ failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
+
+- name: "Set insecured registry"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+
+- name: "Wait for imagestreams to become available"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 and 'not found' not in result.stderr
+ retries: 20
+ delay: 5
+
+- name: "Wait for component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es
+ - kibana
+ - curator
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- name: "Wait for ops component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es-ops
+ - kibana-ops
+ - curator-ops
+ when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- name: "Wait for fluentd DaemonSet to exist"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 5
+
+- name: "Deploy fluentd by labeling the node"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
+
+- name: "Wait for fluentd to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- debug:
+ msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
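
Most of the "Wait for ..." tasks above share one retry shape: poll `oc get` through `grep` and let `until`/`retries`/`delay` re-run the task until the match appears. Reduced to a self-contained sketch (selectors copied from the role; the kubeconfig path comes from the earlier `mktemp` task):

```yaml
- name: Wait for each logging component to report Running (sketch)
  shell: >
    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
    get pods -l component={{ item }} | grep Running
  with_items:
    - es
    - kibana
    - curator
  register: result
  until: result.rc == 0   # re-run until grep finds a Running pod for this item
  retries: 20             # give up after 20 attempts per item
  delay: 15               # seconds between attempts
```

Note that `until` applies per item, so each component gets its own retry budget.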
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
index 11412733b..33320e9c8 100644
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -1,3 +1,4 @@
+---
tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
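
These vars compose `oc new-app` parameters with inline Jinja2 conditionals so that an unset input collapses to an empty string instead of emitting a half-built flag. The idiom on its own, with illustrative variable names that are not part of the role:

```yaml
# Hypothetical vars showing the inline-if pattern used above
example_prefix: "{{ example_image_prefix | default('') }}"
example_prefix_kv: "{{ '-p IMAGE_PREFIX=' ~ example_prefix | quote if example_prefix != '' else '' }}"
example_version_kv: "{{ '-p IMAGE_VERSION=' ~ example_image_version | quote if example_image_version | default(none) is not none else '' }}"
```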
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_hosted_metrics/README.md
index f3c0f3474..c2af3c494 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_hosted_metrics/README.md
@@ -27,17 +27,11 @@ From this role:
| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
-From openshift_common:
-
-| Name | Default Value | |
-|---------------------------------------|----------------|----------------------------------------|
-| openshift_master_default_subdomain | null | Subdomain FQDN (Mandatory) |
-
-
Dependencies
------------
openshift_facts
openshift_examples
+openshift_master_facts
Example Playbook
----------------
@@ -46,7 +40,7 @@ Example Playbook
- name: Configure openshift-metrics
hosts: oo_first_master
roles:
- - role: openshift_metrics
+ - role: openshift_hosted_metrics
```
License
diff --git a/roles/openshift_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
index a01f24df8..a01f24df8 100644
--- a/roles/openshift_metrics/defaults/main.yml
+++ b/roles/openshift_hosted_metrics/defaults/main.yml
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
index 69c5a1663..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_hosted_metrics/handlers/main.yml
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
index a89467de5..debca3ca6 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_hosted_metrics/meta/main.yaml
@@ -15,3 +15,4 @@ galaxy_info:
dependencies:
- { role: openshift_examples }
- { role: openshift_facts }
+- { role: openshift_master_facts }
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 98e21375a..2c839996e 100644
--- a/roles/openshift_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -3,7 +3,7 @@
- name: Test if metrics-deployer service account exists
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace=openshift-infra
get serviceaccount metrics-deployer -o json
register: serviceaccount
@@ -14,7 +14,7 @@
shell: >
echo {{ metrics_deployer_sa | to_json | quote }} |
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
create -f -
when: serviceaccount.rc == 1
@@ -22,7 +22,7 @@
- name: Test edit permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
register: edit_rolebindings
@@ -31,7 +31,7 @@
- name: Add edit permission to the openshift-infra project to metrics-deployer SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-role-to-user edit
system:serviceaccount:openshift-infra:metrics-deployer
@@ -40,7 +40,7 @@
- name: Test hawkular view permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
register: view_rolebindings
@@ -49,7 +49,7 @@
- name: Add view permissions to hawkular SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-role-to-user view
system:serviceaccount:openshift-infra:hawkular
@@ -58,7 +58,7 @@
- name: Test cluster-reader permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
register: cluster_reader_clusterrolebindings
@@ -67,7 +67,7 @@
- name: Add cluster-reader permission to the openshift-infra project to heapster SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-cluster-role-to-user cluster-reader
system:serviceaccount:openshift-infra:heapster
@@ -76,7 +76,7 @@
- name: Create metrics-deployer secret
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
@@ -89,7 +89,7 @@
set_fact:
deployer_cmd: "{{ openshift.common.client_binary }} process -f \
{{ hosted_base }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }} \
+ HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \
-v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
-v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
-v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
@@ -98,7 +98,7 @@
{{ image_version }} \
-v MODE={{ deployment_mode }} \
| {{ openshift.common.client_binary }} --namespace openshift-infra \
- --config={{ openshift_metrics_kubeconfig }} \
+ --config={{ openshift_hosted_metrics_kubeconfig }} \
create -o name -f -"
- name: Deploy Metrics
@@ -116,7 +116,7 @@
shell: >
{{ openshift.common.client_binary }}
--namespace openshift-infra
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
get {{ deploy_metrics.stdout }}
register: deploy_result
until: "{{ 'Completed' in deploy_result.stdout }}"
@@ -128,12 +128,5 @@
modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.metricsPublicURL
- yaml_value: "https://{{ metrics_hostname }}/hawkular/metrics"
+ yaml_value: "{{ openshift_hosted_metrics_public_url }}"
notify: restart master
-
-- name: Store metrics public_url
- openshift_facts:
- role: master
- local_facts:
- metrics_public_url: "https://{{ metrics_hostname }}/hawkular/metrics"
- when: deploy_result | changed
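
install.yml leans on a probe-then-create pattern: run a read-only `oc get`, register the result, and only create the object when the probe came back non-zero. Condensed into a generic sketch (the `failed_when: false` guard is added here for clarity and is an assumption, not a line from the role):

```yaml
- name: Check whether the metrics-deployer service account exists (sketch)
  command: >
    {{ openshift.common.client_binary }}
    --config={{ openshift_hosted_metrics_kubeconfig }}
    --namespace=openshift-infra
    get serviceaccount metrics-deployer -o json
  register: serviceaccount
  changed_when: false
  failed_when: false   # a missing object is expected, not an error

- name: Create it only when the probe failed
  shell: >
    echo {{ metrics_deployer_sa | to_json | quote }} |
    {{ openshift.common.client_binary }}
    --config={{ openshift_hosted_metrics_kubeconfig }}
    --namespace openshift-infra
    create -f -
  when: serviceaccount.rc == 1
```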
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
index 26af279b1..5ce8aa92b 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_hosted_metrics/tasks/main.yaml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: This role required openshift_master_default_subdomain or openshift_hosted_metrics_public_url be set
- when: openshift.master.metrics_public_url | default(openshift_hosted_metrics_public_url | default(openshift.master.default_subdomain | default(openshift_master_default_subdomain | default(none)))) is none
-
- name: Create temp directory for kubeconfig
command: mktemp -d /tmp/openshift-ansible-XXXXXX
register: mktemp
@@ -10,11 +6,11 @@
- name: Record kubeconfig tmp dir
set_fact:
- openshift_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- name: Copy the admin client config(s)
command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_metrics_kubeconfig }}
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
changed_when: False
- name: Set hosted metrics facts
@@ -27,12 +23,6 @@
- 'openshift.hosted.metrics.*'
- set_fact:
- # Prefer the master facts over bare variables if present, prefer
- # metrics_public_url over creating a default using default_subdomain
- metrics_hostname: "{{ openshift.hosted.metrics.public_url
- | default('hawkular-metrics.' ~ (openshift.master.default_subdomain
- | default(openshift_master_default_subdomain )))
- | oo_hostname_from_url }}"
metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
@@ -43,21 +33,21 @@
- name: Check for existing metrics pods
shell: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get pods -l {{ item }} | grep -q Running
register: metrics_pods_status
with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
+ - metrics-infra=hawkular-metrics
+ - metrics-infra=heapster
+ - metrics-infra=hawkular-cassandra
failed_when: false
changed_when: false
- name: Check for previous deployer
shell: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
register: metrics_deployer_status
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
index 0331bcb89..6c207d6ac 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_hosted_metrics/vars/main.yaml
@@ -1,6 +1,7 @@
+---
hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
metrics_deployer_sa:
apiVersion: v1
@@ -8,7 +9,7 @@ metrics_deployer_sa:
metadata:
name: metrics-deployer
secrets:
- - name: metrics-deployer
+ - name: metrics-deployer
hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
index 13cef2d66..c47d5361d 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
@@ -72,7 +72,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -83,7 +82,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index ddfda1272..c67058696 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -81,7 +81,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -92,7 +91,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
@@ -103,7 +101,6 @@ items:
metadata:
name: logging-elasticsearch-view-role
roleRef:
- kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
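
All three hunks remove `kind: ClusterRole` from `roleRef`; in the v1 template objects used here the binding is resolved by the role's name alone, and presumably the extra `kind` field was being rejected or ignored by the API version these templates target. For orientation, a trimmed binding item in the shape these templates use, reconstructed from the surrounding context:

```yaml
- apiVersion: v1
  kind: RoleBinding
  metadata:
    name: logging-deployer-edit-role
  roleRef:
    name: edit                 # resolved by name; no kind field here
  subjects:
  - kind: ServiceAccount
    name: logging-deployer
```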
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 37d4679ef..3f24fd6be 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,13 +1,14 @@
+---
manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
+ apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: management-infra-admin
+ rules:
+ - resources:
+ - pods/proxy
+ verbs:
+ - '*'
manageiq_metrics_admin_clusterrole:
apiVersion: v1
@@ -24,28 +25,28 @@ manageiq_metrics_admin_clusterrole:
- '*'
manageiq_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: management-admin
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: management-admin
manageiq_image_inspector_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: inspector-admin
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: inspector-admin
manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
manage_iq_tasks:
- - policy add-role-to-user -n management-infra admin -z management-admin
- - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
- - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
- - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
- - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
- - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
- - policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
- - policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- policy add-role-to-user -n management-infra admin -z management-admin
+- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
+- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
- - policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 7457e4378..3a595b2d1 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -11,4 +11,33 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_master_facts
+- role: openshift_hosted_facts
+- role: openshift_master_certificates
+- role: openshift_etcd_client_certificates
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+- role: openshift_clock
+- role: openshift_cloud_provider
+- role: openshift_builddefaults
+- role: os_firewall
+ os_firewall_allow:
+ - service: api server https
+ port: "{{ openshift.master.api_port }}/tcp"
+ - service: api controllers https
+ port: "{{ openshift.master.controllers_port }}/tcp"
+ - service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+ - service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
+- role: os_firewall
+ os_firewall_allow:
+ - service: etcd embedded
+ port: 4001/tcp
+ when: groups.oo_etcd_to_config | default([]) | length == 0
+- role: nickhammond.logrotate
+- role: nuage_master
+ when: openshift.common.use_nuage | bool
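
openshift_master now pulls in most of its prerequisites as role dependencies, and the list shows the two refinements meta dependencies allow: inline variables scoped to a single dependency, and a `when:` condition on the dependency itself. The general shape, with illustrative names:

```yaml
# meta/main.yml sketch: a parameterized, conditional role dependency
dependencies:
- role: example_firewall
  example_firewall_allow:
  - service: api server https
    port: 8443/tcp
  when: example_configure_firewall | default(true) | bool
```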
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 2de5cd3f3..9cd6b6c81 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -29,13 +29,6 @@
state: present
when: not openshift.common.is_containerized | bool
-- name: Pull master image
- command: >
- docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
- name: Create openshift.common.data_dir
file:
path: "{{ openshift.common.data_dir }}"
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 11e010716..39ea42ab3 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,3 +1,4 @@
+---
# This file is included both in the openshift_master role and in the upgrade
# playbooks. For that reason the ha_svc variables are use set_fact instead of
# the vars directory on the role.
@@ -13,6 +14,14 @@
ha_svc_template_path: "docker-cluster"
when: openshift.common.is_containerized | bool
+# This is the image used for both HA and non-HA clusters:
+- name: Pre-pull master image
+ command: >
+ docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
# workaround for missing systemd unit files
- name: Create the systemd unit files
template:
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index dc9226a5a..fcb8125e9 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -15,8 +15,8 @@ assetConfig:
{% if 'logging_public_url' in openshift.master %}
loggingPublicURL: {{ openshift.master.logging_public_url }}
{% endif %}
-{% if 'metrics_public_url' in openshift.master %}
- metricsPublicURL: {{ openshift.master.metrics_public_url }}
+{% if openshift_hosted_metrics_deploy | default(false) | bool %}
+ metricsPublicURL: {{ openshift_hosted_metrics_public_url }}
{% endif %}
{% if 'extension_scripts' in openshift.master %}
extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }}
@@ -123,7 +123,7 @@ kubernetesMasterConfig:
keyFile: master.proxy-client.key
schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }}
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
- servicesNodePortRange: ""
+ servicesNodePortRange: "{{ openshift_node_port_range | default("") }}"
servicesSubnet: {{ openshift.common.portal_net }}
staticNodeNames: {{ openshift_node_ips | default([], true) }}
{% endif %}
@@ -202,7 +202,7 @@ projectConfig:
mcsLabelsPerProject: {{ openshift.master.mcs_labels_per_project }}
uidAllocatorRange: "{{ openshift.master.uid_allocator_range }}"
routingConfig:
- subdomain: "{{ openshift.master.default_subdomain | default("") }}"
+ subdomain: "{{ openshift_master_default_subdomain | default("") }}"
serviceAccountConfig:
limitSecretReferences: false
managedNames:
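
Both template edits follow the same convention: a key is emitted only when its controlling variable is defined and truthy, while an always-present key falls back to an empty string through `default("")` (the nested double quotes work because Jinja2 evaluates the expression before the line is read as YAML). A placeholder fragment in the same style:

```yaml
{% if example_feature_enabled | default(false) | bool %}
  examplePublicURL: {{ example_public_url }}
{% endif %}
  exampleNodePortRange: "{{ example_node_port_range | default("") }}"
```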
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 7c1d5a212..01cd28c66 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,17 +1,18 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
scheduler_config:
kind: Policy
apiVersion: v1
- predicates: "{{ openshift.master.scheduler_predicates }}"
- priorities: "{{ openshift.master.scheduler_priorities }}"
+ predicates: "{{ openshift_master_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates
+ | default(openshift_master_scheduler_default_predicates)) }}"
+ priorities: "{{ openshift_master_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities
+ | default(openshift_master_scheduler_default_priorities)) }}"
openshift_master_valid_grant_methods:
- auto
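
The scheduler settings now resolve through chained `default()` filters: an explicit inventory override wins, then whatever was read from the existing scheduler.json, then the computed defaults from the new lookup plugins. Stripped to placeholder names, the precedence chain is:

```yaml
# user override -> value from existing config -> computed default
effective_predicates: "{{ user_predicates
                          | default(current_predicates
                          | default(computed_default_predicates)) }}"
```

Because `default()` only fires when the variable is undefined, an explicitly set empty list still counts as an override.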
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
new file mode 100644
index 000000000..f1cbbeb2d
--- /dev/null
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
new file mode 100644
index 000000000..29a59a0d3
--- /dev/null
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -0,0 +1,93 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, regions_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ predicates = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'openshift-enterprise':
+ # convert short_version to origin short_version
+ short_version = re.sub('^3.', '1.', short_version)
+
+ if short_version in ['1.1', '1.2']:
+ predicates.append({'name': 'PodFitsHostPorts'})
+ predicates.append({'name': 'PodFitsResources'})
+
+ # applies to all known versions
+ predicates.append({'name': 'NoDiskConflict'})
+
+ # only 1.1 didn't include NoVolumeZoneConflict
+ if short_version != '1.1':
+ predicates.append({'name': 'NoVolumeZoneConflict'})
+
+ if short_version in ['1.1', '1.2']:
+ predicates.append({'name': 'MatchNodeSelector'})
+
+ if short_version != '1.1':
+ predicates.append({'name': 'MaxEBSVolumeCount'})
+ predicates.append({'name': 'MaxGCEPDVolumeCount'})
+
+ if short_version not in ['1.1', '1.2']:
+ predicates.append({'name': 'GeneralPredicates'})
+ predicates.append({'name': 'PodToleratesNodeTaints'})
+ predicates.append({'name': 'CheckNodeMemoryPressure'})
+
+ if short_version not in ['1.1', '1.2', '1.3']:
+ predicates.append({'name': 'CheckNodeDiskPressure'})
+ predicates.append({'name': 'MatchInterPodAffinity'})
+
+ if regions_enabled:
+ region_predicate = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+ }
+ predicates.append(region_predicate)
+
+ return predicates
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
new file mode 100644
index 000000000..36022597f
--- /dev/null
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -0,0 +1,85 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, zones_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ priorities = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ]
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'openshift-enterprise':
+ # convert short_version to origin short_version
+ short_version = re.sub('^3.', '1.', short_version)
+
+ if short_version == '1.4':
+ priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
+
+ # only 1.1 didn't include NodeAffinityPriority
+ if short_version != '1.1':
+ priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
+
+ if short_version not in ['1.1', '1.2']:
+ priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
+
+ if short_version not in ['1.1', '1.2', '1.3']:
+ priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
+
+ if zones_enabled:
+ zone_priority = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+ }
+ priorities.append(zone_priority)
+
+ return priorities
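
Both lookup plugins normally read `short_version` and `deployment_type` from the facts, but their `run()` signatures also accept them as keyword arguments (along with `regions_enabled` / `zones_enabled`), which is how the unit tests below drive them. A hypothetical explicit call from a playbook, with illustrative argument values; in the role itself the lookups are called with no arguments:

```yaml
- set_fact:
    example_default_predicates: "{{ lookup('openshift_master_facts_default_predicates',
                                           short_version='1.4',
                                           deployment_type='origin',
                                           regions_enabled=False) }}"
    example_default_priorities: "{{ lookup('openshift_master_facts_default_priorities',
                                           short_version='1.4',
                                           deployment_type='origin',
                                           zones_enabled=False) }}"
```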
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 1f27a2c1d..0dba4b3ba 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,4 +1,32 @@
---
+
+# Ensure the default sub-domain is set:
+- name: Migrate legacy osm_default_subdomain fact
+ set_fact:
+ openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}"
+ when: openshift_master_default_subdomain is not defined
+
+- fail:
+ msg: openshift_master_default_subdomain must be set to deploy metrics
+ when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == ""
+
+# NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles
+# to properly configure the master-config.yaml file.
+#
+# NOTE: Today only changing the hostname for the metrics public URL is supported, the
+# path must stay consistent. As such if openshift_hosted_metrics_public_url is set in
+# inventory, we extract the hostname, and then reset openshift_hosted_metrics_public_url
+# to the format that we know is valid. (This may change in future)
+- set_fact:
+ g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
+ | default('hawkular-metrics.' ~ (openshift_master_default_subdomain))
+ | oo_hostname_from_url }}"
+ when: openshift_hosted_metrics_deploy | default(false) | bool
+
+- set_fact:
+ openshift_hosted_metrics_public_url: "https://{{ g_metrics_hostname }}/hawkular/metrics"
+ when: openshift_hosted_metrics_deploy | default(false) | bool
+
- name: Set master facts
openshift_facts:
role: master
@@ -49,7 +77,6 @@
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
- default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain | default(None), true) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
default_node_selector: "{{ osm_default_node_selector | default(None) }}"
project_request_message: "{{ osm_project_request_message | default(None) }}"
@@ -64,11 +91,9 @@
master_count: "{{ openshift_master_count | default(None) }}"
controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
- scheduler_predicates: "{{ openshift_master_scheduler_predicates | default(None) }}"
- scheduler_priorities: "{{ openshift_master_scheduler_priorities | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
- kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
+ kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
+ oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
@@ -77,5 +102,31 @@
api_env_vars: "{{ openshift_master_api_env_vars | default(None) }}"
controllers_env_vars: "{{ openshift_master_controllers_env_vars | default(None) }}"
audit_config: "{{ openshift_master_audit_config | default(None) }}"
- metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}"
scheduler_args: "{{ openshift_master_scheduler_args | default(None) }}"
+
+- name: Determine if scheduler config present
+ stat:
+ path: "{{ openshift_master_scheduler_conf }}"
+ register: scheduler_config_stat
+
+- set_fact:
+ openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
+ openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
+
+- block:
+ - name: Retrieve current scheduler config
+ slurp:
+ src: "{{ openshift_master_scheduler_conf }}"
+ register: current_scheduler_config
+
+ - set_fact:
+ openshift_master_scheduler_current_config: "{{ current_scheduler_config.content | b64decode | from_json }}"
+
+ - fail:
+      msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_current_config.apiVersion }}"
+ when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}"
+
+ - set_fact:
+ openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}"
+ openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}"
+ when: "{{ scheduler_config_stat.stat.exists }}"
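
The new scheduler inspection wraps its tasks in a `block` so a single `when:` (the earlier `stat` result) gates the slurp, the base64/JSON decode, and the derived facts together. The structure without the role-specific details (the file path here is assumed for illustration):

```yaml
- name: Check whether a scheduler config file exists (sketch)
  stat:
    path: /etc/origin/master/scheduler.json   # illustrative path
  register: scheduler_config_stat

- block:
  - name: Read and decode the existing config
    slurp:
      src: /etc/origin/master/scheduler.json
    register: current_scheduler_config

  - set_fact:
      current_config: "{{ current_scheduler_config.content | b64decode | from_json }}"
  when: scheduler_config_stat.stat.exists
```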
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
new file mode 100644
index 000000000..07bac6826
--- /dev/null
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -0,0 +1,251 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+from nose.tools import raises, assert_equal
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
+
+from openshift_master_facts_default_predicates import LookupModule # noqa: E402
+
+DEFAULT_PREDICATES_1_1 = [
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'MatchNodeSelector'},
+]
+
+DEFAULT_PREDICATES_1_2 = [
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'}
+]
+
+DEFAULT_PREDICATES_1_3 = [
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'}
+]
+
+DEFAULT_PREDICATES_1_4 = [
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'MatchInterPodAffinity'}
+]
+
+REGION_PREDICATE = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+}
+
+TEST_VARS = [
+ ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
+ ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
+ ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
+ ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
+ ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
+ ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
+ ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.5', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.6', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+]
+
+
+class TestOpenShiftMasterFactsDefaultPredicates(object):
+ def setUp(self):
+ self.lookup = LookupModule()
+ self.default_facts = {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+ @raises(AnsibleError)
+ def test_missing_short_version_and_missing_openshift_release(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ def check_defaults_short_version(self, short_version, deployment_type, default_predicates,
+ regions_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = short_version
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def check_defaults_short_version_kwarg(self, short_version, deployment_type, default_predicates,
+ regions_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled,
+ short_version=short_version)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def check_defaults_deployment_type_kwarg(self, short_version, deployment_type,
+ default_predicates, regions_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = short_version
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled,
+ deployment_type=deployment_type)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def check_defaults_only_kwargs(self, short_version, deployment_type,
+ default_predicates, regions_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled,
+ short_version=short_version,
+ deployment_type=deployment_type)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def check_defaults_release(self, release, deployment_type, default_predicates,
+ regions_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift_release'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def check_defaults_version(self, version, deployment_type, default_predicates,
+ regions_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift_version'] = version
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def check_defaults_override_vars(self, release, deployment_type,
+ default_predicates, regions_enabled,
+ extra_facts=None):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ if extra_facts is not None:
+ for fact in extra_facts:
+ facts[fact] = extra_facts[fact]
+ results = self.lookup.run(None, variables=facts,
+ regions_enabled=regions_enabled,
+ return_set_vars=False)
+ if regions_enabled:
+ assert_equal(results, default_predicates + [REGION_PREDICATE])
+ else:
+ assert_equal(results, default_predicates)
+
+ def test_openshift_version(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ release = release + '.1'
+ yield self.check_defaults_version, release, deployment_type, default_predicates, regions_enabled
+
+ def test_v_release_defaults(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ yield self.check_defaults_release, 'v' + release, deployment_type, default_predicates, regions_enabled
+
+ def test_release_defaults(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ yield self.check_defaults_release, release, deployment_type, default_predicates, regions_enabled
+
+ def test_short_version_defaults(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ yield self.check_defaults_short_version, release, deployment_type, default_predicates, regions_enabled
+
+ def test_short_version_kwarg(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ yield self.check_defaults_short_version_kwarg, release, deployment_type, default_predicates, regions_enabled
+
+ def test_only_kwargs(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ yield self.check_defaults_only_kwargs, release, deployment_type, default_predicates, regions_enabled
+
+ def test_deployment_type_kwarg(self):
+ for regions_enabled in (True, False):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ yield self.check_defaults_deployment_type_kwarg, release, deployment_type, default_predicates, regions_enabled
+
+ def test_trunc_openshift_release(self):
+ for release, deployment_type, default_predicates in TEST_VARS:
+ release = release + '.1'
+ yield self.check_defaults_release, release, deployment_type, default_predicates, False
+
+ @raises(AnsibleError)
+ def test_unknown_deployment_types(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '1.1'
+ facts['openshift']['common']['deployment_type'] = 'bogus'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_unknown_origin_version(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '0.1'
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_unknown_ocp_version(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '0.1'
+ facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_deployment_type(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '10.10'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_openshift_facts(self):
+ facts = {}
+ self.lookup.run(None, variables=facts)
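The error-path tests above pin down the predicates lookup's failure behaviour: an unknown deployment type, a release older than any known defaults, or missing openshift facts must raise AnsibleError rather than silently return an empty predicate list. A minimal sketch of that contract, assuming the lookup keys its defaults by deployment type and short version (the plugin's actual messages and data layout may differ):

```python
# Sketch of the failure contract asserted above; not the shipped lookup plugin.
from ansible.errors import AnsibleError

KNOWN_DEFAULTS = {
    ('origin', '1.1'), ('origin', '1.2'), ('origin', '1.3'), ('origin', '1.4'),
    ('openshift-enterprise', '3.1'), ('openshift-enterprise', '3.2'),
    ('openshift-enterprise', '3.3'), ('openshift-enterprise', '3.4'),
}


def validate(deployment_type, short_version):
    """Raise AnsibleError for the cases the error-path tests exercise."""
    if deployment_type is None or short_version is None:
        raise AnsibleError('deployment_type and a release or version are required')
    if (deployment_type, short_version) not in KNOWN_DEFAULTS:
        raise AnsibleError('unsupported deployment_type/version combination')
```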
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
new file mode 100644
index 000000000..5427a07a1
--- /dev/null
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -0,0 +1,238 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+from nose.tools import raises, assert_equal
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
+
+from openshift_master_facts_default_priorities import LookupModule # noqa: E402
+
+DEFAULT_PRIORITIES_1_1 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_2 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_3 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_4 = [
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+]
+
+ZONE_PRIORITY = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+}
+
+TEST_VARS = [
+ ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
+ ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
+ ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
+ ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
+ ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
+ ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
+ ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
+ ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+]
+
+
+class TestOpenShiftMasterFactsDefaultPriorities(object):
+ def setUp(self):
+ self.lookup = LookupModule()
+ self.default_facts = {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+ @raises(AnsibleError)
+ def test_missing_short_version_and_missing_openshift_release(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ def check_defaults_short_version(self, release, deployment_type,
+ default_priorities, zones_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def check_defaults_short_version_kwarg(self, release, deployment_type,
+ default_priorities, zones_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts,
+ zones_enabled=zones_enabled,
+ short_version=release)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def check_defaults_deployment_type_kwarg(self, release, deployment_type,
+ default_priorities, zones_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = release
+ results = self.lookup.run(None, variables=facts,
+ zones_enabled=zones_enabled,
+ deployment_type=deployment_type)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def check_defaults_only_kwargs(self, release, deployment_type,
+ default_priorities, zones_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ results = self.lookup.run(None, variables=facts,
+ zones_enabled=zones_enabled,
+ short_version=release,
+ deployment_type=deployment_type)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def check_defaults_release(self, release, deployment_type, default_priorities,
+ zones_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift_release'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def check_defaults_version(self, release, deployment_type, default_priorities,
+ zones_enabled):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift_version'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def check_defaults_override_vars(self, release, deployment_type,
+ default_priorities, zones_enabled,
+ extra_facts=None):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = release
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ if extra_facts is not None:
+ for fact in extra_facts:
+ facts[fact] = extra_facts[fact]
+ results = self.lookup.run(None, variables=facts,
+ zones_enabled=zones_enabled,
+ return_set_vars=False)
+ if zones_enabled:
+ assert_equal(results, default_priorities + [ZONE_PRIORITY])
+ else:
+ assert_equal(results, default_priorities)
+
+ def test_openshift_version(self):
+ for zones_enabled in (True, False):
+ for release, deployment_type, default_priorities in TEST_VARS:
+ release = release + '.1'
+ yield self.check_defaults_version, release, deployment_type, default_priorities, zones_enabled
+
+ def test_v_release_defaults(self):
+ for zones_enabled in (True, False):
+ for release, deployment_type, default_priorities in TEST_VARS:
+ release = 'v' + release
+ yield self.check_defaults_release, release, deployment_type, default_priorities, zones_enabled
+
+ def test_release_defaults(self):
+ for zones_enabled in (True, False):
+ for release, deployment_type, default_priorities in TEST_VARS:
+ yield self.check_defaults_release, release, deployment_type, default_priorities, zones_enabled
+
+ def test_short_version_defaults(self):
+ for zones_enabled in (True, False):
+ for short_version, deployment_type, default_priorities in TEST_VARS:
+ yield self.check_defaults_short_version, short_version, deployment_type, default_priorities, zones_enabled
+
+ def test_only_kwargs(self):
+ for zones_enabled in (True, False):
+ for short_version, deployment_type, default_priorities in TEST_VARS:
+ yield self.check_defaults_only_kwargs, short_version, deployment_type, default_priorities, zones_enabled
+
+ def test_deployment_type_kwarg(self):
+ for zones_enabled in (True, False):
+ for short_version, deployment_type, default_priorities in TEST_VARS:
+ yield self.check_defaults_deployment_type_kwarg, short_version, deployment_type, default_priorities, zones_enabled
+
+ def test_release_kwarg(self):
+ for zones_enabled in (True, False):
+ for short_version, deployment_type, default_priorities in TEST_VARS:
+ yield self.check_defaults_short_version_kwarg, short_version, deployment_type, default_priorities, zones_enabled
+
+ def test_trunc_openshift_release(self):
+ for release, deployment_type, default_priorities in TEST_VARS:
+ release = release + '.1'
+ yield self.check_defaults_release, release, deployment_type, default_priorities, False
+
+ @raises(AnsibleError)
+ def test_unknown_origin_version(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '0.1'
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_unknown_ocp_version(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '0.1'
+ facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_unknown_deployment_types(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '1.1'
+ facts['openshift']['common']['deployment_type'] = 'bogus'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_deployment_type(self):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '10.10'
+ self.lookup.run(None, variables=facts)
+
+ @raises(AnsibleError)
+ def test_missing_openshift_facts(self):
+ facts = {}
+ self.lookup.run(None, variables=facts)
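Taken together, the generator tests above fix the contract of the priorities lookup: for a supported release it returns the per-release defaults unchanged, and it appends the zone spreading priority only when zones_enabled is set. A self-contained sketch of that merge (illustration only, not the lookup plugin itself):

```python
# Mirrors the expectation asserted by the tests above: defaults per release,
# plus ZONE_PRIORITY when zone spreading is enabled.
DEFAULT_PRIORITIES_1_4 = [
    {'name': 'LeastRequestedPriority', 'weight': 1},
    {'name': 'BalancedResourceAllocation', 'weight': 1},
    {'name': 'SelectorSpreadPriority', 'weight': 1},
    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
    {'name': 'NodeAffinityPriority', 'weight': 1},
    {'name': 'TaintTolerationPriority', 'weight': 1},
    {'name': 'InterPodAffinityPriority', 'weight': 1},
]

ZONE_PRIORITY = {
    'name': 'Zone',
    'argument': {'serviceAntiAffinity': {'label': 'zone'}},
    'weight': 2,
}


def expected_priorities(default_priorities, zones_enabled):
    """Defaults plus the zone priority when zone spreading is enabled."""
    return default_priorities + ([ZONE_PRIORITY] if zones_enabled else [])


assert expected_priorities(DEFAULT_PRIORITIES_1_4, True)[-1] == ZONE_PRIORITY
assert expected_priorities(DEFAULT_PRIORITIES_1_4, False) == DEFAULT_PRIORITIES_1_4
```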
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index 406d50c24..fa745eb66 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -1,3 +1,8 @@
+---
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+
builddefaults_yaml:
BuildDefaults:
configuration:
@@ -18,8 +23,3 @@ builddefaults_yaml:
value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
- name: no_proxy
value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
-
-metrics_hostname: "{{ openshift_hosted_metrics_public_url
- | default('hawkular-metrics.' ~ (openshift.master.default_subdomain
- | default(openshift_master_default_subdomain )))
- | oo_hostname_from_url }}"
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index d1920c485..b69b60c1d 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -43,10 +43,12 @@ Currently we support re-labeling nodes but we don't re-schedule running pods nor
```
oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --evacuate ${NODE}
+oadm manage-node --drain ${NODE}
oadm manage-node --schedulable=true ${NODE}
```
+> If you are using a version older than 1.5/3.5, you must replace `--drain` with `--evacuate`.
+
TODO
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index ebe584588..cb51416d4 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,14 +1,14 @@
---
- name: restart openvswitch
systemd: name=openvswitch state=restarted
- when: not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
notify:
- restart openvswitch pause
- name: restart openvswitch pause
pause: seconds=15
- when: openshift.common.is_containerized | bool
+ when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
systemd: name={{ openshift.common.service_type }}-node state=restarted
- when: not (node_service_status_changed | default(false) | bool)
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c39269f33..91f118191 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -11,4 +11,35 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_common
+- role: openshift_clock
+- role: openshift_docker
+- role: openshift_node_certificates
+- role: openshift_cloud_provider
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq | bool
+- role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes kubelet
+ port: 10250/tcp
+ - service: http
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+ - service: Openshift kubelet ReadOnlyPort
+ port: 10255/tcp
+ - service: Openshift kubelet ReadOnlyPort udp
+ port: 10255/udp
+- role: os_firewall
+ os_firewall_allow:
+ - service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.common.use_openshift_sdn | bool
+- role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes service NodePort TCP
+ port: "{{ openshift_node_port_range | default('') }}/tcp"
+ - service: Kubernetes service NodePort UDP
+ port: "{{ openshift_node_port_range | default('') }}/udp"
+ when: openshift_node_port_range is defined
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 31d07838d..e970c4cd1 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,6 +44,8 @@
- name: Check for tuned package
command: rpm -q tuned
+ args:
+ warn: no
register: tuned_installed
changed_when: false
failed_when: false
@@ -58,20 +60,6 @@
state: present
when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
-- name: Pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
-- name: Pull OpenVSwitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
-
- name: Install the systemd units
include: systemd_units.yml
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index f722a6e69..626c47387 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,6 +1,21 @@
+---
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
+- name: Pre-pull node image
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+
- name: Install Node dependencies docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
@@ -54,12 +69,12 @@
line: "{{ item.line }}"
create: true
with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+ - regex: '^IMAGE_VERSION='
+ line: "IMAGE_VERSION={{ openshift_image_tag }}"
notify:
- restart node
@@ -70,12 +85,12 @@
line: "{{ item.line }}"
create: true
with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ - regex: '^HTTP_PROXY='
+ line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+ - regex: '^HTTPS_PROXY='
+ line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+ - regex: '^NO_PROXY='
+ line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 35f84c2cf..717bf3cea 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -64,13 +64,13 @@
- name: Generate the node server certificate
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
- --cert={{ openshift_node_generated_config_dir }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
- --overwrite=true
- --hostnames={{ openshift.common.all_hostnames |join(",") }}
- --signer-cert={{ openshift_ca_cert }}
- --signer-key={{ openshift_ca_key }}
- --signer-serial={{ openshift_ca_serial }}
+ --cert={{ openshift_node_generated_config_dir }}/server.crt
+ --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
+ --overwrite=true
+ --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }},{{ openshift.common.ip }},{{ openshift.common.public_ip }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
args:
creates: "{{ openshift_node_generated_config_dir }}/server.crt"
when: node_certs_missing | bool
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index c3d5efb9e..24798d3d2 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -28,7 +28,7 @@ cd /etc/sysconfig/network-scripts
[ -f ../network ] && . ../network
-if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then
+if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
# If the origin-upstream-dns config file changed we need to restart
NEEDS_RESTART=0
UPSTREAM_DNS='/etc/dnsmasq.d/origin-upstream-dns.conf'
@@ -48,7 +48,6 @@ if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then
-n "${IP4_NAMESERVERS}" ]]; then
if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
cat << EOF > /etc/dnsmasq.d/origin-dns.conf
-strict-order
no-resolv
domain-needed
server=/cluster.local/172.30.0.1
@@ -81,6 +80,10 @@ EOF
NEEDS_RESTART=1
fi
+ if ! systemctl -q is-active dnsmasq.service; then
+ NEEDS_RESTART=1
+ fi
+
######################################################################
if [ "${NEEDS_RESTART}" -eq "1" ]; then
systemctl restart dnsmasq
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
index 4d1bd3794..d5fda7bd0 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -1,2 +1,2 @@
---
-- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." \ No newline at end of file
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index 1753bb821..f397cbbf1 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -1,4 +1,3 @@
-strict-order
no-resolv
domain-needed
server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
diff --git a/roles/openshift_preflight/README.md b/roles/openshift_preflight/README.md
new file mode 100644
index 000000000..b6d3542d3
--- /dev/null
+++ b/roles/openshift_preflight/README.md
@@ -0,0 +1,52 @@
+OpenShift Preflight Checks
+==========================
+
+This role detects common problems prior to installing OpenShift.
+
+Requirements
+------------
+
+* Ansible 2.2+
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+```yaml
+---
+- hosts: OSEv3
+ roles:
+ - openshift_preflight/init
+
+- hosts: OSEv3
+ name: checks that apply to all hosts
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/common
+
+- hosts: OSEv3
+ name: verify check results
+ gather_facts: no
+ roles:
+ - openshift_preflight/verify_status
+```
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Customer Success team (dev@lists.openshift.redhat.com)
diff --git a/roles/openshift_preflight/base/library/aos_version.py b/roles/openshift_preflight/base/library/aos_version.py
new file mode 100755
index 000000000..f7fcb6da5
--- /dev/null
+++ b/roles/openshift_preflight/base/library/aos_version.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+An Ansible module for determining whether more than one minor version
+of any atomic-openshift package is available, which would indicate
+that repositories for different versions of the same product are
+enabled, which may cause problems.
+
+Also, determine if the version requested is available down to the
+precision requested.
+'''
+
+# import os
+# import sys
+import yum # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main(): # pylint: disable=missing-docstring
+ module = AnsibleModule(
+ argument_spec=dict(
+ version=dict(required=True)
+ ),
+ supports_check_mode=True
+ )
+
+ # NOTE(rhcarvalho): sosiouxme added _unmute, but I couldn't find a case yet
+ # for when it is actually necessary. Leaving it commented out for now,
+ # though this comment and the commented out code related to _unmute should
+ # be deleted later if not proven necessary.
+
+ # sys.stdout = os.devnull # mute yum so it doesn't break our output
+ # sys.stderr = os.devnull # mute yum so it doesn't break our output
+
+ # def _unmute(): # pylint: disable=missing-docstring
+ # sys.stdout = sys.__stdout__
+
+ def bail(error): # pylint: disable=missing-docstring
+ # _unmute()
+ module.fail_json(msg=error)
+
+ yb = yum.YumBase() # pylint: disable=invalid-name
+
+ # search for package versions available for aos pkgs
+ expected_pkgs = [
+ 'atomic-openshift',
+ 'atomic-openshift-master',
+ 'atomic-openshift-node',
+ ]
+ try:
+ pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
+ except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
+ # you only hit this if *none* of the packages are available
+ bail('Unable to find any atomic-openshift packages. \nCheck your subscription and repo settings. \n%s' % e)
+
+ # determine what level of precision we're expecting for the version
+ expected_version = module.params['version']
+ if expected_version.startswith('v'): # v3.3 => 3.3
+ expected_version = expected_version[1:]
+ num_dots = expected_version.count('.')
+
+ pkgs_by_name_version = {}
+ pkgs_precise_version_found = {}
+ for pkg in pkgs:
+ # get expected version precision
+ match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
+ if match_version == expected_version:
+ pkgs_precise_version_found[pkg.name] = True
+ # get x.y version precision
+ minor_version = '.'.join(pkg.version.split('.')[:2])
+ if pkg.name not in pkgs_by_name_version:
+ pkgs_by_name_version[pkg.name] = {}
+ pkgs_by_name_version[pkg.name][minor_version] = True
+
+ # see if any packages couldn't be found at requested version
+ # see if any packages are available in more than one minor version
+ not_found = []
+ multi_found = []
+ for name in expected_pkgs:
+ if name not in pkgs_precise_version_found:
+ not_found.append(name)
+ if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
+ multi_found.append(name)
+ if not_found:
+ msg = 'Not all of the required packages are available at the requested version %s:\n' % expected_version
+ for name in not_found:
+ msg += ' %s\n' % name
+ bail(msg + 'Please check your subscriptions and enabled repositories.')
+ if multi_found:
+ msg = 'Multiple minor versions of these packages are available\n'
+ for name in multi_found:
+ msg += ' %s\n' % name
+ bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
+
+ # _unmute()
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
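aos_version.py matches the requested version only to the precision it was given ('3.3' accepts any 3.3.z, while '3.3.1' requires exactly 3.3.1) and separately flags packages that appear in more than one minor version. The precision check, isolated here for illustration:

```python
# Standalone illustration of the precision matching used in aos_version above.
def matches_requested(pkg_version, requested):
    """True if pkg_version matches `requested` to the precision requested."""
    if requested.startswith('v'):        # v3.3 => 3.3, as in the module
        requested = requested[1:]
    num_dots = requested.count('.')
    truncated = '.'.join(pkg_version.split('.')[:num_dots + 1])
    return truncated == requested


assert matches_requested('3.3.1.14', '3.3')       # minor-level precision
assert matches_requested('3.3.1.14', 'v3.3.1')    # micro-level precision, leading v
assert not matches_requested('3.4.0', '3.3')
```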
diff --git a/roles/openshift_preflight/base/library/check_yum_update.py b/roles/openshift_preflight/base/library/check_yum_update.py
new file mode 100755
index 000000000..296ebd44f
--- /dev/null
+++ b/roles/openshift_preflight/base/library/check_yum_update.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Ansible module to test whether a yum update or install will succeed,
+without actually performing it or running yum.
+parameters:
+ packages: (optional) A list of package names to install or update.
+ If omitted, all installed RPMs are considered for updates.
+'''
+
+# import os
+import sys
+import yum # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main(): # pylint: disable=missing-docstring,too-many-branches
+ module = AnsibleModule(
+ argument_spec=dict(
+ packages=dict(type='list', default=[])
+ ),
+ supports_check_mode=True
+ )
+
+ # NOTE(rhcarvalho): sosiouxme added _unmute, but I couldn't find a case yet
+ # for when it is actually necessary. Leaving it commented out for now,
+ # though this comment and the commented out code related to _unmute should
+ # be deleted later if not proven necessary.
+
+ # sys.stdout = os.devnull # mute yum so it doesn't break our output
+
+ # def _unmute(): # pylint: disable=missing-docstring
+ # sys.stdout = sys.__stdout__
+
+ def bail(error): # pylint: disable=missing-docstring
+ # _unmute()
+ module.fail_json(msg=error)
+
+ yb = yum.YumBase() # pylint: disable=invalid-name
+ # determine if the existing yum configuration is valid
+ try:
+ yb.repos.populateSack(mdtype='metadata', cacheonly=1)
+ # for error of type:
+ # 1. can't reach the repo URL(s)
+ except yum.Errors.NoMoreMirrorsRepoError as e: # pylint: disable=invalid-name
+ bail('Error getting data from at least one yum repository: %s' % e)
+ # 2. invalid repo definition
+ except yum.Errors.RepoError as e: # pylint: disable=invalid-name
+ bail('Error with yum repository configuration: %s' % e)
+ # 3. other/unknown
+ # * just report the problem verbatim
+ except: # pylint: disable=bare-except; # noqa
+ bail('Unexpected error with yum repository: %s' % sys.exc_info()[1])
+
+ packages = module.params['packages']
+ no_such_pkg = []
+ for pkg in packages:
+ try:
+ yb.install(name=pkg)
+ except yum.Errors.InstallError as e: # pylint: disable=invalid-name
+ no_such_pkg.append(pkg)
+ except: # pylint: disable=bare-except; # noqa
+ bail('Unexpected error with yum install/update: %s' %
+ sys.exc_info()[1])
+ if not packages:
+ # no packages requested means test a yum update of everything
+ yb.update()
+ elif no_such_pkg:
+ # wanted specific packages to install but some aren't available
+ user_msg = 'Cannot install all of the necessary packages. Unavailable:\n'
+ for pkg in no_such_pkg:
+ user_msg += ' %s\n' % pkg
+ user_msg += 'You may need to enable one or more yum repositories to make this content available.'
+ bail(user_msg)
+
+ try:
+ txn_result, txn_msgs = yb.buildTransaction()
+ except: # pylint: disable=bare-except; # noqa
+ bail('Unexpected error during dependency resolution for yum update: \n %s' %
+ sys.exc_info()[1])
+
+ # find out if there are any errors with the update/install
+ if txn_result == 0: # 'normal exit' meaning there's nothing to install/update
+ pass
+ elif txn_result == 1: # error with transaction
+ user_msg = 'Could not perform a yum update.\n'
+ if len(txn_msgs) > 0:
+ user_msg += 'Errors from dependency resolution:\n'
+ for msg in txn_msgs:
+ user_msg += ' %s\n' % msg
+ user_msg += 'You should resolve these issues before proceeding with an install.\n'
+ user_msg += 'You may need to remove or downgrade packages or enable/disable yum repositories.'
+ bail(user_msg)
+ # TODO: it would be nice depending on the problem:
+ # 1. dependency for update not found
+ # * construct the dependency tree
+ # * find the installed package(s) that required the missing dep
+ # * determine if any of these packages matter to openshift
+ # * build helpful error output
+ # 2. conflicts among packages in available content
+ # * analyze dependency tree and build helpful error output
+ # 3. other/unknown
+ # * report the problem verbatim
+ # * add to this list as we come across problems we can clearly diagnose
+ elif txn_result == 2: # everything resolved fine
+ pass
+ else:
+ bail('Unknown error(s) from dependency resolution. Exit Code: %d:\n%s' %
+ (txn_result, txn_msgs))
+
+ # _unmute()
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_preflight/common/meta/main.yml b/roles/openshift_preflight/common/meta/main.yml
new file mode 100644
index 000000000..6f23cbf3b
--- /dev/null
+++ b/roles/openshift_preflight/common/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_preflight/base
diff --git a/roles/openshift_preflight/common/tasks/main.yml b/roles/openshift_preflight/common/tasks/main.yml
new file mode 100644
index 000000000..f1a4a160e
--- /dev/null
+++ b/roles/openshift_preflight/common/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# check content available on all hosts
+- when: not openshift.common.is_containerized | bool
+ block:
+
+ - name: determine if yum update will work
+ action: check_yum_update
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'determine if yum update will work'})] }}"
+
+ - name: determine if expected version matches what is available
+ aos_version:
+ version: "{{ openshift_release }}"
+ when:
+ - deployment_type == "openshift-enterprise"
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'determine if expected version matches what is available'})] }}"
diff --git a/roles/openshift_preflight/init/meta/main.yml b/roles/openshift_preflight/init/meta/main.yml
new file mode 100644
index 000000000..0bbeadd34
--- /dev/null
+++ b/roles/openshift_preflight/init/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_facts
diff --git a/roles/openshift_preflight/init/tasks/main.yml b/roles/openshift_preflight/init/tasks/main.yml
new file mode 100644
index 000000000..bf2d82196
--- /dev/null
+++ b/roles/openshift_preflight/init/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- name: set common variables
+ set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results | default([]) }}"
diff --git a/roles/openshift_preflight/masters/meta/main.yml b/roles/openshift_preflight/masters/meta/main.yml
new file mode 100644
index 000000000..6f23cbf3b
--- /dev/null
+++ b/roles/openshift_preflight/masters/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_preflight/base
diff --git a/roles/openshift_preflight/masters/tasks/main.yml b/roles/openshift_preflight/masters/tasks/main.yml
new file mode 100644
index 000000000..35fb1e3ca
--- /dev/null
+++ b/roles/openshift_preflight/masters/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+# determine if yum install of master pkgs will work
+- when: not openshift.common.is_containerized | bool
+ block:
+
+ - name: main master packages availability
+ check_yum_update:
+ packages:
+ - "{{ openshift.common.service_type }}"
+ - "{{ openshift.common.service_type }}-clients"
+ - "{{ openshift.common.service_type }}-master"
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'main master packages availability'})] }}"
+
+ - name: other master packages availability
+ check_yum_update:
+ packages:
+ - etcd
+ - bash-completion
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-kubernetes
+ - cockpit-shell
+ - cockpit-ws
+ - httpd-tools
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'other master packages availability'})] }}"
diff --git a/roles/openshift_preflight/nodes/meta/main.yml b/roles/openshift_preflight/nodes/meta/main.yml
new file mode 100644
index 000000000..6f23cbf3b
--- /dev/null
+++ b/roles/openshift_preflight/nodes/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_preflight/base
diff --git a/roles/openshift_preflight/nodes/tasks/main.yml b/roles/openshift_preflight/nodes/tasks/main.yml
new file mode 100644
index 000000000..a10e69024
--- /dev/null
+++ b/roles/openshift_preflight/nodes/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+# determine if yum install of node pkgs will work
+- when: not openshift.common.is_containerized | bool
+ block:
+
+ - name: main node packages availability
+ check_yum_update:
+ packages:
+ - "{{ openshift.common.service_type }}"
+ - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift.common.service_type }}-sdn-ovs"
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'main node packages availability'})] }}"
+
+ - name: other node packages availability
+ check_yum_update:
+ packages:
+ - docker
+ - PyYAML
+ - firewalld
+ - iptables
+ - iptables-services
+ - nfs-utils
+ - ntp
+ - yum-utils
+ - dnsmasq
+ - libselinux-python
+ - ceph-common
+ - glusterfs-fuse
+ - iscsi-initiator-utils
+ - pyparted
+ - python-httplib2
+ - openssl
+ - flannel
+ - bind
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'other node packages availability'})] }}"
diff --git a/roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py b/roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py
new file mode 100644
index 000000000..180ed8d8f
--- /dev/null
+++ b/roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py
@@ -0,0 +1,96 @@
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Ansible callback plugin.
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+from ansible.utils.color import stringc
+
+
+class CallbackModule(CallbackBase):
+ '''
+ This callback plugin stores task results and summarizes failures.
+ The file name is prefixed with `zz_` so that Ansible loads this plugin
+ last, making its output the last thing that users see.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'failure_summary'
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ self.__failures = []
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+ self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
+
+ def v2_playbook_on_stats(self, stats):
+ super(CallbackModule, self).v2_playbook_on_stats(stats)
+ # TODO: update condition to consider a host var or env var to
+ # enable/disable the summary, so that we can control the output from a
+ # play.
+ if self.__failures:
+ self._print_failure_summary()
+
+ def _print_failure_summary(self):
+ '''Print a summary of failed tasks (including ignored failures).'''
+ self._display.display(u'\nFailure summary:\n')
+
+ # TODO: group failures by host or by task. If grouped by host, it is
+ # easy to see all problems of a given host. If grouped by task, it is
+ # easy to see which hosts need the same fix.
+
+ width = len(str(len(self.__failures)))
+ initial_indent_format = u' {{:>{width}}}. '.format(width=width)
+ initial_indent_len = len(initial_indent_format.format(0))
+ subsequent_indent = u' ' * initial_indent_len
+ subsequent_extra_indent = u' ' * (initial_indent_len + 10)
+
+ for i, failure in enumerate(self.__failures, 1):
+ lines = _format_failure(failure)
+ self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), lines[0]))
+ for line in lines[1:]:
+ line = line.replace(u'\n', u'\n' + subsequent_extra_indent)
+ indented = u'{}{}'.format(subsequent_indent, line)
+ self._display.display(indented)
+
+
+# Reason: disable pylint protected-access because we need to access _*
+# attributes of a task result to implement this method.
+# Status: permanently disabled unless Ansible's API changes.
+# pylint: disable=protected-access
+def _format_failure(failure):
+ '''Return a list of pretty-formatted lines describing a failure, including
+ relevant information about it. Line separators are not included.'''
+ result = failure['result']
+ host = result._host.get_name()
+ play = _get_play(result._task)
+ if play:
+ play = play.get_name()
+ task = result._task.get_name()
+ msg = result._result.get('msg', u'???')
+ rows = (
+ (u'Host', host),
+ (u'Play', play),
+ (u'Task', task),
+ (u'Message', stringc(msg, C.COLOR_ERROR)),
+ )
+ row_format = '{:10}{}'
+ return [row_format.format(header + u':', body) for header, body in rows]
+
+
+# Reason: disable pylint protected-access because we need to access _*
+# attributes of obj to implement this function.
+# This is inspired by ansible.playbook.base.Base.dump_me.
+# Status: permanently disabled unless Ansible's API changes.
+# pylint: disable=protected-access
+def _get_play(obj):
+ '''Given a task or block, recursively tries to find its parent play.'''
+ if hasattr(obj, '_play'):
+ return obj._play
+ if getattr(obj, '_parent', None):
+ return _get_play(obj._parent)
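The callback buffers every failed task result and prints a numbered summary after the play recap. The snippet below reproduces only the row layout used by `_format_failure`, with hand-written stand-in values, since a real Ansible TaskResult is not built by hand here; the host, play, and task names are hypothetical:

```python
# Rough illustration of the row layout produced by _format_failure above.
rows = (
    (u'Host', u'master1.example.com'),                    # hypothetical values
    (u'Play', u'checks that apply to all hosts'),
    (u'Task', u'determine if yum update will work'),
    (u'Message', u'Error with yum repository configuration: ...'),
)
row_format = '{:10}{}'
for header, body in rows:
    print(row_format.format(header + u':', body))
# Host:     master1.example.com
# Play:     checks that apply to all hosts
# Task:     determine if yum update will work
# Message:  Error with yum repository configuration: ...
```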
diff --git a/roles/openshift_preflight/verify_status/tasks/main.yml b/roles/openshift_preflight/verify_status/tasks/main.yml
new file mode 100644
index 000000000..36ccf648a
--- /dev/null
+++ b/roles/openshift_preflight/verify_status/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: find check failures
+ set_fact:
+ oo_preflight_check_failures: "{{ oo_preflight_check_results | select('failed', 'equalto', True) | list }}"
+
+- name: ensure all checks succeed
+ action: fail
+ when: oo_preflight_check_failures
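verify_status fails the play when any accumulated result reports failed: true. The Jinja chain `select('failed', 'equalto', True) | list` behaves roughly like the list comprehension below (the sample results are made up for illustration):

```python
# Rough Python equivalent of the failure filter used in verify_status above.
oo_preflight_check_results = [
    {'_task': 'determine if yum update will work', 'failed': False},
    {'_task': 'main master packages availability', 'failed': True,
     'msg': 'Cannot install all of the necessary packages.'},
]

failures = [r for r in oo_preflight_check_results if r.get('failed')]

if failures:
    for result in failures:
        print('{}: {}'.format(result['_task'], result.get('msg', '')))
    raise SystemExit(1)   # the role reaches for the `fail` module at this point
```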
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d5ed9c09d..23dcd0440 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -37,7 +37,7 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
- name: Configure origin yum repositories RHEL/CentOS
copy:
@@ -47,4 +47,4 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 2d9243545..ef2cd6603 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -2,9 +2,9 @@
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default('1') %}
+{% set enable_repo = repo.enabled | default(1) %}
enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default('1') %}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
{% for key, value in repo.iteritems() %}
{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
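The switch from string to integer defaults matters because the rendered test `enable_repo == 1 or enable_repo == True` never matches the old default of '1', so repos that did not set the flag explicitly were written out with enabled=0. In Python terms:

```python
# Why yum_repo.j2 now defaults enabled/gpgcheck to 1 rather than '1'.
def coerce_flag(value):
    # Same comparison the template renders for enabled= and gpgcheck=.
    return 1 if (value == 1 or value is True) else 0


assert coerce_flag('1') == 0   # old string default: flag rendered as 0
assert coerce_flag(1) == 1     # new integer default
assert coerce_flag(True) == 1  # explicit boolean still works
```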
diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index 319611a0b..da48e42c1 100644
--- a/roles/openshift_repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
@@ -4,4 +4,4 @@
# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise','atomic-enterprise','openshift-enterprise']
+known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
index 8715fc64e..b8cbe9a84 100644
--- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
+++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
@@ -1,3 +1,4 @@
+---
####
#
# OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
@@ -9,7 +10,7 @@
path: /tmp/openshift
state: directory
owner: root
- mode: 700
+ mode: 0700
- name: Create service account configs
template:
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index 1ff9e6dcb..d83ccf7de 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -1,3 +1,4 @@
+---
- name: test if service accounts exists
command: >
{{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }}
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 8b8471745..cc674d3fd 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -48,6 +48,13 @@ osnl_volume_num_start: 3
# How many volumes/partitions to build, with the size we stated.
osnl_number_of_volumes: 2
+# osnl_volume_reclaim_policy
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle" (default).
+osnl_volume_reclaim_policy: "Recycle"
+
```
## Dependencies
@@ -71,6 +78,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
## Full example
@@ -96,6 +104,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
* Run the playbook:
```
diff --git a/roles/openshift_storage_nfs_lvm/defaults/main.yml b/roles/openshift_storage_nfs_lvm/defaults/main.yml
index f81cdc724..48352187c 100644
--- a/roles/openshift_storage_nfs_lvm/defaults/main.yml
+++ b/roles/openshift_storage_nfs_lvm/defaults/main.yml
@@ -8,3 +8,10 @@ osnl_mount_dir: /exports/openshift
# Volume Group to use.
osnl_volume_group: openshiftvg
+
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle".
+# See https://docs.openshift.com/enterprise/3.0/architecture/additional_concepts/storage.html#pv-recycling-policy
+osnl_volume_reclaim_policy: "Recycle"
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index bed1216f8..50d94f6a3 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
versions:
- all
categories:
- - openshift
-dependencies: []
+ - openshift
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index ea0cc2a94..49dd657b5 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -2,7 +2,7 @@
# TODO -- this may actually work on atomic hosts
- fail:
msg: "openshift_storage_nfs_lvm is not compatible with atomic host"
- when: openshift.common.is_atomic | true
+ when: openshift.common.is_atomic | bool
- name: Create lvm volumes
lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G
diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
index 3c4d2f56c..c273aca9f 100644
--- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
+++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
@@ -12,10 +12,10 @@
"storage": "{{ osnl_volume_size }}Gi"
},
"accessModes": [ "ReadWriteOnce", "ReadWriteMany" ],
- "persistentVolumeReclaimPolicy": "Recycle",
+ "persistentVolumeReclaimPolicy": "{{ osnl_volume_reclaim_policy }}",
"nfs": {
- "Server": "{{ inventory_hostname }}",
- "Path": "{{ osnl_mount_dir }}/{{ item }}"
+ "server": "{{ inventory_hostname }}",
+ "path": "{{ osnl_mount_dir }}/{{ item }}"
}
}
}
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index a3a99d248..0f2a660a7 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -22,7 +22,7 @@
- set_fact:
openshift_image_tag: "{{ 'v' + openshift_image_tag }}"
- when: openshift_image_tag is defined and openshift_image_tag[0] != 'v'
+ when: openshift_image_tag is defined and openshift_image_tag[0] != 'v' and openshift_image_tag != 'latest'
- set_fact:
openshift_pkg_version: "{{ '-' + openshift_pkg_version }}"
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 718537287..cd0f20ae9 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,8 +1,9 @@
---
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
- # Expects a leading "v" in inventory, strip it off here:
- openshift_version: "{{ openshift_image_tag[1:].split('-')[0] }}"
+ # Expects a leading "v" in inventory, strip it off here unless
+ # openshift_image_tag=latest
+ openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
when: openshift_image_tag is defined and openshift_version is not defined
- name: Set containerized version to configure if openshift_release specified
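The updated expression strips the leading "v" expected in inventory and drops any "-release" suffix, except when openshift_image_tag is literally "latest", which now passes through untouched. In Python terms the expression behaves like:

```python
# Python rendering of the Jinja expression above (illustration only).
def containerized_version(openshift_image_tag):
    if openshift_image_tag == 'latest':
        return openshift_image_tag
    # Expects a leading "v" in inventory; strip it and any trailing "-suffix".
    return openshift_image_tag[1:].split('-')[0]


assert containerized_version('v3.4.0.39-1') == '3.4.0.39'
assert containerized_version('latest') == 'latest'
```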
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index c13c5dfc9..43db3cc74 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -4,6 +4,9 @@ OS Firewall
OS Firewall manages firewalld and iptables firewall settings for a minimal use
case (Adding/Removing rules based on protocol and port number).
+Note: firewalld is not supported on Atomic Host
+https://bugzilla.redhat.com/show_bug.cgi?id=1403331
+
Requirements
------------
@@ -14,7 +17,7 @@ Role Variables
| Name | Default | Description |
|---------------------------|---------|----------------------------------------|
-| os_firewall_use_firewalld | False | If false, use iptables |
+| os_firewall_use_firewalld | True | If false, use iptables |
| os_firewall_allow | [] | List of service,port mappings to allow |
| os_firewall_deny | [] | List of service, port mappings to deny |
@@ -31,6 +34,7 @@ Use iptables and open tcp ports 80 and 443:
---
- hosts: servers
vars:
+ os_firewall_use_firewalld: false
os_firewall_allow:
- service: httpd
port: 80/tcp
@@ -45,7 +49,6 @@ Use firewalld and open tcp port 443 and close previously open tcp port 80:
---
- hosts: servers
vars:
- os_firewall_use_firewalld: true
os_firewall_allow:
- service: https
port: 443/tcp
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index c870a301a..4c544122f 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -1,9 +1,7 @@
---
os_firewall_enabled: True
-# TODO: Upstream kubernetes only supports iptables currently
-# TODO: it might be possible to still use firewalld if we wire up the created
-# chains with the public zone (or the zone associated with the correct
-# interfaces)
-os_firewall_use_firewalld: False
+# firewalld is not supported on Atomic Host
+# https://bugzilla.redhat.com/show_bug.cgi?id=1403331
+os_firewall_use_firewalld: "{{ False if openshift.common.is_atomic | bool else True }}"
os_firewall_allow: []
os_firewall_deny: []
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 37bb16f35..8ba650994 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
-from subprocess import call, check_output
+import subprocess
DOCUMENTATION = '''
---
@@ -29,7 +29,10 @@ class IpTablesAddRuleError(IpTablesError):
class IpTablesRemoveRuleError(IpTablesError):
- pass
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ super(IpTablesRemoveRuleError, self).__init__(msg, cmd, exit_code,
+ output)
+ self.chain = chain
class IpTablesSaveError(IpTablesError):
@@ -37,14 +40,14 @@ class IpTablesSaveError(IpTablesError):
class IpTablesCreateChainError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
@@ -53,7 +56,7 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# TODO: implement rollbacks for any events that were successful and an
# exception was thrown later. For example, when the chain is created
# successfully, but the add/remove rule fails.
-class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
+class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def __init__(self, module):
self.module = module
self.ip_version = module.params['ip_version']
@@ -68,8 +71,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def save(self):
try:
- self.output.append(check_output(self.save_cmd,
- stderr=subprocess.STDOUT))
+ self.output.append(subprocess.check_output(self.save_cmd, stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
@@ -92,7 +94,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
cmd = self.cmd + ['-A'] + rule
try:
- self.output.append(check_output(cmd))
+ self.output.append(subprocess.check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
@@ -112,7 +114,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
cmd = self.cmd + ['-D'] + rule
try:
- self.output.append(check_output(cmd))
+ self.output.append(subprocess.check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
@@ -123,11 +125,19 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
- return True if call(check_cmd) == 0 else False
+ return True if subprocess.call(check_cmd) == 0 else False
+
+ @staticmethod
+ def port_as_argument(port):
+ if isinstance(port, int):
+ return str(port)
+ if isinstance(port, basestring): # noqa: F405
+ return port.replace('-', ":")
+ return port
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
- '-m', proto, '--dport', str(port), '-j', 'ACCEPT']
+ '-m', proto, '--dport', IpTablesManager.port_as_argument(port), '-j', 'ACCEPT']
def create_jump(self):
if self.check_mode:
@@ -136,7 +146,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
try:
cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
- output = check_output(cmd, stderr=subprocess.STDOUT)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
input_rules = [s.split() for s in to_native(output).split('\n')]
@@ -155,8 +165,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
# Naively assume that if the last row is a REJECT or DROP rule,
# then we can insert our rule right before it, otherwise we
# assume that we can just append the rule.
- if (last_rule_num and last_rule_target
- and last_rule_target in ['REJECT', 'DROP']):
+ if (last_rule_num and last_rule_target and last_rule_target in ['REJECT', 'DROP']):
# insert rule
cmd = self.cmd + ['-I', self.jump_rule_chain,
str(last_rule_num)]
@@ -164,7 +173,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
- output = check_output(cmd, stderr=subprocess.STDOUT)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
self.changed = True
self.output.append(output)
self.save()
@@ -192,8 +201,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
else:
try:
cmd = self.cmd + ['-N', self.chain]
- self.output.append(check_output(cmd,
- stderr=subprocess.STDOUT))
+ self.output.append(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
@@ -203,26 +211,26 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
- )
+ )
def jump_rule_exists(self):
cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
- return True if call(cmd) == 0 else False
+ return True if subprocess.call(cmd) == 0 else False
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
- return True if call(cmd) == 0 else False
+ return True if subprocess.call(cmd) == 0 else False
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
- def gen_save_cmd(self): # pylint: disable=no-self-use
+ def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
def main():
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
name=dict(required=True),
action=dict(required=True, choices=['add', 'remove',
@@ -231,7 +239,7 @@ def main():
create_jump_rule=dict(required=False, type='bool', default=True),
jump_rule_chain=dict(required=False, default='INPUT'),
protocol=dict(required=False, choices=['tcp', 'udp']),
- port=dict(required=False, type='int'),
+ port=dict(required=False, type='str'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
@@ -266,9 +274,9 @@ def main():
output=iptables_manager.output)
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import * # noqa: F403,E402
+from ansible.module_utils._text import to_native # noqa: E402
if __name__ == '__main__':
main()
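With port now declared as a string, callers can pass either a single port or a range such as "30000-32767"; port_as_argument() normalizes both into the colon form iptables expects for --dport. Isolated here for illustration (the module itself checks basestring because it targets Python 2):

```python
# Standalone sketch of the port normalization added above.
def port_as_argument(port):
    if isinstance(port, int):
        return str(port)
    if isinstance(port, str):
        return port.replace('-', ':')
    return port


assert port_as_argument(10250) == '10250'
assert port_as_argument('30000-32767') == '30000:32767'
# iptables then receives, e.g.:  --dport 30000:32767
```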
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index 076e5e311..20efe5b0d 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -1,4 +1,10 @@
---
+- name: Assert - Do not use firewalld on Atomic Host
+ assert:
+ that: not os_firewall_use_firewalld | bool
+ msg: "Firewalld is not supported on Atomic Host"
+ when: openshift.common.is_atomic | bool
+
- include: firewall/firewalld.yml
when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool
diff --git a/roles/rhel_subscribe/meta/main.yml b/roles/rhel_subscribe/meta/main.yml
index 6204a5aa5..0bbeadd34 100644
--- a/roles/rhel_subscribe/meta/main.yml
+++ b/roles/rhel_subscribe/meta/main.yml
@@ -1,2 +1,3 @@
+---
dependencies:
-- role: openshift_facts
+ - role: openshift_facts