280 files changed, 7371 insertions, 3698 deletions
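The first new file below, files/origin-components/template-service-broker-registration.yaml, registers the template service broker with the service catalog. A minimal sketch of how such a template might be instantiated — the parameter names come from the template itself, while sourcing CA_BUNDLE from the master's service-signer certificate is an assumption:
# Sketch only: the CA path is an assumption; parameter names come from the template.
CA=$(base64 -w 0 /etc/origin/master/service-signer.crt)
oc process -f files/origin-components/template-service-broker-registration.yaml \
  -p TSB_NAMESPACE=openshift-template-service-broker \
  -p CA_BUNDLE="$CA" \
  | oc apply -f -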
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 4d61a759b..608b430ce 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.134.0 ./ +3.7.0-0.147.0 ./ diff --git a/files/origin-components/template-service-broker-registration.yaml b/files/origin-components/template-service-broker-registration.yaml new file mode 100644 index 000000000..2086978f0 --- /dev/null +++ b/files/origin-components/template-service-broker-registration.yaml @@ -0,0 +1,25 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-registration +parameters: +- name: TSB_NAMESPACE + value: openshift-template-service-broker +- name: CA_BUNDLE + required: true +objects: +# register the tsb with the service catalog +- apiVersion: servicecatalog.k8s.io/v1alpha1 + kind: ServiceBroker + metadata: + name: template-service-broker + spec: + url: https://apiserver.${TSB_NAMESPACE}.svc:443/brokers/template.openshift.io + insecureSkipTLSVerify: false + caBundle: ${CA_BUNDLE} + authInfo: + bearer: + secretRef: + kind: Secret + name: templateservicebroker-client + namespace: ${TSB_NAMESPACE} diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index f0f250480..83a05370a 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -707,11 +707,6 @@ def oo_openshift_env(hostvars): if regex.match(key): facts[key] = hostvars[key] - migrations = {'openshift_router_selector': 'openshift_hosted_router_selector', - 'openshift_registry_selector': 'openshift_hosted_registry_selector'} - for old_fact, new_fact in migrations.items(): - if old_fact in facts and new_fact not in facts: - facts[new_fact] = facts[old_fact] return facts diff --git a/images/installer/README_CONTAINER_IMAGE.md b/images/installer/README_CONTAINER_IMAGE.md index bc1ebb4a8..bfe3661c0 100644 --- a/images/installer/README_CONTAINER_IMAGE.md +++ b/images/installer/README_CONTAINER_IMAGE.md @@ -45,4 +45,6 @@ These options may be set via the ``atomic`` ``--set`` flag. For defaults see ``r * ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container -* INVENTORY_FILE - Full path for the inventory to use from the host
\ No newline at end of file +* INVENTORY_FILE - Full path for the inventory to use from the host + +* INVENTORY_DIR - Full path for the inventory directory to use (e.g. for use with a hybrid dynamic/static inventory) diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run index 70aa0bac3..cd38a6ff0 100755 --- a/images/installer/root/usr/local/bin/run +++ b/images/installer/root/usr/local/bin/run @@ -19,6 +19,9 @@ if [[ -v INVENTORY_FILE ]]; then # Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below # does not attempt to modify the original cp -a ${INVENTORY_FILE} ${INVENTORY} +elif [[ -v INVENTORY_DIR ]]; then + INVENTORY="$(mktemp -d)" + cp -R ${INVENTORY_DIR}/* ${INVENTORY} elif [[ -v INVENTORY_URL ]]; then curl -o ${INVENTORY} ${INVENTORY_URL} elif [[ -v DYNAMIC_SCRIPT_URL ]]; then @@ -29,7 +32,7 @@ elif [[ -v GENERATE_INVENTORY ]]; then /usr/local/bin/generate ${INVENTORY} else echo - echo "One of INVENTORY_FILE, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided." + echo "One of INVENTORY_FILE, INVENTORY_DIR, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided." exec /usr/local/bin/usage fi INVENTORY_ARG="-i ${INVENTORY}" diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.example index 03fbcc63c..0b6050891 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.example @@ -27,7 +27,8 @@ ansible_ssh_user=root debug_level=2 # Specify the deployment type. Valid values are origin and openshift-enterprise. -openshift_deployment_type=openshift-enterprise +openshift_deployment_type=origin +#openshift_deployment_type=openshift-enterprise # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we # rely on the version running on the first master. Works best for containerized installs where we can usually @@ -58,6 +59,8 @@ openshift_release=v3.7 #openshift_use_etcd_system_container=False # # In either case, system_images_registry must be specified to be able to find the system images +#system_images_registry="docker.io" +# when openshift_deployment_type=='openshift-enterprise' #system_images_registry="registry.access.redhat.com" # Manage openshift example imagestreams and templates during install and upgrade @@ -124,15 +127,15 @@ openshift_release=v3.7 # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" +# Specify exact version of Docker to configure or upgrade to. +# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. +# docker_version="1.12.1" + # Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. # Uncomment below to disable; for example if your kernel does not support the # Docker overlay/overlay2 storage drivers with SELinux enabled. #openshift_docker_selinux_enabled=False -# Specify exact version of Docker to configure or upgrade to. -# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. -# docker_version="1.12.1" - # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. 
# docker_upgrade=False @@ -179,7 +182,7 @@ openshift_release=v3.7 #oreg_auth_credentials_replace: True # OpenShift repository configuration -#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] #openshift_repos_enable_testing=false # htpasswd auth @@ -237,9 +240,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # CloudForms Management Engine (ManageIQ) App Install # # Enables installation of MIQ server. Recommended for dedicated -# clusters only. See roles/openshift_cfme/README.md for instructions +# clusters only. See roles/openshift_management/README.md for instructions # and requirements. -#openshift_cfme_install_app=False +#openshift_management_install_management=False # Cloud Provider Configuration # @@ -346,7 +349,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default storage plugin dependencies to install, by default the ceph and # glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs'] +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] # OpenShift Router Options # @@ -432,6 +435,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # path using these options would be "/exports/registry" #openshift_hosted_registry_storage_kind=nfs #openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +# nfs_directory must conform to the DNS-1123 subdomain standard: it must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character #openshift_hosted_registry_storage_nfs_directory=/exports #openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_registry_storage_volume_name=registry @@ -444,6 +449,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_kind=nfs #openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] #openshift_hosted_registry_storage_host=nfs.example.com +# nfs_directory must conform to the DNS-1123 subdomain standard: it must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character #openshift_hosted_registry_storage_nfs_directory=/exports #openshift_hosted_registry_storage_volume_name=registry #openshift_hosted_registry_storage_volume_size=10Gi @@ -457,7 +464,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_volume_size=10Gi # # AWS S3 -# # S3 bucket must already exist. #openshift_hosted_registry_storage_kind=object #openshift_hosted_registry_storage_provider=s3 @@ -543,10 +549,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url; altering the # `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com # Configure the prefix and version for the component images -#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/ -#openshift_metrics_image_version=3.7.0 +#openshift_metrics_image_prefix=docker.io/openshift/origin- +#openshift_metrics_image_version=v3.7 +# when openshift_deployment_type=='openshift-enterprise' +#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/ +#openshift_metrics_image_version=v3.7 # # StorageClass # openshift_storageclass_name=gp2 # openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} @@ -600,7 +609,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # this value must be 1 #openshift_logging_es_cluster_size=1 # Configure the prefix and version for the component images -#openshift_logging_image_prefix=registry.example.com:8888/openshift3/ +#openshift_logging_image_prefix=docker.io/openshift/origin- +#openshift_logging_image_version=v3.7.0 +# when openshift_deployment_type=='openshift-enterprise' +#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/ #openshift_logging_image_version=3.7.0 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' @@ -658,8 +670,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_master_api_port=8443 #openshift_master_console_port=8443 -# set RPM version for debugging purposes -#openshift_pkg_version=-3.1.0.0 +# set exact RPM version (include - prefix) +#openshift_pkg_version=-3.6.0 +# you may also specify version and release, i.e.: +#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7 # Configure custom ca certificate #openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} @@ -671,6 +685,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure custom named certificates (SNI certificates) # +# https://docs.openshift.org/latest/install_config/certificate_customization.html # https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html # # NOTE: openshift_master_named_certificates is cached on masters and is an @@ -735,6 +750,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # openshift_use_dnsmasq is deprecated. This must be true, or installs will fail # in versions >= 3.6 #openshift_use_dnsmasq=False + # Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf # This is useful for POC environments where DNS may not actually be available yet or to set # options like 'strict-order' to alter dnsmasq configuration.
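The context lines above document openshift_node_dnsmasq_additional_config_file, which deploys an extra config file to /etc/dnsmasq.d/openshift-ansible.conf on each node. A sketch of preparing such a file — 'strict-order' is the option named in the comment above, the path follows the inventory example elsewhere in this file, and the forwarder zone and address are hypothetical:
# Sketch only: the server entry below is a hypothetical private-zone forwarder.
cat > /home/bob/ose-dnsmasq.conf <<'EOF'
# Query upstream nameservers strictly in the order they are listed
strict-order
# Forward a private zone to a dedicated DNS server (hypothetical)
server=/corp.example.com/192.168.1.53
EOF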
@@ -817,7 +833,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} #openshift_node_env_vars={"ENABLE_HTTP2": "true"} -# Enable API service auditing, available as of 3.2 +# Enable API service auditing #openshift_master_audit_config={"enabled": true} # # In case you want a more advanced setup for the auditlog you can # use this line. @@ -826,6 +842,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # exist #openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} +# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used +# by deployment_type=origin +#openshift_enable_origin_repo=false + # Validity of the auto-generated OpenShift certificates in days. # See also openshift_hosted_registry_cert_expire_days above. # @@ -874,9 +894,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # You may wish to disable these or make them non fatal # # openshift_upgrade_pre_storage_migration_enabled=true -# openshift_upgrade_pre_storage_migration_fatal==true +# openshift_upgrade_pre_storage_migration_fatal=true # openshift_upgrade_post_storage_migration_enabled=true -# openshift_upgrade_post_storage_migration_fatal==false +# openshift_upgrade_post_storage_migration_fatal=false # host group for masters [masters] @@ -896,3 +916,61 @@ ose3-lb-ansible.test.example.com containerized=false [nodes] ose3-master[1:3]-ansible.test.example.com ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" + +# CloudForms/ManageIQ (CFME/MIQ) Configuration + +# See the readme for full descriptions and getting started +# instructions: ../../roles/openshift_management/README.md or go directly to +# their definitions: ../../roles/openshift_management/defaults/main.yml +# ../../roles/openshift_management/vars/main.yml +# +# Namespace for the CFME project +#openshift_management_project: openshift-management + +# Namespace/project description +#openshift_management_project_description: CloudForms Management Engine + +# Choose 'miq-template' for a podified database install +# Choose 'miq-template-ext-db' for an external database install +# +# If you are using the miq-template-ext-db template then you must add +# the required database parameters to the +# openshift_management_template_parameters variable. +#openshift_management_app_template: miq-template + +# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. +#openshift_management_storage_class: nfs + +# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a +# netapp appliance, then you must set the hostname here. Leave the +# value as 'false' if you are not using external NFS. +#openshift_management_storage_nfs_external_hostname: false + +# [OPTIONAL] - If you are using external NFS then you must set the base +# path to the exports location here. +# +# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports +# that will back the application PV and optionally the database +# pv. Export path definitions, relative to +# {{ openshift_management_storage_nfs_base_dir }} +# +# LOCAL NFS NOTE: +# +# You may also change this value if you want to change the default +# path used for local NFS exports.
+#openshift_management_storage_nfs_base_dir: /exports + +# LOCAL NFS NOTE: +# +# You may override the automatically selected LOCAL NFS server by +# setting this variable. Useful for testing specific task files. +#openshift_management_storage_nfs_local_hostname: false + +# A hash of parameters you want to override or set in the +# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in +# your inventory file as a simple hash. Acceptable values are defined +# under the .parameters list in files/miq-template{-ext-db}.yaml +# Example: +# +# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} +#openshift_management_template_parameters: {} diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example deleted file mode 100644 index 4a0630a69..000000000 --- a/inventory/byo/hosts.origin.example +++ /dev/null @@ -1,895 +0,0 @@ -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd -lb -nfs - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# Enable unsupported configurations, things that will yield a partially -# functioning cluster but would not be supported for production use -#openshift_enable_unsupported_configurations=false - -# SSH user, this user should allow ssh based auth without requiring a -# password. If using ssh key based auth, then the key should be managed by an -# ssh agent. -ansible_ssh_user=root - -# If ansible_ssh_user is not root, ansible_become must be set to true and the -# user must be configured for passwordless sudo -#ansible_become=yes - -# Debug level for all OpenShift components (Defaults to 2) -debug_level=2 - -# Specify the deployment type. Valid values are origin and openshift-enterprise. -openshift_deployment_type=origin - -# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we -# rely on the version running on the first master. Works best for containerized installs where we can usually -# use this to lookup the latest exact version of the container images, which is the tag actually used to configure -# the cluster. For RPM installations we just verify the version detected in your configured repos matches this -# release. -openshift_release=v3.7 - -# Specify an exact container image tag to install or configure. -# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.7.0 - -# Specify an exact rpm version to install or configure. -# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
-#openshift_pkg_version=-3.7.0 - -# This enables all the system containers except for docker: -#openshift_use_system_containers=False -# -# But you can choose separately each component that must be a -# system container: -# -#openshift_use_openvswitch_system_container=False -#openshift_use_node_system_container=False -#openshift_use_master_system_container=False -#openshift_use_etcd_system_container=False -# -# In either case, system_images_registry must be specified to be able to find the system images -#system_images_registry="docker.io" - -# Install the openshift examples -#openshift_install_examples=true - -# Configure logoutURL in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url -#openshift_master_logout_url=http://example.com - -# Configure extensionScripts in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets -#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] - -# Configure extensionStylesheets in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets -#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_oauth_template=/path/to/login-template.html - -# Configure imagePolicyConfig in the master config -# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig -#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} - -# Configure master API rate limits for external clients -#openshift_master_external_ratelimit_qps=200 -#openshift_master_external_ratelimit_burst=400 -# Configure master API rate limits for loopback clients -#openshift_master_loopback_ratelimit_qps=300 -#openshift_master_loopback_ratelimit_burst=600 - -# Docker Configuration -# Add additional, insecure, and blocked registries to global docker configuration -# For enterprise deployment types we ensure that registry.access.redhat.com is -# included if you do not include it -#openshift_docker_additional_registries=registry.example.com -#openshift_docker_insecure_registries=registry.example.com -#openshift_docker_blocked_registries=registry.hacker.com -# Disable pushing to dockerhub -#openshift_docker_disable_push_dockerhub=True -# Use Docker inside a System Container. Note that this is a tech preview and should -# not be used to upgrade! -# The following options for docker are ignored: -# - docker_version -# - docker_upgrade -# The following options must not be used -# - openshift_docker_options -#openshift_docker_use_system_container=False -# Instead of using docker, replace it with cri-o -# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override -# just as container-engine does.
-#openshift_use_crio=False -# Force the registry to use for the docker/crio system container. By default the registry -# will be built off of the deployment type and ansible_distribution. Only -# use this option if you are sure you know what you are doing! -#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" -#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" -# Items added, as is, to end of /etc/sysconfig/docker OPTIONS -# Default value: "--log-driver=journald" -#openshift_docker_options="-l warn --ipv6=false" - -# Specify exact version of Docker to configure or upgrade to. -# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. -# docker_version="1.12.1" - -# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. -# Uncomment below to disable; for example if your kernel does not support the -# Docker overlay/overlay2 storage drivers with SELinux enabled. -#openshift_docker_selinux_enabled=False - -# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. -# docker_upgrade=False - -# Specify exact version of etcd to configure or upgrade to. -# etcd_version="3.1.0" -# Enable etcd debug logging, defaults to false -# etcd_debug=true -# Set etcd log levels by package -# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" - -# Upgrade Hooks -# -# Hooks are available to run custom tasks at various points during a cluster -# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using -# absolute paths, if not the path will be treated as relative to the file where the -# hook is actually used. -# -# Tasks to run before each master is upgraded. -# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml -# -# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible -# upgrade steps, but before we restart system/services. -# openshift_master_upgrade_hook=/usr/share/custom/master.yml -# -# Tasks to run after each master is upgraded and system/services have been restarted. 
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml - - -# Alternate image format string, useful if you've got your own registry mirror -# Configure this setting just on node or master -#oreg_url_master=example.com/openshift3/ose-${component}:${version} -#oreg_url_node=example.com/openshift3/ose-${component}:${version} -# For setting the configuration globally -#oreg_url=example.com/openshift3/ose-${component}:${version} -# If oreg_url points to a registry other than registry.access.redhat.com we can -# modify image streams to point at that registry by setting the following to true -#openshift_examples_modify_imagestreams=true - -# OpenShift repository configuration -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] -#openshift_repos_enable_testing=false - -# htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] -# Defining htpasswd users -#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'} -# or -#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file> - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] -# -# Configure LDAP CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the LDAPPasswordIdentityProvider. -# -#openshift_master_ldap_ca=<ca text> -# or -#openshift_master_ldap_ca_file=<path to local ca file to use> - -# OpenID auth -#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] -# -# Configure OpenID CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the OpenIDIdentityProvider. 
-# -#openshift_master_openid_ca=<ca text> -# or -#openshift_master_openid_ca_file=<path to local ca file to use> - -# Request header auth -#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] -# -# Configure request header CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "clientCA" -# key set within the RequestHeaderIdentityProvider. -# -#openshift_master_request_header_ca=<ca text> -# or -#openshift_master_request_header_ca_file=<path to local ca file to use> - -# CloudForms Management Engine (ManageIQ) App Install -# -# Enables installation of MIQ server. Recommended for dedicated -# clusters only. See roles/openshift_cfme/README.md for instructions -# and requirements. -#openshift_cfme_install_app=False - -# Cloud Provider Configuration -# -# Note: You may make use of environment variables rather than store -# sensitive configuration within the ansible inventory. -# For example: -#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}" -#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" -# -# AWS -#openshift_cloudprovider_kind=aws -# Note: IAM profiles may be used instead of storing API credentials on disk. -#openshift_cloudprovider_aws_access_key=aws_access_key_id -#openshift_cloudprovider_aws_secret_key=aws_secret_access_key -# -# Openstack -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ -#openshift_cloudprovider_openstack_username=username -#openshift_cloudprovider_openstack_password=password -#openshift_cloudprovider_openstack_domain_id=domain_id -#openshift_cloudprovider_openstack_domain_name=domain_name -#openshift_cloudprovider_openstack_tenant_id=tenant_id -#openshift_cloudprovider_openstack_tenant_name=tenant_name -#openshift_cloudprovider_openstack_region=region -#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id -# -# GCE -#openshift_cloudprovider_kind=gce - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure additional projects -#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} - -# Enable cockpit -#osm_use_cockpit=true -# -# Set cockpit plugins -#osm_cockpit_plugins=['cockpit-kubernetes'] - -# Native high availability cluster method with optional load balancer. -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. 
-#openshift_master_cluster_method=native -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Pacemaker high availability cluster method. -# Pacemaker HA environment must be able to self provision the -# configured VIP. For installation openshift_master_cluster_hostname -# must resolve to the configured VIP. -#openshift_master_cluster_method=pacemaker -#openshift_master_cluster_password=openshift_cluster -#openshift_master_cluster_vip=192.168.133.25 -#openshift_master_cluster_public_vip=192.168.133.25 -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Override the default controller lease ttl -#osm_controller_lease_ttl=30 - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# default subdomain to use for exposed routes -#openshift_master_default_subdomain=apps.test.example.com - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_router_selector='region=infra' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. 
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Manage the OpenShift Router -#openshift_hosted_manage_router=true -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. There are multiple combinations of router sharding. The one described -# below supports routers on separate nodes. -# -#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] - -# OpenShift Registry Console Options -# Override the console image prefix for enterprise deployments, not used in origin -# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console" -#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ -# Override image version, defaults to latest for origin, matches the product version for enterprise -#openshift_cockpit_deployer_version=1.4.1 - -# Openshift Registry Options -# -# An OpenShift registry will be created during install if there are -# nodes present with labels matching the default registry selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Registry selector (optional) -# Registry will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_registry_selector='region=infra' -# -# Registry replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift registry selector. -#openshift_hosted_registry_replicas=2 -# -# Validity of the auto-generated certificate in days (optional) -#openshift_hosted_registry_cert_expire_days=730 -# -# Manage the OpenShift Registry -#openshift_hosted_manage_registry=true - -# Registry Storage Options -# -# NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. 
For example, the volume -# path using these options would be "/exports/registry" -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/registry" -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_host=nfs.example.com -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# Openstack -# Volume must already exist. -#openshift_hosted_registry_storage_kind=openstack -#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_registry_storage_openstack_filesystem=ext4 -#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 -#openshift_hosted_registry_storage_volume_size=10Gi -# -# AWS S3 -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_encrypt=false -#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id -#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id -#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Any S3 service (Minio, ExoScale, ...): Basically the same as above -# but with regionendpoint configured -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_accesskey=access_key_id -#openshift_hosted_registry_storage_s3_secretkey=secret_access_key -#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Additional CloudFront Options. When using CloudFront all three -# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ -#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem -#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid - -# Metrics deployment -# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html -# -# By default metrics are not automatically deployed, set this to enable them -#openshift_metrics_install_metrics=true -# -# Storage Options -# If openshift_metrics_storage_kind is unset then metrics will be stored -# in an EmptyDir volume and will be deleted when the cassandra pod terminates. -# Storage options A & B currently support only one cassandra pod which is -# generally enough for up to 1000 pods. Additional volumes can be created -# manually after the fact and metrics scaled per the docs. -# -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/metrics" -#openshift_metrics_storage_kind=nfs -#openshift_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_metrics_storage_nfs_directory=/exports -#openshift_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_metrics_storage_volume_name=metrics -#openshift_metrics_storage_volume_size=10Gi -#openshift_metrics_storage_labels={'storage': 'metrics'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/metrics" -#openshift_metrics_storage_kind=nfs -#openshift_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_metrics_storage_host=nfs.example.com -#openshift_metrics_storage_nfs_directory=/exports -#openshift_metrics_storage_volume_name=metrics -#openshift_metrics_storage_volume_size=10Gi -#openshift_metrics_storage_labels={'storage': 'metrics'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_metrics_storage_kind=dynamic -# -# Other Metrics Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_metrics/README.md -# -# Override metricsPublicURL in the master config for cluster metrics -# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics -# Currently, you may only alter the hostname portion of the url; altering the -# `/hawkular/metrics` path will break installation of metrics. -#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics -# Configure the prefix and version for the component images -#openshift_metrics_image_prefix=docker.io/openshift/origin- -#openshift_metrics_image_version=v3.7.0 -# -# StorageClass -# openshift_storageclass_name=gp2 -# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} -# - -# Logging deployment -# -# Currently logging deployment is disabled by default, enable it by setting this -#openshift_logging_install_logging=true -# -# Logging storage config -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group.
For example, the volume -# path using these options would be "/exports/logging" -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/logging" -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_host=nfs.example.com -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_logging_storage_kind=dynamic -# -# Option D - none -- Logging will use emptydir volumes which are destroyed when -# pods are deleted -# -# Other Logging Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_logging/README.md -# -# Configure loggingPublicURL in the master config for aggregate logging, defaults -# to kibana.{{ openshift_master_default_subdomain }} -#openshift_logging_kibana_hostname=logging.apps.example.com -# Configure the number of elastic search nodes, unless you're using dynamic provisioning -# this value must be 1 -#openshift_logging_es_cluster_size=1 -# Configure the prefix and version for the component images -#openshift_logging_image_prefix=docker.io/openshift/origin- -#openshift_logging_image_version=v3.7.0 - -# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') -# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' - -# Disable the OpenShift SDN plugin -# openshift_use_openshift_sdn=False - -# Configure SDN cluster network and kubernetes service CIDR blocks. These -# network blocks should be private and should not conflict with network blocks -# in your infrastructure that pods may require access to. Cannot be changed -# after deployment. -# -# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of -# 172.17.0.0/16. Your installation will fail and/or your configuration change will -# cause the Pod SDN or Cluster SDN to fail. -# -# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting -# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS -# environment variable located in /etc/sysconfig/docker-network. -# When upgrading or scaling up the following must match what's in your master config! -# Inventory: master yaml field -# osm_cluster_network_cidr: clusterNetworkCIDR -# openshift_portal_net: serviceNetworkCIDR -# When installing osm_cluster_network_cidr and openshift_portal_net must be set. -# Sane examples are provided below. -#osm_cluster_network_cidr=10.128.0.0/14 -#openshift_portal_net=172.30.0.0/16 - -# ExternalIPNetworkCIDRs controls what values are acceptable for the -# service external IP field. If empty, no externalIP may be set. It -# may contain a list of CIDRs which are checked for access.
If a CIDR -# is prefixed with !, IPs in that CIDR will be rejected. Rejections -# will be applied first, then the IP checked against one of the -# allowed CIDRs. You should ensure this range does not overlap with -# your nodes, pods, or service CIDRs for security reasons. -#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] - -# IngressIPNetworkCIDR controls the range to assign ingress IPs from for -# services of type LoadBalancer on bare metal. If empty, ingress IPs will not -# be assigned. It may contain a single CIDR that will be allocated from. For -# security reasons, you should ensure that this range does not overlap with -# the CIDRs reserved for external IPs, nodes, pods, or services. -#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 - -# Configure number of bits to allocate to each host's subnet e.g. 9 -# would mean a /23 network on the host. -# When upgrading or scaling up the following must match what's in your master config! -# Inventory: master yaml field -# osm_host_subnet_length: hostSubnetLength -# When installing osm_host_subnet_length must be set. A sane example is provided below. -#osm_host_subnet_length=9 - -# Configure master API and console ports. -#openshift_master_api_port=8443 -#openshift_master_console_port=8443 - -# set RPM version for debugging purposes -#openshift_pkg_version=-1.1 - -# Configure custom ca certificate -#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} -# -# NOTE: CA certificate will not be replaced on existing clusters. -# This option may only be specified when creating a new cluster or -# when redeploying cluster certificates with the redeploy-certificates -# playbook. - -# Configure custom named certificates (SNI certificates) -# -# https://docs.openshift.org/latest/install_config/certificate_customization.html -# -# NOTE: openshift_master_named_certificates is cached on masters and is an -# additive fact, meaning that each run with a different set of certificates -# will add the newly provided certificates to the cached set of certificates. -# -# An optional CA may be specified for each named certificate. CAs will -# be added to the OpenShift CA bundle which allows for the named -# certificate to be served for internal cluster communication. -# -# If you would like openshift_master_named_certificates to be overwritten with -# the provided value, specify openshift_master_overwrite_named_certificates. -#openshift_master_overwrite_named_certificates=true -# -# Provide local certificate paths which will be deployed to masters -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] -# -# Detected names may be overridden by specifying the "names" key -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] - -# Session options -#openshift_master_session_name=ssn -#openshift_master_session_max_seconds=3600 - -# An authentication and encryption secret will be generated if secrets -# are not provided. If provided, openshift_master_session_auth_secrets -# and openshift_master_encryption_secrets must be equal length. -# -# Signing secrets, used to authenticate sessions using -# HMAC. Recommended to use secrets with 32 or 64 bytes. -#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] -# -# Encrypting secrets, used to encrypt sessions.
Must be 16, 24, or 32 -# characters long, to select AES-128, AES-192, or AES-256. -#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] - -# configure how often node iptables rules are refreshed -#openshift_node_iptables_sync_period=5s - -# Configure nodeIP in the node config -# This is needed in cases where node traffic is desired to go over an -# interface other than the default network interface. -#openshift_set_node_ip=True - -# Configure dnsIP in the node config -#openshift_dns_ip=172.30.0.1 - -# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']} - -# Configure logrotate scripts -# See: https://github.com/nickhammond/ansible-logrotate -#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] - -# openshift-ansible will wait indefinitely for your input when it detects that the -# value of openshift_hostname resolves to an IP address not bound to any local -# interfaces. This mis-configuration is problematic for any pod leveraging host -# networking and liveness or readiness probes. -# Setting this variable to true will override that check. -#openshift_override_hostname_check=true - -# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail -# in versions >= 3.6 -#openshift_use_dnsmasq=False - -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -# -# Hosts in the openshift_no_proxy list will NOT use any globally -# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains -# (.example.com), and hosts (example.com), and IP addresses. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above instead. -# -# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and -# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy -# variable (above) and set this value to False -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. You only need to set these if they differ from the global proxy settings. 
-# See BuildDefaults documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_no_proxy=mycorp.com -#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_no_proxy=mycorp.com -#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} -#openshift_builddefaults_resources_requests_cpu=100m -#openshift_builddefaults_resources_requests_memory=256Mi -#openshift_builddefaults_resources_limits_cpu=1000m -#openshift_builddefaults_resources_limits_memory=512Mi - -# Or you may optionally define your own build defaults configuration serialized as json -#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' - -# These options configure the BuildOverrides admission controller which injects -# configuration into Builds. -# See BuildOverrides documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_buildoverrides_force_pull=true -#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} - -# Or you may optionally define your own build overrides configuration serialized as json -#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' - -# Enable template service broker by specifying one or more namespaces whose -# templates will be served by the broker -#openshift_template_service_broker_namespaces=['openshift'] - -# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default -#openshift_master_dynamic_provisioning_enabled=False - -# Admission plugin config -#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} - -# Configure usage of openshift_clock role. -#openshift_clock_enabled=true - -# OpenShift Per-Service Environment Variables -# Environment variables are added to /etc/sysconfig files for -# each OpenShift service: node, master (api and controllers). -# API and controllers environment variables are merged in single -# master environments. -#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} -#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} -#openshift_node_env_vars={"ENABLE_HTTP2": "true"} - -# Enable API service auditing, available as of 1.3 -#openshift_master_audit_config={"enabled": true} -# -# In case you want a more advanced setup for the auditlog you can -# use this line.
-# The directory in "auditFilePath" will be created if it does not exist.
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
-
-# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
-# by deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default nodes are upgraded serially, one at a time, and all failures are
-# fatal. There is one set of variables for normal nodes and another for nodes
-# that are part of the control plane, as the number of hosts may be different
-# in those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall until the drain succeeds.
-#
-# If you're upgrading more than one node at a time you can specify the maximum
-# percentage of failure within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster returns to full capacity.
-#
-# The failure percentage must exceed the configured maximum before the upgrade
-# is aborted; with a serial value of 4, two failures are 50%, so this would fail
-# on two failures
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# Multiple data migrations take place and, if they fail, they will fail the upgrade.
-# You may wish to disable these or make them non-fatal.
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported; if using a global
-# containerized=true host variable it must be set to false on these hosts.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/lookup_plugins/README.md b/lookup_plugins/README.md
new file mode 100644
index 000000000..f05d608e5
--- /dev/null
+++ b/lookup_plugins/README.md
@@ -0,0 +1 @@
+openshift-ansible lookup plugins.
diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py
deleted file mode 100644
index 4581cb6b8..000000000
--- a/lookup_plugins/oo_option.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-'''
-oo_option lookup plugin for openshift-ansible
-
-Usage:
-
-    - debug:
-        msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
-
-This returns, by order of priority:
-
-* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
-* if it exists, the environment variable named `<key>`
-* if none of the above conditions are met, an empty string is returned
-'''
-
-
-import os
-
-# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
-try:
-    # ansible-2.0
-    from ansible.plugins.lookup import LookupBase
-except ImportError:
-    # ansible-1.9.x
-    class LookupBase(object):
-        def __init__(self, basedir=None, runner=None, **kwargs):
-            self.runner = runner
-            self.basedir = self.runner.basedir
-
-        def get_basedir(self, variables):
-            return self.basedir
-
-
-# Reason: disable too-few-public-methods because the `run` method is the only
-# one required by the Ansible API
-# Status: permanently disabled
-# pylint: disable=too-few-public-methods
-class LookupModule(LookupBase):
-    ''' oo_option lookup plugin main class '''
-
-    # Reason: disable unused-argument because Ansible is calling us with many
-    # parameters we are not interested in.
-    # The lookup plugins of Ansible have this kwargs “catch-all” parameter
-    # which is not used
-    # Status: permanently disabled unless Ansible API evolves
-    # pylint: disable=unused-argument
-    def __init__(self, basedir=None, **kwargs):
-        ''' Constructor '''
-        self.basedir = basedir
-
-    # Reason: disable unused-argument because Ansible is calling us with many
-    # parameters we are not interested in.
- # The lookup plugins of Ansible have this kwargs “catch-all” parameter - # which is not used - # Status: permanently disabled unless Ansible API evolves - # pylint: disable=unused-argument - def run(self, terms, variables, **kwargs): - ''' Main execution path ''' - - ret = [] - - for term in terms: - option_name = term.split()[0] - cli_key = 'cli_' + option_name - if 'vars' in variables and cli_key in variables['vars']: - ret.append(variables['vars'][cli_key]) - elif option_name in os.environ: - ret.append(os.environ[option_name]) - else: - ret.append('') - - return ret diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 1f81893d9..2ae7d48a3 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.134.0%{?dist} +Release: 0.147.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -84,10 +84,6 @@ touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py popd -# openshift_master_facts symlinks lookup_plugins/oo_option.py from ansible_plugins/lookup_plugins -pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/lookup_plugins -ln -sf ../../../../../ansible_plugins/lookup_plugins/oo_option.py oo_option.py -popd # openshift-ansible-filter-plugins install cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/ @@ -280,6 +276,155 @@ Atomic OpenShift Utilities includes %changelog +* Tue Oct 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.147.0 +- Add PartOf to docker systemd service unit. (mgugino@redhat.com) +- crio: use systemd manager (gscrivan@redhat.com) +- Ensure servingInfo.clientCA is set as ca.crt rather than ca-bundle.crt. 
+ (abutcher@redhat.com) +- crio, docker: use openshift_release when openshift_image_tag is not used + (gscrivan@redhat.com) +- crio: fix typo (gscrivan@redhat.com) +- Update registry_config.j2 (jialiu@redhat.com) +- Update registry_config.j2 (jialiu@redhat.com) + +* Mon Oct 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.146.0 +- docker_image_availability: credentials to skopeo (mgugino@redhat.com) +- Rename openshift_cfme role to openshift_management (tbielawa@redhat.com) + +* Mon Oct 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.145.0 +- add missing restart node handler to flannel (jchaloup@redhat.com) +- Switch to configmap leader election on 3.7 upgrade (mkhan@redhat.com) +- crio.conf.j2: sync from upstream (gscrivan@redhat.com) +- cri-o: use overlay instead of overlay2 (gscrivan@redhat.com) +- Ensure docker is restarted when iptables is restarted (mgugino@redhat.com) +- Stop including origin and ose hosts example file (sdodson@redhat.com) +- node: make node service PartOf=openvswitch.service when openshift-sdn is used + (dcbw@redhat.com) + +* Fri Oct 06 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.144.0 +- fix typo for default in etcd (mgugino@redhat.com) +- Bumping version of service catalog image for 3.7 (ewolinet@redhat.com) +- remove duplicate [OSEv3:children] group (jfchevrette@gmail.com) +- Fix lint error (tbielawa@redhat.com) +- Update hosts.ose.example (ephillipe@gmail.com) +- Remove the no-longer-used App/DB pv size override variables from inventories + (tbielawa@redhat.com) +- openshift_checks: lb and nfs do not need docker (lmeyer@redhat.com) +- openshift_checks: use oo group names everywhere (lmeyer@redhat.com) +- Add notes about SA token. Improve NFS validation. (tbielawa@redhat.com) +- Hooks for installing CFME during full openshift installation + (tbielawa@redhat.com) +- Documentation (tbielawa@redhat.com) +- Import upstream templates. Do the work. Validate parameters. + (tbielawa@redhat.com) +- CFME 4.6 work begins. CFME 4.5 references added to the release-3.6 branch + (tbielawa@redhat.com) +- Update hosts.origin.example (ephillipe@gmail.com) +- Add logging es prometheus endpoint (jcantril@redhat.com) +- bug 1497401. Default logging and metrics images to 3.7 (jcantril@redhat.com) +- Ensure docker service started prior to credentials (mgugino@redhat.com) +- Adding support for an inventory directory/hybrid inventory + (esauer@redhat.com) +- Remove unused tasks file in openshift_named_certificates (rteague@redhat.com) +- Move node cert playbook into node config path (rteague@redhat.com) +- Move master cert playbooks into master config path (rteague@redhat.com) +- Move etcd cert playbooks into etcd config path (rteague@redhat.com) +- Fix hosted selector variable migration (mgugino@redhat.com) +- Bug 1496271 - Perserve SCC for ES local persistent storage + (jcantril@redhat.com) +- Limit hosts that run openshift_version role (mgugino@redhat.com) +- Update ansible-service-broker config to track latest broker + (fabian@fabianism.us) +- fix master-facts for provisioning (mgugino@redhat.com) +- Make provisioning steps more reusable (mgugino@redhat.com) +- logging: honor openshift_logging_es_cpu_limit (jwozniak@redhat.com) +- Addressing tox issues (ewolinet@redhat.com) +- bug 1482661. 
Preserve ES dc nodeSelector and supplementalGroups + (jcantril@redhat.com) +- Checking if any openshift_*_storage_kind variables are set to dynamic without + enabling dynamic provisioning (ewolinet@redhat.com) +- Removing setting pvc size and dynamic to remove looped var setting + (ewolinet@redhat.com) + +* Wed Oct 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.143.0 +- Limit base-package install during master upgrades (mgugino@redhat.com) +- Fix provisiong scale group and elb logic (mgugino@redhat.com) + +* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.142.0 +- Document that nfs_directory must conform to DNS-1123 (sdodson@redhat.com) +- Move node aws credentials to config.yml (mgugino@redhat.com) +- Use etcd_ip when communicating with the cluster as a peer in etcd scaleup. + (abutcher@redhat.com) +- Ensure openshift.common.portal_net updated during scaleup. + (abutcher@redhat.com) +- docker: fix some tox warnings (gscrivan@redhat.com) +- Require openshift_image_tag in the inventory with openshift-enterprise + (gscrivan@redhat.com) +- crio: use the image_tag on RHEL (gscrivan@redhat.com) +- docker: use the image_tag on RHEL (gscrivan@redhat.com) + +* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.141.0 +- Restore registires to /etc/sysconfig/docker (mgugino@redhat.com) +- Fix Prometheus byo entry point (rteague@redhat.com) +- Update to the openshift_aws style scheme for variables (ccoleman@redhat.com) + +* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.140.0 +- openshift_checks: Fix incorrect list cast (smilner@redhat.com) +- lib/base: Allow for empty option value (jarrpa@redhat.com) + +* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.139.0 +- Fix mispelling in error message yammlint -> yamllint (simo@redhat.com) +- Separate certificate playbooks. (abutcher@redhat.com) +- Reverting using uninstall variables for logging and metrics + (ewolinet@redhat.com) +- Add --image flag to setup-openshift-heketi-storage (ttindell@isenpai.com) + +* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.138.0 +- Fix typo in openshift_default_storage_class/README (hansmi@vshn.ch) +- GlusterFS: make ServiceAccounts privileged when either glusterfs or heketi is + native (jarrpa@redhat.com) +- Fix some provisioning variables (mgugino@redhat.com) + +* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.137.0 +- openshift_node: Add MASTER_SERVICE on system container install + (smilner@redhat.com) +- openshift_node: Set DOCKER_SERVICE for system container (smilner@redhat.com) + +* Sun Oct 01 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.136.0 +- Include openshift_hosted when redeploying router certificates to handle auto- + generated wildcard certificate or custom openshift_hosted_router_certificate. + (abutcher@redhat.com) +- Check for router service annotations when redeploying router certificates. + (abutcher@redhat.com) +- Remove oo_option symlink from specfile. (abutcher@redhat.com) +- Add a README.md to lookup_plugins/ (abutcher@redhat.com) +- Remove oo_option facts. 
(abutcher@redhat.com) +- block 3.6->3.7 upgrade if storage backend is not set to etcd3 + (jchaloup@redhat.com) +- Changes necessary to support AMI building (mgugino@redhat.com) + +* Sat Sep 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.135.0 +- fix whitespace for centos repos (jdetiber@redhat.com) +- Fix registry auth variable (mgugino@redhat.com) +- move health-checks and control-plane-verification before excluders + (jchaloup@redhat.com) +- Fix typo in files (Docker registries) (william17.burton@gmail.com) +- Registering the broker for TSB (ewolinet@redhat.com) +- Quick formatting updates to the logging README. (steveqtran@gmail.com) +- openshift_facts: coerce docker_use_system_container to bool + (smilner@redhat.com) +- Migrate enterprise registry logic to docker role (mgugino@redhat.com) +- minor update to README and removed dead file (steveqtran@gmail.com) +- Added new variables for logging role for remote-syslog plugin + (steveqtran@gmail.com) +- Remove some reminants of Atomic Enterprise (sdodson@redhat.com) +- Allow examples management to be disabled (sdodson@redhat.com) +- rename vars to avoid double negatives and ensuing confusion + (jsanda@redhat.com) +- set prometheus endpoint properties to false by default (jsanda@redhat.com) +- add options to disable prometheus endpoints (jsanda@redhat.com) +- Enable JMX reporting of internal metrics (jsanda@redhat.com) + * Thu Sep 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.134.0 - OpenShift-Ansible Installer Checkpointing (rteague@redhat.com) - evaluate etcd_backup_tag variable (jchaloup@redhat.com) diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 2b3d4329e..816cb35b4 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -87,11 +87,6 @@ masters nodes etcd -[OSEv3:children] -masters -nodes -etcd - [OSEv3:vars] ################################################################################ # Ensure these variables are set for bootstrap diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 86b2a2544..1ab1e8041 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -17,35 +17,17 @@ - name: openshift_aws_region msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - name: create an instance and prepare for ami - include_role: - name: openshift_aws - tasks_from: build_ami.yml - vars: - openshift_aws_node_group_type: compute +- include: provision_vpc.yml - - name: fetch newly created instances - ec2_remote_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}" - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 +- include: provision_ssh_keypair.yml - - name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ instancesout.instances[0].public_ip_address }}" - timeout: 300 - search_regex: OpenSSH +- include: provision_sec_group.yml + vars: + openshift_aws_node_group_type: compute - - name: add host to nodes - add_host: - groups: nodes - name: "{{ instancesout.instances[0].public_dns_name }}" +- include: provision_instance.yml + vars: + openshift_aws_node_group_type: compute - hosts: nodes gather_facts: False @@ -54,28 +36,10 @@ set_fact: ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}" -- name: normalize groups - include: 
../../byo/openshift-cluster/initialize_groups.yml +# This is the part that installs all of the software and configs for the instance +# to become a node. +- include: ../../common/openshift-node/image_prep.yml -- name: run the std_include - include: ../../common/openshift-cluster/evaluate_groups.yml - -- name: run the std_include - include: ../../common/openshift-cluster/initialize_facts.yml - -- name: run the std_include - include: ../../common/openshift-cluster/initialize_openshift_repos.yml - -- name: install node config - include: ../../common/openshift-node/config.yml - -- hosts: localhost - connection: local - become: no - tasks: - - name: seal the ami - include_role: - name: openshift_aws - tasks_from: seal_ami.yml - vars: - openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" +- include: seal_ami.yml + vars: + openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index 86d58a68e..4d0bf9531 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -1,68 +1,19 @@ --- -- name: Setup the vpc and the master node group +- name: Setup the master node group hosts: localhost tasks: - - name: Alert user to variables needed - clusterid - debug: - msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" - - - name: Alert user to variables needed - region - debug: - msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - - name: fetch newly created instances - ec2_remote_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 - - - name: add new master to masters group - add_host: - groups: "masters,etcd,nodes" - name: "{{ item.public_ip_address }}" - hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}" - with_items: "{{ instancesout.instances }}" - - - name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ item.public_ip_address }}" - timeout: 300 - search_regex: OpenSSH - with_items: "{{ instancesout.instances }}" + - include_role: + name: openshift_aws + tasks_from: setup_master_group.yml - name: set the master facts for hostname to elb hosts: masters gather_facts: no remote_user: root tasks: - - name: fetch elbs - ec2_elb_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - names: - - "{{ item }}" - with_items: - - "{{ openshift_aws_clusterid | default('default') }}-master-external" - - "{{ openshift_aws_clusterid | default('default') }}-master-internal" - delegate_to: localhost - register: elbs - - - debug: var=elbs - - - name: set fact - set_fact: - openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" - osm_custom_cors_origins: - - "{{ elbs.results[1].elbs[0].dns_name }}" - - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - with_items: "{{ groups['masters'] }}" + - include_role: + name: openshift_aws + tasks_from: master_facts.yml - name: normalize groups include: ../../byo/openshift-cluster/initialize_groups.yml diff --git a/playbooks/aws/openshift-cluster/provision.yml 
b/playbooks/aws/openshift-cluster/provision.yml
index db7afac6f..4b5bd22ea 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,5 +1,5 @@
 ---
-- name: Setup the vpc and the master node group
+- name: Setup the elb and the master node group
   hosts: localhost
   tasks:
@@ -11,7 +11,7 @@
       debug:
         msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-  - name: create default vpc
+  - name: provision cluster
     include_role:
       name: openshift_aws
       tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
new file mode 100644
index 000000000..6e843453c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: create an instance and prepare for ami
+    include_role:
+      name: openshift_aws
+      tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
new file mode 100644
index 000000000..039357adb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -0,0 +1,13 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: create the cluster security groups
+    include_role:
+      name: openshift_aws
+      tasks_from: security_group.yml
+    when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
new file mode 100644
index 000000000..3ec683958
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: create the ssh keypairs
+    include_role:
+      name: openshift_aws
+      tasks_from: ssh_keys.yml
+    vars:
+      openshift_aws_node_group_type: compute
+    when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
new file mode 100644
index 000000000..0a23a6d32
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: create a vpc
+    include_role:
+      name: openshift_aws
+      tasks_from: vpc.yml
+    when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
new file mode 100644
index 000000000..8239a64fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_ami_name' is set correctly for your usage.
+# See build_ami.yml for an example.
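+#
+# A hedged usage sketch (the inventory path and AMI name below are
+# placeholders, not defaults):
+#
+#   ansible-playbook -i hosts playbooks/aws/openshift-cluster/seal_ami.yml \
+#     -e openshift_aws_ami_name=openshift-gi-example
+#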
+- hosts: localhost + connection: local + become: no + tasks: + - name: seal the ami + include_role: + name: openshift_aws + tasks_from: seal_ami.yml diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml index 15917078d..4d3f7f42c 100644 --- a/playbooks/byo/openshift-cluster/openshift-prometheus.yml +++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml @@ -1,4 +1,6 @@ --- - include: initialize_groups.yml +- include: ../../common/openshift-cluster/std_include.yml + - include: ../../common/openshift-cluster/openshift_prometheus.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 073ded6e0..255b0dbf7 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -11,11 +11,23 @@ vars: g_check_expiry_hosts: 'oo_etcd_to_config' -- include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml -- include: ../../common/openshift-cluster/redeploy-certificates/masters.yml +- include: ../../common/openshift-etcd/certificates.yml + vars: + etcd_certificates_redeploy: true + +- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml + +- include: ../../common/openshift-master/certificates.yml + vars: + openshift_certificates_redeploy: true + +- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml -- include: ../../common/openshift-cluster/redeploy-certificates/nodes.yml +- include: ../../common/openshift-node/certificates.yml + vars: + openshift_certificates_redeploy: true - include: ../../common/openshift-etcd/restart.yml vars: diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml index 0f86eb997..f4f2ce00d 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml @@ -11,7 +11,11 @@ vars: g_check_expiry_hosts: 'oo_etcd_to_config' -- include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml + +- include: ../../common/openshift-etcd/certificates.yml + vars: + etcd_certificates_redeploy: true - include: ../../common/openshift-etcd/restart.yml vars: diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml index 566e8b261..049bad8e7 100644 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml @@ -7,6 +7,10 @@ tags: - always -- include: ../../common/openshift-cluster/redeploy-certificates/masters.yml +- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml + +- include: ../../common/openshift-master/certificates.yml + vars: + openshift_certificates_redeploy: true - include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml index 42777e5e6..345b0c689 100644 --- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml @@ -7,6 +7,10 @@ tags: - always -- 
include: ../../common/openshift-cluster/redeploy-certificates/nodes.yml +- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml + +- include: ../../common/openshift-node/certificates.yml + vars: + openshift_certificates_redeploy: true - include: ../../common/openshift-node/restart.yml diff --git a/playbooks/byo/openshift-etcd/certificates.yml b/playbooks/byo/openshift-etcd/certificates.yml new file mode 100644 index 000000000..e35cf243f --- /dev/null +++ b/playbooks/byo/openshift-etcd/certificates.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/ca.yml + +- include: ../../common/openshift-etcd/certificates.yml diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-management/config.yml index 0e8e7a94d..33a555cc1 100644 --- a/playbooks/byo/openshift-cfme/config.yml +++ b/playbooks/byo/openshift-management/config.yml @@ -5,4 +5,4 @@ - include: ../../common/openshift-cluster/evaluate_groups.yml -- include: ../../common/openshift-cfme/config.yml +- include: ../../common/openshift-management/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml index c8ed16859..ebd6fb261 100644 --- a/playbooks/byo/openshift-cfme/uninstall.yml +++ b/playbooks/byo/openshift-management/uninstall.yml @@ -3,4 +3,4 @@ # tags: # - always -- include: ../../common/openshift-cfme/uninstall.yml +- include: ../../common/openshift-management/uninstall.yml diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml new file mode 100644 index 000000000..e147dcba1 --- /dev/null +++ b/playbooks/byo/openshift-master/certificates.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/certificates.yml diff --git a/playbooks/byo/openshift-node/certificates.yml b/playbooks/byo/openshift-node/certificates.yml new file mode 100644 index 000000000..3d2de74a9 --- /dev/null +++ b/playbooks/byo/openshift-node/certificates.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-node/certificates.yml diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 06f914981..bc3109a31 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -11,6 +11,6 @@ when: - deployment_type == 'openshift-enterprise' - ansible_distribution == "RedHat" - - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] + - lookup('env', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] - role: openshift_repos - role: os_update_latest diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml deleted file mode 100644 index 533a35d9e..000000000 --- a/playbooks/common/openshift-cfme/config.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# TODO: Make this work. The 'name' variable below is undefined -# presently because it's part of the cfme role. This play can't run -# until that's re-worked. 
-# -# - name: Pre-Pull manageiq-pods docker images -# hosts: nodes -# tasks: -# - name: Ensure the latest manageiq-pods docker image is pulling -# docker_image: -# name: "{{ openshift_cfme_container_image }}" -# # Fire-and-forget method, never timeout -# async: 99999999999 -# # F-a-f, never check on this. True 'background' task. -# poll: 0 - -- name: Configure Masters for CFME Bulk Image Imports - hosts: oo_masters_to_config - serial: 1 - tasks: - - name: Run master cfme tuning playbook - include_role: - name: openshift_cfme - tasks_from: tune_masters - -- name: Setup CFME - hosts: oo_first_master - vars: - r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" - pre_tasks: - - name: Create a temporary place to evaluate the PV templates - command: mktemp -d /tmp/openshift-ansible-XXXXXXX - register: r_openshift_cfme_mktemp - changed_when: false - - name: Ensure the server template was read from disk - debug: - msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" - - tasks: - - name: Run the CFME Setup Role - include_role: - name: openshift_cfme - vars: - template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index bf6f4e7cd..b399ea995 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -18,10 +18,6 @@ - docker_image_availability - docker_storage -- include: initialize_oo_option_facts.yml - tags: - - always - - include: ../openshift-etcd/config.yml - include: ../openshift-nfs/config.yml @@ -50,6 +46,9 @@ - include: service_catalog.yml when: openshift_enable_service_catalog | default(false) | bool +- include: openshift_management.yml + when: openshift_management_install_management | default(false) | bool + - name: Print deprecated variable warning message if necessary hosts: oo_first_master gather_facts: no diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml deleted file mode 100644 index dab17aaa9..000000000 --- a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Set oo_option facts - hosts: oo_all_hosts - tags: - - always - tasks: - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not defined - - set_fact: - openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" - when: openshift_docker_selinux_enabled is not defined diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 1b186f181..e6400ea61 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,6 +1,9 @@ --- +# openshift_install_base_package_group may be set in a play variable to limit +# the host groups the base package is installed on. This is currently used +# for master/control-plane upgrades. 
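+#
+# A hedged sketch of how an including play can scope the base package install
+# (this mirrors the usage added to the upgrade_control_plane.yml playbooks
+# later in this diff; the include path is illustrative):
+#
+#   - include: ../../common/openshift-cluster/initialize_openshift_version.yml
+#     vars:
+#       openshift_install_base_package_group: "oo_masters_to_config"
+#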
- name: Set version_install_base_package true on masters and nodes - hosts: oo_masters_to_config:oo_nodes_to_config + hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}" tasks: - name: Set version_install_base_package true set_fact: @@ -16,8 +19,8 @@ # NOTE: We set this even on etcd hosts as they may also later run as masters, # and we don't want to install wrong version of docker and have to downgrade # later. -- name: Set openshift_version for all hosts - hosts: oo_all_hosts:!oo_first_master +- name: Set openshift_version for etcd, node, and master hosts + hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master vars: openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" pre_tasks: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index b9eb380d3..32e5e708a 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -23,6 +23,7 @@ - include: cockpit-ui.yml - include: openshift_prometheus.yml + when: openshift_hosted_prometheus_deploy | default(False) | bool - name: Hosted Install Checkpoint End hosts: localhost diff --git a/playbooks/common/openshift-cluster/openshift_management.yml b/playbooks/common/openshift-cluster/openshift_management.yml new file mode 100644 index 000000000..6e582920b --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_management.yml @@ -0,0 +1,25 @@ +--- +- name: Management Install Checkpoint Start + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Set Management install 'In Progress' + set_stats: + data: + installer_phase_Management: "In Progress" + aggregate: false + +- name: Management + include: ../openshift-management/config.yml + +- name: Management Install Checkpoint End + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Set Management install 'Complete' + set_stats: + data: + installer_phase_Management: "Complete" + aggregate: false diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml index ed89d3bde..ac2d250a3 100644 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -3,4 +3,3 @@ hosts: oo_first_master roles: - role: openshift_prometheus - when: openshift_hosted_prometheus_deploy | default(False) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml new file mode 100644 index 000000000..d738c8207 --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml @@ -0,0 +1,19 @@ +--- +- name: Backup and remove generated etcd certificates + hosts: oo_first_etcd + any_errors_fatal: true + tasks: + - include_role: + name: etcd + tasks_from: backup_generated_certificates + - include_role: + name: etcd + tasks_from: remove_generated_certificates + +- name: Backup deployed etcd certificates + hosts: oo_etcd_to_config + any_errors_fatal: true + tasks: + - include_role: + name: etcd + tasks_from: backup_server_certificates diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml index 3da22bce6..044875d1c 100644 --- 
a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -21,20 +21,7 @@ name: etcd tasks_from: remove_ca_certificates -- name: Generate new etcd CA - hosts: oo_first_etcd - roles: - - role: openshift_etcd_facts - tasks: - - include_role: - name: etcd - tasks_from: ca - vars: - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - when: - - etcd_ca_setup | default(True) | bool +- include: ../../openshift-etcd/ca.yml - name: Create temp directory for syncing certs hosts: localhost @@ -72,7 +59,7 @@ name: etcd tasks_from: retrieve_ca_certificates vars: - etcd_sync_cert_dir: hostvars['localhost'].g_etcd_mktemp.stdout + etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Distribute etcd CA to masters diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml deleted file mode 100644 index 48a5a13ac..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Backup and remove generated etcd certificates - hosts: oo_first_etcd - any_errors_fatal: true - tasks: - - include_role: - name: etcd - tasks_from: backup_generated_certificates - - include_role: - name: etcd - tasks_from: remove_generated_certificates - -- name: Backup and removed deployed etcd certificates - hosts: oo_etcd_to_config - any_errors_fatal: true - tasks: - - include_role: - name: etcd - tasks_from: backup_server_certificates - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - -- name: Redeploy etcd certificates - hosts: oo_etcd_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - tasks: - - include_role: - name: etcd - tasks_from: server_certificates - vars: - etcd_certificates_redeploy: true - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - -- name: Redeploy etcd client certificates for masters - hosts: oo_masters_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_certificates_redeploy: true - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" - etcd_cert_prefix: "master.etcd-" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml new file mode 100644 index 000000000..4dbc041b0 --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml @@ -0,0 +1,38 @@ +--- +- name: Backup and 
remove master certificates
+  hosts: oo_masters_to_config
+  any_errors_fatal: true
+  vars:
+    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+    openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+  pre_tasks:
+  - stat:
+      path: "{{ openshift.common.config_base }}/generated-configs"
+    register: openshift_generated_configs_dir_stat
+  - name: Backup generated certificate and config directories
+    command: >
+      tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
+      {{ openshift.common.config_base }}/generated-configs
+      {{ openshift.common.config_base }}/master
+    when: openshift_generated_configs_dir_stat.stat.exists
+    delegate_to: "{{ openshift_ca_host }}"
+    run_once: true
+  - name: Remove generated certificate directories
+    file:
+      path: "{{ item }}"
+      state: absent
+    with_items:
+    - "{{ openshift.common.config_base }}/generated-configs"
+  - name: Remove generated certificates
+    file:
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
+      state: absent
+    with_items:
+    - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
+    - "etcd.server.crt"
+    - "etcd.server.key"
+    - "master.server.crt"
+    - "master.server.key"
+    - "openshift-master.crt"
+    - "openshift-master.key"
+    - "openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
deleted file mode 100644
index 51b196299..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- name: Redeploy master certificates
-  hosts: oo_masters_to_config
-  any_errors_fatal: true
-  vars:
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-    openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
-  pre_tasks:
-  - stat:
-      path: "{{ openshift_generated_configs_dir }}"
-    register: openshift_generated_configs_dir_stat
-  - name: Backup generated certificate and config directories
-    command: >
-      tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
-      {{ openshift_generated_configs_dir }}
-      {{ openshift.common.config_base }}/master
-    when: openshift_generated_configs_dir_stat.stat.exists
-    delegate_to: "{{ openshift_ca_host }}"
-    run_once: true
-  - name: Remove generated certificate directories
-    file:
-      path: "{{ item }}"
-      state: absent
-    with_items:
-    - "{{ openshift_generated_configs_dir }}"
-  - name: Remove generated certificates
-    file:
-      path: "{{ openshift.common.config_base }}/master/{{ item }}"
-      state: absent
-    with_items:
-    - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
-    - "etcd.server.crt"
-    - "etcd.server.key"
-    - "master.server.crt"
-    - "master.server.key"
-    - "openshift-master.crt"
-    - "openshift-master.key"
-    - "openshift-master.kubeconfig"
-  - name: Remove generated etcd client certificates
-    file:
-      path: "{{ openshift.common.config_base }}/master/{{ item }}"
-      state: absent
-    with_items:
-    - "master.etcd-client.crt"
-    - "master.etcd-client.key"
-    when: groups.oo_etcd_to_config | default([]) | length == 0
-  roles:
-  - role: openshift_master_certificates
-    openshift_master_etcd_hosts: "{{ hostvars
-                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
-                                     | oo_collect('openshift.common.hostname')
-                                     | default(none, true) }}"
-    openshift_certificates_redeploy: true
-  - role:
lib_utils - post_tasks: - - yedit: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - key: servingInfo.namedCertificates - value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}" - when: - - ('named_certificates' in openshift.master) - - openshift.master.named_certificates | default([]) | length > 0 - - openshift_master_overwrite_named_certificates | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml index 4990a03f2..2ad84b3b9 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml @@ -22,8 +22,3 @@ state: absent with_items: - "{{ openshift.common.config_base }}/node/ca.crt" - roles: - - role: openshift_node_certificates - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_certificates_redeploy: true diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index b54acae6c..2068ed199 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -44,8 +44,8 @@ - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: servingInfo.clientCA - yaml_value: ca-bundle.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt' + yaml_value: ca.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: etcdClientInfo.ca @@ -105,25 +105,27 @@ - "ca.serial.txt" - "ca-bundle.crt" -- name: Generate new OpenShift CA certificate +- name: Create temporary directory for creating new CA certificate hosts: oo_first_master - pre_tasks: + tasks: - name: Create temporary directory for creating new CA certificate command: > mktemp -d /tmp/openshift-ansible-XXXXXXX register: g_new_openshift_ca_mktemp changed_when: false - roles: - - role: openshift_ca + +- name: Create OpenShift CA + hosts: oo_first_master + vars: # Set openshift_ca_config_dir to a temporary directory where CA # will be created. We'll replace the existing CA with the CA # created in the temporary directory. 
- openshift_ca_config_dir: "{{ g_new_openshift_ca_mktemp.stdout }}" + openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_hostnames: "{{ hostvars - | oo_select_keys(groups['oo_masters_to_config'] | default([])) - | oo_collect('openshift.common.all_hostnames') - | oo_flatten | unique }}" - name: Create temp directory for syncing certs hosts: localhost diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index 748bbbf91..2116c745c 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -7,23 +7,34 @@ tasks: - name: Create temp directory for kubeconfig command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp + register: router_cert_redeploy_tempdir changed_when: false + - name: Copy admin client config(s) command: > - cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig changed_when: false - name: Determine if router exists command: > {{ openshift.common.client_binary }} get dc/router -o json - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default register: l_router_dc failed_when: false changed_when: false - - set_fact: + - name: Determine if router service exists + command: > + {{ openshift.common.client_binary }} get svc/router -o json + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig + -n default + register: l_router_svc + failed_when: false + changed_when: false + + - name: Collect router environment variables and secrets + set_fact: router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env'] | oo_collect('name')) | default([]) }}" @@ -34,20 +45,32 @@ changed_when: false when: l_router_dc.rc == 0 + - name: Collect router service annotations + set_fact: + router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}" + when: l_router_svc.rc == 0 + - name: Update router environment variables shell: > {{ openshift.common.client_binary }} env dc/router OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)" OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)" OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)" - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in router_env_vars and 'OPENSHIFT_CERT_DATA' in router_env_vars and 'OPENSHIFT_KEY_DATA' in router_env_vars + when: + - l_router_dc.rc == 0 + - ('OPENSHIFT_CA_DATA' in router_env_vars) + - ('OPENSHIFT_CERT_DATA' in router_env_vars) + - ('OPENSHIFT_KEY_DATA' in router_env_vars) + # When the router service contains service signer annotations we + # will delete the existing certificate secret and allow OpenShift to + # replace the secret. 
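+  # As a hedged illustration, a service-signed router service carries
+  # annotations like the following (the secret name is the shipped default;
+  # the signer value is cluster-specific):
+  #   service.alpha.openshift.io/serving-cert-secret-name: router-certs
+  #   service.alpha.openshift.io/serving-cert-signed-by: <signer>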
- block: - name: Delete existing router certificate secret oc_secret: - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig" name: router-certs namespace: default state: absent @@ -58,86 +81,61 @@ {{ openshift.common.client_binary }} annotate service/router service.alpha.openshift.io/serving-cert-secret-name- service.alpha.openshift.io/serving-cert-signed-by- - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - name: Add serving-cert-secret annotation to router service command: > {{ openshift.common.client_binary }} annotate service/router service.alpha.openshift.io/serving-cert-secret-name=router-certs - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined + when: + - l_router_dc.rc == 0 + - l_router_svc.rc == 0 + - ('router-certs' in router_secrets) + - openshift_hosted_router_certificate is undefined + - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations + - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations - - block: - - assert: - that: - - "'certfile' in openshift_hosted_router_certificate" - - "'keyfile' in openshift_hosted_router_certificate" - - "'cafile' in openshift_hosted_router_certificate" - msg: |- - openshift_hosted_router_certificate has been set in the inventory but is - missing one or more required keys. Ensure that 'certfile', 'keyfile', - and 'cafile' keys have been specified for the openshift_hosted_router_certificate - inventory variable. - - - name: Read router certificate and key - become: no - local_action: - module: slurp - src: "{{ item }}" - register: openshift_router_certificate_output - # Defaulting dictionary keys to none to avoid deprecation warnings - # (future fatal errors) during template evaluation. Dictionary keys - # won't be accessed unless openshift_hosted_router_certificate is - # defined and has all keys (certfile, keyfile, cafile) which we - # check above. 
- with_items: - - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}" - - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}" - - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}" - - - name: Write temporary router certificate file - copy: - content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" - dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" - mode: 0600 - - - name: Write temporary router key file - copy: - content: "{{ (openshift_router_certificate_output.results - | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}" - dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" - mode: 0600 - - - name: Replace router-certs secret - shell: > - {{ openshift.common.client_binary }} secrets new router-certs - tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" - tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" - --type=kubernetes.io/tls - --config={{ mktemp.stdout }}/admin.kubeconfig - --confirm - -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f - + # When there are no annotations on the router service we will allow + # the openshift_hosted role to either create a new wildcard + # certificate (since we deleted the original) or reapply a custom + # openshift_hosted_router_certificate. + - file: + path: "{{ item }}" + state: absent + with_items: + - /etc/origin/master/openshift-router.crt + - /etc/origin/master/openshift-router.key + when: + - l_router_dc.rc == 0 + - l_router_svc.rc == 0 + - ('router-certs' in router_secrets) + - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations + - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - - name: Remove temporary router certificate and key files - file: - path: "{{ item }}" - state: absent - with_items: - - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" - - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined + - include_role: + name: openshift_hosted + tasks_from: main + vars: + openshift_hosted_manage_registry: false + when: + - l_router_dc.rc == 0 + - l_router_svc.rc == 0 + - ('router-certs' in router_secrets) + - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations + - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - name: Redeploy router command: > {{ openshift.common.client_binary }} deploy dc/router --latest - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - name: Delete temp directory file: - name: "{{ mktemp.stdout }}" + name: "{{ router_cert_redeploy_tempdir.stdout }}" state: absent changed_when: False diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index c98065cf4..2826951e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -5,8 +5,6 @@ g_new_master_hosts: [] g_new_node_hosts: [] -- include: ../initialize_oo_option_facts.yml - - include: 
../initialize_facts.yml - name: Ensure firewall is not switched during upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index 54c85f0fb..f64f0e003 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -68,6 +68,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index d7cb38d03..43da5b629 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -68,6 +68,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 5fee56615..e9cec9220 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -55,6 +55,10 @@ tags: - pre_upgrade +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -68,6 +72,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -75,10 +80,6 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 7c72564b6..27d8515dc 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -55,6 +55,14 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -68,6 +76,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -75,14 +84,6 @@ # docker is configured and running. skip_docker_role: True -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 6c1c7c921..ba6fcc3f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -48,6 +48,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade @@ -68,10 +72,6 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml index ed89dbe8d..df59a8782 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml @@ -14,3 +14,8 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'kubernetesMasterConfig.admissionConfig' yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 87621dc85..f1ca1edb9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -21,6 +21,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 6cd3bd3e5..6c4f9671b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -59,6 +59,14 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -72,6 +80,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -79,14 +88,6 @@ # docker is configured and running. skip_docker_role: True -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index e5e04e643..bc080f9a3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -48,6 +48,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade @@ -68,10 +72,6 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-etcd/ca.yml b/playbooks/common/openshift-etcd/ca.yml new file mode 100644 index 000000000..ac5543be9 --- /dev/null +++ b/playbooks/common/openshift-etcd/ca.yml @@ -0,0 +1,15 @@ +--- +- name: Generate new etcd CA + hosts: oo_first_etcd + roles: + - role: openshift_etcd_facts + tasks: + - include_role: + name: etcd + tasks_from: ca + vars: + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + when: + - etcd_ca_setup | default(True) | bool diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml new file mode 100644 index 000000000..31a0f50d8 --- /dev/null +++ b/playbooks/common/openshift-etcd/certificates.yml @@ -0,0 +1,29 @@ +--- +- name: Create etcd server certificates for etcd hosts + hosts: oo_etcd_to_config + any_errors_fatal: true + roles: + - role: openshift_etcd_facts + post_tasks: + - include_role: + name: etcd + tasks_from: server_certificates + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + +- name: Create etcd client certificates for master hosts + hosts: oo_masters_to_config + any_errors_fatal: true + roles: + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" + etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" + etcd_cert_prefix: "master.etcd-" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 2cae231b4..82539dac8 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -10,6 +10,10 @@ installer_phase_etcd: "In Progress" aggregate: false +- include: ca.yml + +- include: certificates.yml + - name: Configure etcd hosts: oo_etcd_to_config any_errors_fatal: true diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index 4f83264d0..b5ba2bbba 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -20,7 +20,7 @@ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} - -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} delegate_to: "{{ etcd_ca_host }}" failed_when: @@ -30,6 +30,13 @@ retries: 3 delay: 10 until: etcd_add_check.rc == 0 + - include_role: + name: etcd + tasks_from: server_certificates + vars: + etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}" + 
etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" roles: - role: os_firewall when: etcd_add_check.rc == 0 diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml new file mode 100644 index 000000000..0aaafe440 --- /dev/null +++ b/playbooks/common/openshift-management/config.yml @@ -0,0 +1,15 @@ +--- +- name: Setup CFME + hosts: oo_first_master + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_management_mktemp + changed_when: false + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_management + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-management/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/common/openshift-cfme/filter_plugins +++ b/playbooks/common/openshift-management/filter_plugins diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-management/library index ba40d2f56..ba40d2f56 120000 --- a/playbooks/common/openshift-cfme/library +++ b/playbooks/common/openshift-management/library diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-management/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/common/openshift-cfme/roles +++ b/playbooks/common/openshift-management/roles diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml index 78b8e7668..698d93405 100644 --- a/playbooks/common/openshift-cfme/uninstall.yml +++ b/playbooks/common/openshift-management/uninstall.yml @@ -4,5 +4,5 @@ tasks: - name: Run the CFME Uninstall Role Tasks include_role: - name: openshift_cfme + name: openshift_management tasks_from: uninstall diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml new file mode 100644 index 000000000..f6afbc36f --- /dev/null +++ b/playbooks/common/openshift-master/certificates.yml @@ -0,0 +1,14 @@ +--- +- name: Create OpenShift certificates for master hosts + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca + - role: openshift_master_certificates + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 38257b803..bc1fee982 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -10,6 +10,8 @@ installer_phase_master: "In Progress" aggregate: false +- include: certificates.yml + - name: Disable excluders hosts: oo_masters_to_config gather_facts: no @@ -20,9 +22,6 @@ - name: Gather and set facts for master hosts hosts: oo_masters_to_config - vars: - t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}" - pre_tasks: # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 # @@ -55,33 +54,12 @@ - .config_managed - set_fact: - openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 
'openshift_master_pod_eviction_timeout') | default(none, true) }}" - when: openshift_master_pod_eviction_timeout is not defined - - - set_fact: openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" openshift_master_etcd_hosts: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) | oo_collect('openshift.common.hostname') | default(none, true) }}" - - - set_fact: - openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}" - when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != "" - - - set_fact: - openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}" - when: openshift_master_default_subdomain is not defined - - set_fact: - openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}" - when: openshift_hosted_metrics_deploy is not defined - - set_fact: - openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}" - when: openshift_hosted_metrics_duration is not defined - - set_fact: - openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}" - when: openshift_hosted_metrics_resolution is not defined roles: - openshift_facts post_tasks: @@ -204,15 +182,6 @@ - role: os_firewall - role: openshift_master_facts - role: openshift_hosted_facts - - role: openshift_master_certificates - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" - etcd_cert_prefix: "master.etcd-" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - when: groups.oo_etcd_to_config | default([]) | length != 0 - role: openshift_clock - role: openshift_cloud_provider - role: openshift_builddefaults diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 8c366e038..f4dc9df8a 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -45,8 +45,12 @@ - include: ../openshift-master/set_network_facts.yml +- include: ../openshift-etcd/certificates.yml + - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml +- include: ../openshift-node/certificates.yml + - include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml index 2ad805858..9a6cf26fc 100644 --- a/playbooks/common/openshift-master/set_network_facts.yml +++ b/playbooks/common/openshift-master/set_network_facts.yml @@ -13,7 +13,9 @@ - name: Set network facts for masters hosts: oo_masters_to_config gather_facts: no - tasks: + roles: + - role: openshift_facts + post_tasks: - block: - set_fact: osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" @@ -24,5 +26,9 @@ - set_fact: openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" when: openshift_portal_net is not defined + - openshift_facts: + role: common + 
local_facts: + portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" when: - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml new file mode 100644 index 000000000..fe51ef833 --- /dev/null +++ b/playbooks/common/openshift-node/additional_config.yml @@ -0,0 +1,52 @@ +--- +- name: create additional node network plugin groups + hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}" + tasks: + # Creating these node groups will prevent a ton of skipped tasks. + # Create group for flannel nodes + - group_by: + key: oo_nodes_use_{{ (openshift_use_flannel | default(False)) | ternary('flannel','nothing') }} + changed_when: False + # Create group for calico nodes + - group_by: + key: oo_nodes_use_{{ (openshift_use_calico | default(False)) | ternary('calico','nothing') }} + changed_when: False + # Create group for nuage nodes + - group_by: + key: oo_nodes_use_{{ (openshift_use_nuage | default(False)) | ternary('nuage','nothing') }} + changed_when: False + # Create group for contiv nodes + - group_by: + key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }} + changed_when: False + +- include: etcd_client_config.yml + vars: + openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv" + +- name: Additional node config + hosts: oo_nodes_use_flannel + roles: + - role: flannel + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + when: openshift_use_flannel | default(false) | bool + +- name: Additional node config + hosts: oo_nodes_use_calico + roles: + - role: calico + when: openshift_use_calico | default(false) | bool + +- name: Additional node config + hosts: oo_nodes_use_nuage + roles: + - role: nuage_node + when: openshift_use_nuage | default(false) | bool + +- name: Additional node config + hosts: oo_nodes_use_contiv + roles: + - role: contiv + contiv_role: netplugin + when: openshift_use_contiv | default(false) | bool diff --git a/playbooks/common/openshift-node/certificates.yml b/playbooks/common/openshift-node/certificates.yml new file mode 100644 index 000000000..908885ee6 --- /dev/null +++ b/playbooks/common/openshift-node/certificates.yml @@ -0,0 +1,8 @@ +--- +- name: Create OpenShift certificates for node hosts + hosts: oo_nodes_to_config + gather_facts: no + roles: + - role: openshift_node_certificates + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + when: not openshift_node_bootstrap | default(false) | bool diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 15693e633..700aab48c 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -10,106 +10,19 @@ installer_phase_node: "In Progress" aggregate: false -- name: Disable excluders - hosts: oo_nodes_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" +- include: certificates.yml -- name: Evaluate node groups - hosts: localhost - become: no - connection: local - tasks: - - name: Evaluate oo_containerized_master_nodes - add_host: - name: "{{ item }}" - groups: oo_containerized_master_nodes - 
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: - - hostvars[item].openshift is defined - - hostvars[item].openshift.common is defined - - hostvars[item].openshift.common.is_containerized | bool - - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) - changed_when: False +- include: setup.yml -- name: Configure containerized nodes - hosts: oo_containerized_master_nodes - serial: 1 - vars: - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" +- include: containerized_nodes.yml - roles: - - role: os_firewall - - role: openshift_node - openshift_ca_host: "{{ groups.oo_first_master.0 }}" +- include: configure_nodes.yml -- name: Configure nodes - hosts: oo_nodes_to_config:!oo_containerized_master_nodes - vars: - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - roles: - - role: os_firewall - - role: openshift_node - openshift_ca_host: "{{ groups.oo_first_master.0 }}" +- include: additional_config.yml -- name: Additional node config - hosts: oo_nodes_to_config - vars: - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - roles: - - role: openshift_facts - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_cert_prefix: flannel.etcd- - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" - - role: flannel - etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - when: openshift_use_flannel | default(false) | bool - - role: calico - when: openshift_use_calico | default(false) | bool - - role: nuage_node - when: openshift_use_nuage | default(false) | bool - - role: contiv - contiv_role: netplugin - when: openshift_use_contiv | default(false) | bool - - role: nickhammond.logrotate - - role: openshift_manage_node - openshift_master_host: "{{ groups.oo_first_master.0 }}" - when: not openshift_node_bootstrap | default(False) - tasks: - - name: Create group for deployment type - group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} - changed_when: False +- include: manage_node.yml -- name: Re-enable excluder if it was previously enabled - hosts: oo_nodes_to_config 
- gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" +- include: enable_excluders.yml - name: Node Install Checkpoint End hosts: localhost diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml new file mode 100644 index 000000000..c96e4921c --- /dev/null +++ b/playbooks/common/openshift-node/configure_nodes.yml @@ -0,0 +1,16 @@ +--- +- name: Configure nodes + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + roles: + - role: os_firewall + - role: openshift_node + - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-node/containerized_nodes.yml b/playbooks/common/openshift-node/containerized_nodes.yml new file mode 100644 index 000000000..6fac937e3 --- /dev/null +++ b/playbooks/common/openshift-node/containerized_nodes.yml @@ -0,0 +1,19 @@ +--- +- name: Configure containerized nodes + hosts: oo_containerized_master_nodes + serial: 1 + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + + roles: + - role: os_firewall + - role: openshift_node + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-node/enable_excluders.yml b/playbooks/common/openshift-node/enable_excluders.yml new file mode 100644 index 000000000..5288b14f9 --- /dev/null +++ b/playbooks/common/openshift-node/enable_excluders.yml @@ -0,0 +1,8 @@ +--- +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-node/etcd_client_config.yml b/playbooks/common/openshift-node/etcd_client_config.yml new file mode 100644 index 000000000..c3fa38a81 --- /dev/null +++ b/playbooks/common/openshift-node/etcd_client_config.yml @@ -0,0 +1,11 @@ +--- +- name: etcd_client node config + hosts: "{{ openshift_node_scale_up_group | default('this_group_does_not_exist') }}" + roles: + - role: openshift_facts + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates + etcd_cert_prefix: flannel.etcd- + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname 
}}" + etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml new file mode 100644 index 000000000..fc06621ee --- /dev/null +++ b/playbooks/common/openshift-node/image_prep.yml @@ -0,0 +1,21 @@ +--- +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../openshift-cluster/evaluate_groups.yml + +- name: run the std_include + include: ../openshift-cluster/initialize_facts.yml + +- name: run the std_include + include: ../openshift-cluster/initialize_openshift_repos.yml + +- name: run node config setup + include: setup.yml + +- name: run node config + include: configure_nodes.yml + +- name: Re-enable excluders + include: enable_excluders.yml diff --git a/playbooks/common/openshift-node/manage_node.yml b/playbooks/common/openshift-node/manage_node.yml new file mode 100644 index 000000000..f48a19a9c --- /dev/null +++ b/playbooks/common/openshift-node/manage_node.yml @@ -0,0 +1,12 @@ +--- +- name: Additional node config + hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}" + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + roles: + - role: openshift_manage_node + openshift_master_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Create group for deployment type + group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False diff --git a/playbooks/common/openshift-node/setup.yml b/playbooks/common/openshift-node/setup.yml new file mode 100644 index 000000000..794c03a67 --- /dev/null +++ b/playbooks/common/openshift-node/setup.yml @@ -0,0 +1,27 @@ +--- +- name: Disable excluders + hosts: oo_nodes_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + +- name: Evaluate node groups + hosts: localhost + become: no + connection: local + tasks: + - name: Evaluate oo_containerized_master_nodes + add_host: + name: "{{ item }}" + groups: oo_containerized_master_nodes + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_nodes_to_config | default([]) }}" + when: + - hostvars[item].openshift is defined + - hostvars[item].openshift.common is defined + - hostvars[item].openshift.common.is_containerized | bool + - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + changed_when: False diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml index 9eb9db316..fa982d533 100644 --- a/roles/ansible_service_broker/defaults/main.yml +++ b/roles/ansible_service_broker/defaults/main.yml @@ -6,6 +6,14 @@ ansible_service_broker_log_level: info ansible_service_broker_output_request: false ansible_service_broker_recovery: true ansible_service_broker_bootstrap_on_startup: true -# Recommended you do not enable this for now ansible_service_broker_dev_broker: false +ansible_service_broker_refresh_interval: 600s +# Recommended you do not enable this for now ansible_service_broker_launch_apb_on_bind: false + +ansible_service_broker_image_pull_policy: IfNotPresent +ansible_service_broker_sandbox_role: edit +ansible_service_broker_auto_escalate: true +ansible_service_broker_registry_tag: latest 
+ansible_service_broker_registry_whitelist: + - '.*-apb$' diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index b3797ef96..0f4b71124 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -17,16 +17,24 @@ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}" ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}" + ansible_service_broker_registry_name: "{{ ansible_service_broker_registry_name | default(__ansible_service_broker_registry_name) }}" ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}" ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}" ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}" ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}" + ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog" + - name: set ansible-service-broker image facts using set prefix and tag set_fact: ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}" ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}" +- slurp: + src: "{{ ansible_service_broker_certs_dir }}/ca.crt" + register: catalog_ca + + - include: validate_facts.yml @@ -42,53 +50,119 @@ namespace: openshift-ansible-service-broker state: present -- name: Set SA cluster-role +- name: create ansible-service-broker client serviceaccount + oc_serviceaccount: + name: asb-client + namespace: openshift-ansible-service-broker + state: present + +- name: Create asb-auth cluster role + oc_clusterrole: + state: present + name: asb-auth + rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["create", "delete"] + - apiGroups: ["authorization.openshift.io"] + resources: ["subjectrulesreview"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + +- name: Create asb-access cluster role + oc_clusterrole: + state: present + name: asb-access + rules: + - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"] + verbs: ["get", "post", "put", "patch", "delete"] + +- name: Bind admin cluster-role to asb serviceaccount oc_adm_policy_user: state: present - namespace: "openshift-ansible-service-broker" + namespace: openshift-ansible-service-broker resource_kind: cluster-role resource_name: admin user: "system:serviceaccount:openshift-ansible-service-broker:asb" -- name: create ansible-service-broker service - oc_service: - name: asb +- name: Bind auth cluster role to asb service account + oc_adm_policy_user: + state: present namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-auth + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Bind asb-access role to asb-client service account + 
oc_adm_policy_user: state: present - labels: - app: openshift-ansible-service-broker - service: asb - ports: - - name: port-1338 - port: 1338 - selector: - app: openshift-ansible-service-broker - service: asb + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-access + user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" -- name: create etcd service - oc_service: - name: etcd +- name: create asb-client token secret + oc_obj: + name: asb-client + state: present + kind: Secret + content: + path: /tmp/asbclientsecretout + data: + apiVersion: v1 + kind: Secret + metadata: + name: asb-client + annotations: + kubernetes.io/service-account.name: asb-client + type: kubernetes.io/service-account-token + +# Using oc_obj because oc_service doesn't seem to allow annotations +# TODO: Extend oc_service to allow annotations +- name: create ansible-service-broker service + oc_obj: + name: asb namespace: openshift-ansible-service-broker state: present - ports: - - name: etcd-advertise - port: 2379 - selector: - app: openshift-ansible-service-broker - service: etcd + kind: Service + content: + path: /tmp/asbsvcout + data: + apiVersion: v1 + kind: Service + metadata: + name: asb + labels: + app: openshift-ansible-service-broker + service: asb + annotations: + service.alpha.openshift.io/serving-cert-secret-name: asb-tls + spec: + ports: + - name: port-1338 + port: 1338 + targetPort: 1338 + protocol: TCP + selector: + app: openshift-ansible-service-broker + service: asb - name: create route for ansible-service-broker service oc_route: name: asb-1338 namespace: openshift-ansible-service-broker state: present + labels: + app: openshift-ansible-service-broker + service: asb service_name: asb port: 1338 - register: asb_route_out - -- name: get ansible-service-broker route name - set_fact: - ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}" + tls_termination: Reencrypt - name: create persistent volume claim for etcd oc_obj: @@ -97,7 +171,7 @@ state: present kind: PersistentVolumeClaim content: - path: /tmp/dcout + path: /tmp/pvcout data: apiVersion: v1 kind: PersistentVolumeClaim @@ -111,50 +185,61 @@ requests: storage: 1Gi -- name: create etcd deployment +- name: Create Ansible Service Broker deployment config oc_obj: - name: etcd + name: asb namespace: openshift-ansible-service-broker state: present - kind: Deployment + kind: DeploymentConfig content: path: /tmp/dcout data: - apiVersion: extensions/v1beta1 - kind: Deployment + apiVersion: v1 + kind: DeploymentConfig metadata: - name: etcd - namespace: openshift-ansible-service-broker + name: asb labels: app: openshift-ansible-service-broker - service: etcd + service: asb spec: + replicas: 1 selector: - matchLabels: - app: openshift-ansible-service-broker - service: etcd + app: openshift-ansible-service-broker strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - replicas: 1 + type: Rolling template: metadata: labels: app: openshift-ansible-service-broker - service: etcd + service: asb spec: - restartPolicy: Always + serviceAccount: asb containers: + - image: "{{ ansible_service_broker_image }}" + name: asb + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config-volume + mountPath: /etc/ansible-service-broker + - name: asb-tls + mountPath: /etc/tls/private + ports: + - containerPort: 1338 + protocol: TCP + env: + - name: BROKER_CONFIG + value: /etc/ansible-service-broker/config.yaml + resources: {} + terminationMessagePath: 
/tmp/termination-log + - image: "{{ ansible_service_broker_etcd_image }}" name: etcd imagePullPolicy: IfNotPresent terminationMessagePath: /tmp/termination-log workingDir: /etcd args: - - '{{ ansible_service_broker_etcd_image_etcd_path }}' - - --data-dir=/data + - "{{ ansible_service_broker_etcd_image_etcd_path }}" + - "--data-dir=/data" - "--listen-client-urls=http://0.0.0.0:2379" - "--advertise-client-urls=http://0.0.0.0:2379" ports: @@ -170,57 +255,15 @@ - name: etcd persistentVolumeClaim: claimName: etcd - -- name: create ansible-service-broker deployment - oc_obj: - name: asb - namespace: openshift-ansible-service-broker - state: present - kind: Deployment - content: - path: /tmp/dcout - data: - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: asb - namespace: openshift-ansible-service-broker - labels: - app: openshift-ansible-service-broker - service: asb - spec: - strategy: - type: Recreate - replicas: 1 - template: - metadata: - labels: - app: openshift-ansible-service-broker - service: asb - spec: - serviceAccount: asb - restartPolicy: Always - containers: - - image: "{{ ansible_service_broker_image }}" - name: asb - imagePullPolicy: IfNotPresent - volumeMounts: - - name: config-volume - mountPath: /etc/ansible-service-broker - ports: - - containerPort: 1338 - protocol: TCP - env: - - name: BROKER_CONFIG - value: /etc/ansible-service-broker/config.yaml - terminationMessagePath: /tmp/termination-log - volumes: - name: config-volume configMap: name: broker-config items: - key: broker-config path: config.yaml + - name: asb-tls + secret: + secretName: asb-tls # TODO: saw an oc_configmap in the library, but didn't understand how to get it to do the following: @@ -239,42 +282,65 @@ name: broker-config namespace: openshift-ansible-service-broker labels: - app: ansible-service-broker + app: openshift-ansible-service-broker data: broker-config: | registry: - name: "{{ ansible_service_broker_registry_type }}" - url: "{{ ansible_service_broker_registry_url }}" - user: "{{ ansible_service_broker_registry_user }}" - pass: "{{ ansible_service_broker_registry_password }}" - org: "{{ ansible_service_broker_registry_organization }}" + - type: {{ ansible_service_broker_registry_type }} + name: {{ ansible_service_broker_registry_name }} + url: {{ ansible_service_broker_registry_url }} + user: {{ ansible_service_broker_registry_user }} + pass: {{ ansible_service_broker_registry_password }} + org: {{ ansible_service_broker_registry_organization }} + tag: {{ ansible_service_broker_registry_tag }} + white_list: {{ ansible_service_broker_registry_whitelist }} dao: - etcd_host: etcd + etcd_host: 0.0.0.0 etcd_port: 2379 log: logfile: /var/log/ansible-service-broker/asb.log stdout: true - level: "{{ ansible_service_broker_log_level }}" + level: {{ ansible_service_broker_log_level }} color: true - openshift: {} + openshift: + host: "" + ca_file: "" + bearer_token_file: "" + sandbox_role: {{ ansible_service_broker_sandbox_role }} + image_pull_policy: {{ ansible_service_broker_image_pull_policy }} broker: dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }} + bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }} + refresh_interval: {{ ansible_service_broker_refresh_interval }} launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }} - recovery: {{ ansible_service_broker_recovery | bool | lower }} output_request: {{ ansible_service_broker_output_request | bool | lower }} - bootstrap_on_startup: {{
ansible_service_broker_bootstrap_on_startup | bool | lower }} + recovery: {{ ansible_service_broker_recovery | bool | lower }} + ssl_cert_key: /etc/tls/private/tls.key + ssl_cert: /etc/tls/private/tls.crt + auto_escalate: {{ ansible_service_broker_auto_escalate }} + auth: + - type: basic + enabled: false + + - name: Create the Broker resource in the catalog oc_obj: name: ansible-service-broker state: present - kind: Broker + kind: ServiceBroker content: path: /tmp/brokerout data: apiVersion: servicecatalog.k8s.io/v1alpha1 - kind: Broker + kind: ServiceBroker metadata: name: ansible-service-broker spec: - url: http://asb.openshift-ansible-service-broker.svc:1338 + url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker + authInfo: + bearer: + secretRef: + name: asb-client + namespace: openshift-ansible-service-broker + kind: Secret + caBundle: "{{ catalog_ca.content }}" diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml index 2519f9f4c..f0a6be226 100644 --- a/roles/ansible_service_broker/tasks/remove.yml +++ b/roles/ansible_service_broker/tasks/remove.yml @@ -1,16 +1,57 @@ --- -- name: remove openshift-ansible-service-broker project - oc_project: - name: openshift-ansible-service-broker - state: absent - - name: remove ansible-service-broker serviceaccount oc_serviceaccount: name: asb namespace: openshift-ansible-service-broker state: absent +- name: remove ansible-service-broker client serviceaccount + oc_serviceaccount: + name: asb-client + namespace: openshift-ansible-service-broker + state: absent + +- name: remove asb-auth cluster role + oc_clusterrole: + state: absent + name: asb-auth + +- name: remove asb-access cluster role + oc_clusterrole: + state: absent + name: asb-access + +- name: Unbind admin cluster-role to asb serviceaccount + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: admin + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Unbind auth cluster role to asb service account + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-auth + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Unbind asb-access role to asb-client service account + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-access + user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" + +- name: remove asb-client token secret + oc_secret: + state: absent + name: asb-client + namespace: openshift-ansible-service-broker + - name: remove ansible-service-broker service oc_service: name: asb @@ -35,19 +76,19 @@ namespace: openshift-ansible-service-broker state: absent -- name: remove etcd deployment +- name: remove Ansible Service Broker deployment config oc_obj: - name: etcd + name: asb namespace: openshift-ansible-service-broker + kind: DeploymentConfig state: absent - kind: Deployment -- name: remove ansible-service-broker deployment +- name: remove secret for broker auth oc_obj: - name: asb + name: asb-auth-secret namespace: openshift-ansible-service-broker + kind: Broker state: absent - kind: Deployment # TODO: saw an oc_configmap in the library, but didn't understand how to get it to do the following: - name: remove config map for ansible-service-broker @@ -62,4 +103,9 @@ oc_obj: name: ansible-service-broker state:
absent - kind: Broker + kind: ServiceBroker + +- name: remove openshift-ansible-service-broker project + oc_project: + name: openshift-ansible-service-broker + state: absent diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml index 15e448515..3e9639adf 100644 --- a/roles/ansible_service_broker/vars/default_images.yml +++ b/roles/ansible_service_broker/vars/default_images.yml @@ -8,6 +8,7 @@ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd __ansible_service_broker_registry_type: dockerhub +__ansible_service_broker_registry_name: dh __ansible_service_broker_registry_url: null __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml index ce2ae8365..9c576cb76 100644 --- a/roles/ansible_service_broker/vars/openshift-enterprise.yml +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -7,7 +7,9 @@ __ansible_service_broker_etcd_image_prefix: rhel7/ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /bin/etcd + __ansible_service_broker_registry_type: rhcc +__ansible_service_broker_registry_name: rh __ansible_service_broker_registry_url: "https://registry.access.redhat.com" __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 591367467..866ed0452 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -4,6 +4,7 @@ systemd: name: "{{ openshift.docker.service_name }}" state: restarted + daemon_reload: yes register: r_docker_restart_docker_result until: not r_docker_restart_docker_result | failed retries: 3 diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 7ece0e061..f73f90686 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -13,17 +13,17 @@ - name: Use Package Docker if Requested include: package_docker.yml when: - - not l_use_system_container - - not l_use_crio_only + - not l_use_system_container + - not l_use_crio_only - name: Use System Container Docker if Requested include: systemcontainer_docker.yml when: - - l_use_system_container - - not l_use_crio_only + - l_use_system_container + - not l_use_crio_only - name: Add CRI-O usage Requested include: systemcontainer_crio.yml when: - - l_use_crio - - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] + - l_use_crio + - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 4215dc5bd..dbe0b0d28 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -48,7 +48,9 @@ template: dest: "{{ docker_systemd_dir }}/custom.conf" src: custom.conf.j2 - when: not os_firewall_use_firewalld | default(False) | bool + notify: + - restart docker + when: not (os_firewall_use_firewalld | default(False)) | bool - name: Add enterprise registry, if necessary set_fact: @@ -61,19 +63,29 @@ - stat: path=/etc/sysconfig/docker register: docker_check -- name: Comment old registry params in /etc/sysconfig/docker +- name: Set registry params lineinfile: dest: 
/etc/sysconfig/docker regexp: '^{{ item.reg_conf_var }}=.*$' - line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}" + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" + when: + - item.reg_fact_val != [] + - docker_check.stat.isreg is defined + - docker_check.stat.isreg with_items: - reg_conf_var: ADD_REGISTRY + reg_fact_val: "{{ l2_docker_additional_registries }}" + reg_flag: --add-registry - reg_conf_var: BLOCK_REGISTRY + reg_fact_val: "{{ l2_docker_blocked_registries }}" + reg_flag: --block-registry - reg_conf_var: INSECURE_REGISTRY + reg_fact_val: "{{ l2_docker_insecure_registries }}" + reg_flag: --insecure-registry notify: - restart docker -- name: Place additional/blocked/insecure registies in /etc/containers/registries.conf +- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf template: dest: "{{ containers_registries_conf_path }}" src: registries.conf @@ -127,18 +139,6 @@ notify: - restart docker -- name: Check for credentials file for registry auth - stat: - path: "{{ docker_cli_auth_config_path }}/config.json" - when: oreg_auth_user is defined - register: docker_cli_auth_credentials_stat - -- name: Create credentials for docker cli registry auth - command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - - name: Start the Docker service systemd: name: docker @@ -153,4 +153,16 @@ - set_fact: docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}" +- name: Check for credentials file for registry auth + stat: + path: "{{ docker_cli_auth_config_path }}/config.json" + when: oreg_auth_user is defined + register: docker_cli_auth_credentials_stat + +- name: Create credentials for docker cli registry auth + command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + - meta: flush_handlers diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 66ce475e1..fdc6cd24a 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -1,4 +1,5 @@ --- + # TODO: Much of this file is shared with container engine tasks - set_fact: l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" @@ -13,6 +14,22 @@ l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" when: l2_docker_additional_registries +- set_fact: + l_openshift_image_tag: "{{ openshift_image_tag | string }}" + when: openshift_image_tag is defined + +- set_fact: + l_openshift_image_tag: "latest" + when: + - openshift_image_tag is not defined + - openshift_release == "latest" + +- set_fact: + l_openshift_image_tag: "{{ openshift_release | string }}" + when: + - openshift_image_tag is not defined + - openshift_release != "latest" + - name: Ensure container-selinux is installed package: name: container-selinux @@ -92,16 +109,23 @@ - block: - - name: Set to default prepend + - name: Set CRI-O image defaults set_fact: l_crio_image_prepend: "docker.io/gscrivano" l_crio_image_name: "cri-o-fedora" + 
l_crio_image_tag: "latest" - name: Use Centos based image when distribution is CentOS set_fact: l_crio_image_name: "cri-o-centos" when: ansible_distribution == "CentOS" + - name: Set CRI-O image tag + set_fact: + l_crio_image_tag: "{{ l_openshift_image_tag }}" + when: + - openshift_deployment_type == 'openshift-enterprise' + - name: Use RHEL based image when distribution is Red Hat set_fact: l_crio_image_prepend: "registry.access.redhat.com/openshift3" @@ -110,7 +134,7 @@ - name: Set the full image name set_fact: - l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest" + l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 - name: Use a specific image if requested @@ -138,7 +162,7 @@ image: "{{ l_crio_image }}" state: latest -- name: Remove CRI-o default configuration files +- name: Remove CRI-O default configuration files file: path: "{{ item }}" state: absent diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 8b43393cb..15c6a55db 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -1,4 +1,21 @@ --- + +- set_fact: + l_openshift_image_tag: "{{ openshift_image_tag | string }}" + when: openshift_image_tag is defined + +- set_fact: + l_openshift_image_tag: "latest" + when: + - openshift_image_tag is not defined + - openshift_release == "latest" + +- set_fact: + l_openshift_image_tag: "{{ openshift_release | string }}" + when: + - openshift_image_tag is not defined + - openshift_release != "latest" + # If docker_options are provided we should fail. We should not install docker and ignore # the user's configuration. NOTE: docker_options == inventory:openshift_docker_options - name: Fail quickly if openshift_docker_options are set @@ -89,6 +106,13 @@ - name: Set to default prepend set_fact: l_docker_image_prepend: "gscrivano" + l_docker_image_tag: "latest" + + - name: Set container engine image tag + set_fact: + l_docker_image_tag: "{{ l_openshift_image_tag }}" + when: + - openshift_deployment_type == 'openshift-enterprise' - name: Use Red Hat Registry for image when distribution is Red Hat set_fact: @@ -102,7 +126,7 @@ - name: Set the full image name set_fact: - l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest" + l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}" # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 - name: Use a specific image if requested diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2 index b4ee84fd0..b715c2ffa 100644 --- a/roles/docker/templates/crio.conf.j2 +++ b/roles/docker/templates/crio.conf.j2 @@ -13,12 +13,12 @@ runroot = "/var/run/containers/storage" # storage_driver selects which storage driver is used to manage storage # of images and containers. -storage_driver = "overlay2" +storage_driver = "overlay" # storage_option is used to pass an option to the storage driver.
storage_option = [ {% if ansible_distribution in ['RedHat', 'CentOS'] %} - "overlay2.override_kernel_check=1" + "overlay.override_kernel_check=1" {% endif %} ] @@ -35,6 +35,10 @@ stream_address = "" # stream_port is the port on which the stream server will listen stream_port = "10010" +# file_locking is whether file-based locking will be used instead of +# in-memory locking +file_locking = true + # The "crio.runtime" table contains settings pertaining to the OCI # runtime used and options for how to set up and manage the OCI runtime. [crio.runtime] @@ -67,6 +71,9 @@ runtime_untrusted_workload = "" # container runtime for all containers. default_workload_trust = "trusted" +# no_pivot instructs the runtime to not use pivot_root, but instead use MS_MOVE +no_pivot = false + # conmon is the path to conmon binary, used for managing the runtime. conmon = "/usr/libexec/crio/conmon" @@ -93,6 +100,16 @@ apparmor_profile = "crio-default" # for the runtime. cgroup_manager = "systemd" +# hooks_dir_path is the oci hooks directory for automatically executed hooks +hooks_dir_path = "/usr/share/containers/oci/hooks.d" + +# pids_limit is the number of processes allowed in a container +pids_limit = 1024 + +# log_size_max is the max limit for the container log size in bytes. +# Negative values indicate that no limit is imposed. +log_size_max = -1 + # The "crio.image" table contains settings pertaining to the # management of OCI images. [crio.image] @@ -115,6 +132,10 @@ pause_command = "/pause" # unspecified so that the default system-wide policy will be used. signature_policy = "" +# image_volumes controls how image volumes are handled. +# The valid values are mkdir and ignore. +image_volumes = "mkdir" + # insecure_registries is used to skip TLS verification when pulling images. insecure_registries = [ {{ l_insecure_crio_registries|default("") }} @@ -125,6 +146,7 @@ insecure_registries = [ registries = [ {{ l_additional_crio_registries|default("") }} ] + # The "crio.network" table contains settings pertaining to the # management of CNI plugins. [crio.network] diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2 index 9b47cb6ab..713412473 100644 --- a/roles/docker/templates/custom.conf.j2 +++ b/roles/docker/templates/custom.conf.j2 @@ -3,3 +3,9 @@ [Unit] Wants=iptables.service After=iptables.service + +# The following line is a work-around to ensure docker is restarted whenever +# iptables is restarted. This ensures the proper iptables rules will be in +# place for docker. +# Note: This will also cause docker to be stopped if iptables is stopped. +PartOf=iptables.service diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf index c55dbd84f..d379b2be0 100644 --- a/roles/docker/templates/registries.conf +++ b/roles/docker/templates/registries.conf @@ -6,7 +6,7 @@ # The default location for this configuration file is /etc/containers/registries.conf. -# The only valid categories are: 'registries', 'insecure_registies', +# The only valid categories are: 'registries', 'insecure_registries', # and 'block_registries'. 
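Both systemcontainer task files above build `l_openshift_image_tag` from the same three-step `set_fact` chain, and the CRI-O tasks pre-quote registry lists before templating them into the TOML arrays of crio.conf.j2. The following is a minimal Python sketch of that precedence and quoting logic, not code from the role; the helper names are illustrative:

```
# Sketch of the l_openshift_image_tag precedence used by
# systemcontainer_crio.yml and systemcontainer_docker.yml above.
def image_tag(openshift_image_tag=None, openshift_release=None):
    if openshift_image_tag is not None:   # an explicit tag always wins
        return str(openshift_image_tag)
    if openshift_release == "latest":     # "latest" release -> "latest" tag
        return "latest"
    return str(openshift_release)         # otherwise the release is the tag

# Sketch of the '", "'.join() quoting used for the crio.conf registry lists.
def toml_registry_list(registries):
    return '"{}"'.format('", "'.join(registries))

assert image_tag(None, "v3.7") == "v3.7"
assert image_tag("v3.7.9", "latest") == "v3.7.9"
assert toml_registry_list(["a.example.com", "b.example.com"]) == '"a.example.com", "b.example.com"'
```

Rendered inside the `insecure_registries = [ ... ]` or `registries = [ ... ]` blocks of crio.conf.j2, the joined string yields a valid TOML string array.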
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 18164050a..807b9541a 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -78,7 +78,7 @@ etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' els etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service" r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}" +r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" r_etcd_os_firewall_deny: [] diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index f643d292d..3e69af314 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,6 +1,4 @@ --- -- include: server_certificates.yml - - name: Set hostname and ip facts set_fact: # Store etcd_hostname and etcd_ip such that they will be available diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 02f5a5f64..889069485 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -12,3 +12,12 @@ until: not l_docker_restart_docker_in_flannel_result | failed retries: 3 delay: 30 + +- name: restart node + systemd: + name: "{{ openshift.common.service_type }}-node" + state: restarted + register: l_restart_node_result + until: not l_restart_node_result | failed + retries: 3 + delay: 30 diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 033240e62..ac369b882 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -81,6 +81,7 @@ class CallbackModule(CallbackBase): 'installer_phase_metrics', 'installer_phase_logging', 'installer_phase_servicecatalog', + 'installer_phase_management', ] # Define the attributes of the installer phases @@ -133,6 +134,10 @@ class CallbackModule(CallbackBase): 'title': 'Service Catalog Install', 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml' }, + 'installer_phase_management': { + 'title': 'Management Install', + 'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml' + }, } # Find the longest phase title diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 1e6eb2386..05b2763d5 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1421,7 +1421,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 8c6a81cc8..d1dc4caf8 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -1399,7 +1399,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or 
isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 4a7847e88..152f270ab 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index b8af5cad9..3082f5890 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 3364f8de3..1ceaf5d0d 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c64d7ffd2..0771aa5a5 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1511,7 +1511,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1886,13 +1886,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1909,6 +1911,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 492494bda..146f71f68 
100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1536,7 +1536,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -2230,13 +2230,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -2253,6 +2255,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index b412ca8af..9761b4b4e 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1385,7 +1385,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 8bbc22c49..047edffbb 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -1391,7 +1391,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index ad17051cb..0b6a8436b 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1435,7 +1435,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 74a84ac89..1f52fba40 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1402,7 +1402,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == 
ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index eea1516ae..1b63a6c13 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -1375,7 +1375,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index dc33d3b8a..94b08d9ce 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -1394,7 +1394,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 88fd9554d..ad837fdb5 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1411,7 +1411,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 8408f9ebc..892546e56 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1414,7 +1414,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index d1be0b534..38df585f0 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1346,7 +1346,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 9a281e6cd..70632f86d 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1403,7 +1403,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): 
data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index b503c330b..4eee748d7 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1400,7 +1400,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 7a9e3bf89..2e73a7645 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 875e473ad..e003770d8 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -90,6 +90,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. 
reencrypt @@ -1445,7 +1451,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1469,6 +1475,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -1483,6 +1490,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -1508,6 +1516,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host @@ -1715,6 +1725,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], @@ -1819,6 +1830,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index ec3635753..c142f1f43 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1389,7 +1389,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index c010607e8..0614f359d 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -90,6 +90,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets @@ -1441,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1464,13 +1470,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1487,6 +1495,8 @@ class 
SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): @@ -1698,8 +1708,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create @@ -1783,6 +1792,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index e83a6e26d..3e8aea4f1 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1448,7 +1448,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 0d46bbf96..646a39224 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 662d77ec1..99a8e8f3d 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 574f109e4..e88f3ae8d 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -1405,7 +1405,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_user.py 
b/roles/lib_openshift/library/oc_user.py index e430546ee..7bbe38819 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -1447,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index a12620968..63adbd6ac 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1359,7 +1359,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 134b2ad19..3c07f8d4b 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -1436,7 +1436,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py index f2f5c5095..969cf8bcd 100644 --- a/roles/lib_openshift/src/ansible/oc_route.py +++ b/roles/lib_openshift/src/ansible/oc_route.py @@ -13,6 +13,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py index faa7c1772..ee2827e69 100644 --- a/roles/lib_openshift/src/ansible/oc_secret.py +++ b/roles/lib_openshift/src/ansible/oc_secret.py @@ -15,6 +15,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py index 3a1bd732f..dc2f7977b 100644 --- a/roles/lib_openshift/src/class/oc_route.py +++ b/roles/lib_openshift/src/class/oc_route.py @@ -118,6 +118,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py index 4ee6443e9..5322d6241 100644 
--- a/roles/lib_openshift/src/class/oc_secret.py +++ b/roles/lib_openshift/src/class/oc_secret.py @@ -142,8 +142,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route index a12999c9e..f0d38ab5f 100644 --- a/roles/lib_openshift/src/doc/route +++ b/roles/lib_openshift/src/doc/route @@ -39,6 +39,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. reencrypt diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret index 76b147f6f..a27f90f38 100644 --- a/roles/lib_openshift/src/doc/secret +++ b/roles/lib_openshift/src/doc/secret @@ -39,6 +39,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 5a307cdb3..1fb32164e 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -597,7 +597,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py index 3b54a24fb..b106866cb 100644 --- a/roles/lib_openshift/src/lib/route.py +++ b/roles/lib_openshift/src/lib/route.py @@ -11,6 +11,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -25,6 +26,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -50,6 +52,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py index a1c202442..ad4b6aa36 100644 --- a/roles/lib_openshift/src/lib/secret.py +++ b/roles/lib_openshift/src/lib/secret.py @@ -10,13 +10,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -33,6 +35,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: 
disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py index afdb5e4dc..5699f123b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_route.py +++ b/roles/lib_openshift/src/test/unit/test_oc_route.py @@ -39,6 +39,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'passthrough', 'dest_cacert_path': None, 'cacert_path': None, @@ -64,7 +65,10 @@ class OCRouteTest(unittest.TestCase): "selfLink": "/oapi/v1/namespaces/default/routes/test", "uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26", "resourceVersion": "439182", - "creationTimestamp": "2017-02-07T01:59:48Z" + "creationTimestamp": "2017-02-07T01:59:48Z", + "labels": { + "route": "route" + } }, "spec": { "host": "test.example", @@ -141,6 +145,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'edge', 'dest_cacert_path': None, 'cacert_path': None, @@ -166,7 +171,8 @@ class OCRouteTest(unittest.TestCase): "namespace": "default", "resourceVersion": "517745", "selfLink": "/oapi/v1/namespaces/default/routes/test", - "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a" + "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a", + "labels": {"route": "route"} }, "spec": { "host": "test.openshift.com", @@ -250,6 +256,7 @@ metadata: self.assertTrue(results['changed']) self.assertEqual(results['state'], 'present') self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test') + self.assertEqual(results['results']['results'][0]['metadata']['labels']['route'], 'route') # Making sure our mock was called as we expected mock_cmd.assert_has_calls([ diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md index 696efbea5..ff96081fe 100644 --- a/roles/openshift_aws/README.md +++ b/roles/openshift_aws/README.md @@ -23,7 +23,6 @@ From this role: | openshift_aws_ami_copy_wait | False | openshift_aws_users | [] | openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }} -| openshift_aws_create_vpc | False | openshift_aws_node_group_type | master | openshift_aws_elb_cert_arn | '' | openshift_aws_kubernetes_cluster_status | owned @@ -72,7 +71,6 @@ Example Playbook vars: openshift_aws_clusterid: test openshift_aws_region: us-east-1 - openshift_aws_create_vpc: true ``` License diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 4d88db037..ea09857b0 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -1,5 +1,4 @@ --- -openshift_aws_create_vpc: True openshift_aws_create_s3: True openshift_aws_create_iam_cert: True openshift_aws_create_security_groups: True @@ -17,7 +16,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' -openshift_aws_iam_cert_chain_path: '' openshift_aws_iam_cert_key_path: '' openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}" @@ -144,6 +142,11 @@ openshift_aws_elb_instance_filter: "tag:host-type": "{{ openshift_aws_node_group_type }}" instance-state-name: running +openshift_aws_launch_config_security_groups: +- "{{ openshift_aws_clusterid }}" # default sg +- "{{ openshift_aws_clusterid }}_{{ 
openshift_aws_node_group_type }}" # node type sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + openshift_aws_node_security_groups: default: name: "{{ openshift_aws_clusterid }}" diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index a1fdd66fc..7bc3184df 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -29,9 +29,9 @@ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type else openshift_aws_elb_listeners }}" -- name: "Create ELB {{ openshift_aws_elb_name }}" +- name: "Create ELB {{ l_openshift_aws_elb_name }}" ec2_elb_lb: - name: "{{ openshift_aws_elb_name }}" + name: "{{ l_openshift_aws_elb_name }}" state: present security_group_names: "{{ openshift_aws_elb_security_groups }}" idle_timeout: "{{ openshift_aws_elb_idle_timout }}" @@ -49,10 +49,10 @@ # It is necessary to ignore_errors here because the instances are not in 'ready' # state when first added to ELB -- name: "Add instances to ELB {{ openshift_aws_elb_name }}" +- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}" ec2_elb: instance_id: "{{ item.id }}" - ec2_elbs: "{{ openshift_aws_elb_name }}" + ec2_elbs: "{{ l_openshift_aws_elb_name }}" state: present region: "{{ openshift_aws_region }}" wait: False diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml index cd9772a25..f74a62b8b 100644 --- a/roles/openshift_aws/tasks/iam_cert.yml +++ b/roles/openshift_aws/tasks/iam_cert.yml @@ -11,17 +11,23 @@ - "'failed' in elb_cert_chain" - elb_cert_chain.failed - "'msg' in elb_cert_chain" - - "'already exists and has a different certificate body' in elb_cert_chain.msg" - - "'BotoServerError' in elb_cert_chain.msg" + - "'already exists and has a different certificate body' in elb_cert_chain.msg or 'BotoServerError' in elb_cert_chain.msg or 'Traceback' in elb_cert_chain.msg.module_stderr" when: - openshift_aws_create_iam_cert | bool - openshift_aws_iam_cert_path != '' - openshift_aws_iam_cert_key_path != '' - openshift_aws_elb_cert_arn == '' +- debug: msg="{{ elb_cert_chain }}" + - name: set_fact openshift_aws_elb_cert_arn set_fact: openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}" + when: + - openshift_aws_create_iam_cert | bool + - openshift_aws_iam_cert_path != '' + - openshift_aws_iam_cert_key_path != '' + - openshift_aws_elb_cert_arn == '' - name: wait for cert to propagate pause: diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index 65c5a6cc0..e6be9969c 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -4,13 +4,18 @@ when: - openshift_aws_ami is undefined +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + - name: fetch the security groups for launch config ec2_group_facts: filters: - group-name: - - "{{ openshift_aws_clusterid }}" # default sg - - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg - - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + group-name: "{{ openshift_aws_launch_config_security_groups }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" region: "{{ openshift_aws_region }}" register: ec2sgs @@ -21,7 +26,7 @@ region: "{{ openshift_aws_region }}" image_id: "{{ openshift_aws_ami }}" instance_type: "{{ 
openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" - security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" user_data: |- #cloud-config {% if openshift_aws_node_group_type != 'master' %} diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml new file mode 100644 index 000000000..737cfc7a6 --- /dev/null +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -0,0 +1,22 @@ +--- +- name: fetch elbs + ec2_elb_facts: + region: "{{ openshift_aws_region }}" + names: + - "{{ item }}" + with_items: + - "{{ openshift_aws_elb_name }}-external" + - "{{ openshift_aws_elb_name }}-internal" + delegate_to: localhost + register: elbs + +- debug: var=elbs + +- name: set fact + set_fact: + openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + osm_custom_cors_origins: + - "{{ elbs.results[1].elbs[0].dns_name }}" + - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + with_items: "{{ groups['masters'] }}" diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index 189caeaee..a8518d43a 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -1,16 +1,8 @@ --- -- when: openshift_aws_create_vpc | bool - name: create default vpc - include: vpc.yml - - when: openshift_aws_create_iam_cert | bool name: create the iam_cert for elb certificate include: iam_cert.yml -- when: openshift_aws_users | length > 0 - name: create aws ssh keypair - include: ssh_keys.yml - - when: openshift_aws_create_s3 | bool name: create s3 bucket for registry include: s3.yml @@ -34,14 +26,14 @@ include: elb.yml vars: openshift_aws_elb_direction: internal - openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal" + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal" openshift_aws_elb_scheme: internal - name: create our master external load balancers include: elb.yml vars: openshift_aws_elb_direction: external - openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external" + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external" openshift_aws_elb_scheme: internet-facing - name: wait for ssh to become available diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/provision_instance.yml index 48555e5da..1384bae59 100644 --- a/roles/openshift_aws/tasks/build_ami.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -1,16 +1,4 @@ --- -- when: openshift_aws_create_vpc | bool - name: create a vpc - include: vpc.yml - -- when: openshift_aws_users | length > 0 - name: create aws ssh keypair - include: ssh_keys.yml - -- when: openshift_aws_create_security_groups | bool - name: Create compute security_groups - include: security_group.yml - - name: query vpc ec2_vpc_net_facts: region: "{{ openshift_aws_region }}" @@ -33,7 +21,7 @@ key_name: "{{ openshift_aws_ssh_key_name }}" group: "{{ openshift_aws_build_ami_group }}" instance_type: m4.xlarge - vpc_subnet_id: "{{ subnetout.subnets[0].id }}" + vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}" image: "{{ openshift_aws_base_ami }}" volumes: - device_name: /dev/sdb @@ -46,3 +34,30 @@ Name: "{{ 
openshift_aws_base_ami_name }}" instance_tags: Name: "{{ openshift_aws_base_ami_name }}" + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_base_ami_name }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ instancesout.instances[0].public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + +- name: Pause 10 seconds to ensure ssh actually accepts logins + pause: + seconds: 20 + +- name: add host to nodes + add_host: + groups: nodes + name: "{{ instancesout.instances[0].public_dns_name }}" diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml new file mode 100644 index 000000000..166f3b938 --- /dev/null +++ b/roles/openshift_aws/tasks/setup_master_group.yml @@ -0,0 +1,35 @@ +--- +- name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid }}" + +- name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region }}" + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: add new master to masters group + add_host: + groups: "masters,etcd,nodes" + name: "{{ item.public_dns_name }}" + hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}" + with_items: "{{ instancesout.instances }}" + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_dns_name }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" diff --git a/roles/openshift_ca/defaults/main.yml b/roles/openshift_ca/defaults/main.yml index ecfcc88b3..742b15df4 100644 --- a/roles/openshift_ca/defaults/main.yml +++ b/roles/openshift_ca/defaults/main.yml @@ -1,3 +1,11 @@ --- openshift_ca_cert_expire_days: 1825 openshift_master_cert_expire_days: 730 + +openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" +openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" +openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" +openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" +openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig" + +openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index dfbdf0cc7..f8b784a63 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -14,4 +14,3 @@ galaxy_info: - system dependencies: - role: openshift_cli -- role: openshift_named_certificates diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml index d04c1766d..4d80bf921 100644 --- a/roles/openshift_ca/vars/main.yml +++ b/roles/openshift_ca/vars/main.yml @@ -1,9 +1,2 @@ --- -openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" -openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" -openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" -openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" -openshift_version: "{{ openshift_pkg_version | default('') }}" - 
-openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig" loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md deleted file mode 100644 index 8283afed6..000000000 --- a/roles/openshift_cfme/README.md +++ /dev/null @@ -1,404 +0,0 @@ -# OpenShift-Ansible - CFME Role - -# PROOF OF CONCEPT - Alpha Version - -This role is based on the work in the upstream -[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods) -project. For additional literature on configuration specific to -ManageIQ (optional post-installation tasks), visit the project's -[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration). - -Please submit a -[new issue](https://github.com/openshift/openshift-ansible/issues/new) -if you run into bugs with this role or wish to request enhancements. - -# Important Notes - -This is an early *proof of concept* role to install the Cloud Forms -Management Engine (ManageIQ) on OpenShift Container Platform (OCP). - -* This role is still in **ALPHA STATUS** -* Many options are hard-coded still (ex: NFS setup) -* Not many configurable options yet -* **Should** be ran on a dedicated cluster -* **Will not run** on undersized infra -* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable - -## Requirements - -**NOTE:** These requirements are copied from the upstream -[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods) -project. - -### Prerequisites: - -* - [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html) - or - [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html) - provisioned -* NFS or other compatible volume provider -* A cluster-admin user (created by role if required) - -### Cluster Sizing - -In order to avoid random deployment failures due to resource -starvation, we recommend a minimum cluster size for a **test** -environment. - -| Type | Size | CPUs | Memory | -|----------------|---------|----------|----------| -| Masters | `1+` | `8` | `12GB` | -| Nodes | `2+` | `4` | `8GB` | -| PV Storage | `25GB` | `N/A` | `N/A` | - - -![Basic CFME Deployment](img/CFMEBasicDeployment.png) - -**CFME has hard-requirements for memory. CFME will NOT install if your - infrastructure does not meet or exceed the requirements given - above. Do not run this playbook if you do not have the required - memory, you will just waste your time.** - - -### Other sizing considerations - -* Recommendations assume MIQ will be the **only application running** - on this cluster. -* Alternatively, you can provision an infrastructure node to run - registry/metrics/router/logging pods. -* Each MIQ application pod will consume at least `3GB` of RAM on initial - deployment (blank deployment without providers). -* RAM consumption will ramp up higher depending on appliance use, once - providers are added expect higher resource consumption. 
- - -### Assumptions - -1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements -1) Your NFS server is on your master host -1) Your PV backing NFS storage volume is mounted on `/exports/` - -Required directories that NFS will export to back the PVs: - -* `/exports/miq-pv0[123]` - -If the required directories are not present at install-time, they will -be created using the recommended permissions per the -[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data): - -* UID/GID: `root`/`root` -* Mode: `0775` - -**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS - storage, **ensure** it is mounted on `/exports/` **before** running - this role. - - - -## Role Variables - -Core variables in this role: - -| Name | Default value | Description | -|-------------------------------|---------------|---------------| -| `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding | - - -Variables you may override have defaults defined in -[defaults/main.yml](defaults/main.yml). - - -# Important Notes - -This is a **tech preview** status role presently. Use it with the same -caution you would give any other pre-release software. - -**Most importantly** follow this one rule: don't re-run the entrypoint -playbook multiple times in a row without cleaning up after previous -runs if some of the CFME steps have ran. This is a known -flake. Cleanup instructions are provided at the bottom of this README. - - -# Usage - -This section describes the basic usage of this role. All parameters -will use their [default values](defaults/main.yml). - -## Pre-flight Checks - -**IMPORTANT:** As documented above in [the prerequisites](#prerequisites), - you **must already** have your OCP cluster up and running. - -**Optional:** The ManageIQ pod is fairly large (about 1.7 GB) so to -save some spin-up time post-deployment, you can begin pre-pulling the -docker image to each of your nodes now: - -``` -root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine -``` - -## Getting Started - -1) The *entry point playbook* to install CFME is located in -[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml) -directory - -2) Update your existing `hosts` inventory file and ensure the -parameter `openshift_cfme_install_app` is set to `True` under the -`[OSEv3:vars]` block. - -2) Using your existing `hosts` inventory file, run `ansible-playbook` -with the entry point playbook: - -``` -$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml -``` - -## Next Steps - -Once complete, the playbook will let you know: - - -``` -TASK [openshift_cfme : Status update] ********************************************************* -ok: [ho.st.na.me] => { - "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n" -} -``` - -This will take several minutes (*possibly 10 or more*, depending on -your network connection). However, you can get some insight into the -deployment process during initialization. - -### oc describe pod manageiq-0 - -*Some useful information about the output you will see if you run the -`oc describe pod manageiq-0` command* - -**Readiness probe**s - These will take a while to become -`Healthy`. The initial health probes won't even happen for at least 8 -minutes depending on how long it takes you to pull down the large -images. 
ManageIQ is a large application so it may take a considerable -amount of time for it to deploy and be marked as `Healthy`. - -If you go to the node you know the application is running on (check -for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe` -output) you can run a `docker pull` command to monitor the progress of -the image pull: - -``` -[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine -Trying to pull repository docker.io/manageiq/manageiq-pods ... -sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods -Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a -Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine -``` - -The example above demonstrates the case where the image has been -successfully pulled already. - -If the image isn't completely pulled already then you will see -multiple progress bars detailing each image layer download status. - - -### rsh - -*Useful inspection/progress monitoring techniques with the `oc rsh` -command.* - - -On your master node, switch to the `cfme` project (or whatever you -named it if you overrode the `openshift_cfme_project` variable) and -check on the pod states: - -``` -[root@cfme-master01 ~]# oc project cfme -Now using project "cfme" on server "https://10.10.0.100:8443". - -[root@cfme-master01 ~]# oc get pod -NAME READY STATUS RESTARTS AGE -manageiq-0 0/1 Running 0 14m -memcached-1-3lk7g 1/1 Running 0 14m -postgresql-1-12slb 1/1 Running 0 14m -``` - -Note how the `manageiq-0` pod says `0/1` under the **READY** -column. After some time (depending on your network connection) you'll -be able to `rsh` into the pod to find out more of what's happening in -real time. First, the easy-mode command, run this once `rsh` is -available and then watch until it says `Started Initialize Appliance -Database`: - -``` -[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service -``` - -For the full explanation of what this means, and more interactive -inspection techniques, keep reading on. - -To obtain a shell on our `manageiq` pod we use this command: - -``` -[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l -``` - -The `rsh` command opens a shell in your pod for you. In this case it's -the pod called `manageiq-0`. `systemd` is managing the services in -this pod so we can use the `list-units` command to see what is running -currently: `# systemctl list-units | grep appliance`. - -If you see the `appliance-initialize` service running, this indicates -that basic setup is still in progress. 
We can monitor the process with -the `journalctl` command like so: - - -``` -[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status == -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV == -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment == -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config == -Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance == -Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key -Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database -Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database... -Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting -Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete -Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data == -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup == -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816 -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53 -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks == -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, 
skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping
-Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service.
-Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database.
-```
-
-Most of what we see above is the initial database seeding
-process. This process isn't very quick, so be patient.
-
-At the bottom of the log there is a special line from `systemctl`,
-`Removed symlink
-/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The
-`appliance-initialize` service is no longer marked as enabled, which
-indicates that the base application initialization is now complete.
-
-We're not done yet, though; other ancillary services run in this pod
-to support the application. *Still in the rsh shell*, use the `ps`
-command to watch for the `httpd` processes to start. You will see
-output similar to the following when that stage has completed:
-
-```
-[root@manageiq-0 vmdb]# ps aux | grep http
-root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-```
-
-Furthermore, you can find other related processes by looking for ones
-with `MIQ` in their name:
-
-```
-[root@manageiq-0 vmdb]# ps aux | grep miq
-root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server
-root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic
-root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic
-root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic
-root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic
-root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5
-root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems
-root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting
-root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting
-root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker]
-root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker]
-root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker]
-root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq
-```
-
-Finally, *still in the rsh shell*, to test if the application is
-running correctly, we can request the application homepage. If the
-page is available, the page title will be `ManageIQ: Login`:
-
-```
-[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>'
-<title>
-ManageIQ: Login
-</title>
-```
-
-**Note:** The `-s` flag makes `curl` silent, and the `-k` flag tells
-it to ignore errors about untrusted certificates.
-
-
-
-# Additional Upstream Resources
-
-Below are some useful resources from the upstream project
-documentation. You may find these of value.
-
-* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
-* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes)
-* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
-
-
-# Manual Cleanup
-
-At this time uninstallation/cleanup is still a manual process. You
-will have to follow a few steps to fully remove CFME from your
-cluster.
-
-Delete the project:
-
-* `oc delete project cfme`
-
-Delete the PVs:
-
-* `oc delete pv miq-pv01`
-* `oc delete pv miq-pv02`
-* `oc delete pv miq-pv03`
-
-Clean out the old PV data:
-
-* `cd /exports/`
-* `find miq* -type f -delete`
-* `find miq* -type d -delete`
-
-Remove the NFS exports:
-
-* `rm /etc/exports.d/openshift_cfme.exports`
-* `exportfs -ar`
-
-Delete the user:
-
-* `oc delete user cfme`
-
-**NOTE:** The `oc delete project cfme` command will return quickly;
-however, the deletion will continue to run in the background. Keep
-running `oc get project` after you've completed the other steps to
-monitor the pods and the final project termination progress.
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
deleted file mode 100644
index b82c2e602..000000000
--- a/roles/openshift_cfme/defaults/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Namespace for the CFME project (Note: changed post-3.6 to use
-# reserved 'openshift-' namespace prefix)
-openshift_cfme_project: openshift-cfme
-# Namespace/project description
-openshift_cfme_project_description: ManageIQ - CloudForms Management Engine
-# Basic user assigned the `admin` role for the project
-openshift_cfme_user: cfme
-# Project system account for enabling privileged pods
-openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default"
-# All the required exports
-openshift_cfme_pv_exports:
-  - miq-pv01
-  - miq-pv02
-  - miq-pv03
-# PV template files and their created object names
-openshift_cfme_pv_data:
-  - pv_name: miq-pv01
-    pv_template: miq-pv-db.yaml
-    pv_label: CFME DB PV
-  - pv_name: miq-pv02
-    pv_template: miq-pv-region.yaml
-    pv_label: CFME Region PV
-  - pv_name: miq-pv03
-    pv_template: miq-pv-server.yaml
-    pv_label: CFME Server PV
-
-# Tuning parameter to use more than 5 images at once from an ImageStream
-openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# TODO: Refactor '_install_app' variable. This is just for testing but
-# maybe in the future it should control the entire yes/no for CFME.
-#
-# Whether or not the manageiq app should be initialized (`oc new-app
-# --template=manageiq`). If False, everything UP TO 'new-app' is run.
-openshift_cfme_install_app: False -# Docker image to pull -openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" -openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" -openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" -openshift_cfme_application_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'app-latest-fine' }}" -openshift_cfme_memcached_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'memcached-latest-fine' }}" -openshift_cfme_postgresql_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'postgresql-latest-fine' }}" diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml deleted file mode 100644 index 8f0d2af38..000000000 --- a/roles/openshift_cfme/files/miq-template.yaml +++ /dev/null @@ -1,566 +0,0 @@ ---- -path: /tmp/miq-template-out -data: - apiVersion: v1 - kind: Template - labels: - template: manageiq - metadata: - name: manageiq - annotations: - description: "ManageIQ appliance with persistent storage" - tags: "instant-app,manageiq,miq" - iconClass: "icon-rails" - objects: - - apiVersion: v1 - kind: Secret - metadata: - name: "${NAME}-secrets" - stringData: - pg-password: "${DATABASE_PASSWORD}" - - apiVersion: v1 - kind: Service - metadata: - annotations: - description: "Exposes and load balances ManageIQ pods" - service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' - name: ${NAME} - spec: - clusterIP: None - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - name: ${NAME} - - apiVersion: v1 - kind: Route - metadata: - name: ${NAME} - spec: - host: ${APPLICATION_DOMAIN} - port: - targetPort: https - tls: - termination: passthrough - to: - kind: Service - name: ${NAME} - - apiVersion: v1 - kind: ImageStream - metadata: - name: miq-app - annotations: - description: "Keeps track of the ManageIQ image changes" - spec: - dockerImageRepository: "${APPLICATION_IMG_NAME}" - - apiVersion: v1 - kind: ImageStream - metadata: - name: miq-postgresql - annotations: - description: "Keeps track of the PostgreSQL image changes" - spec: - dockerImageRepository: "${POSTGRESQL_IMG_NAME}" - - apiVersion: v1 - kind: ImageStream - metadata: - name: miq-memcached - annotations: - description: "Keeps track of the Memcached image changes" - spec: - dockerImageRepository: "${MEMCACHED_IMG_NAME}" - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-${DATABASE_SERVICE_NAME}" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${DATABASE_VOLUME_CAPACITY} - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-region" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${APPLICATION_REGION_VOLUME_CAPACITY} - - apiVersion: apps/v1beta1 - kind: "StatefulSet" - metadata: - name: ${NAME} - annotations: - 
description: "Defines how to deploy the ManageIQ appliance" - spec: - serviceName: "${NAME}" - replicas: "${APPLICATION_REPLICA_COUNT}" - template: - metadata: - labels: - name: ${NAME} - name: ${NAME} - spec: - containers: - - name: manageiq - image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" - livenessProbe: - tcpSocket: - port: 443 - initialDelaySeconds: 480 - timeoutSeconds: 3 - readinessProbe: - httpGet: - path: / - port: 443 - scheme: HTTPS - initialDelaySeconds: 200 - timeoutSeconds: 3 - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP - securityContext: - privileged: true - volumeMounts: - - - name: "${NAME}-server" - mountPath: "/persistent" - - - name: "${NAME}-region" - mountPath: "/persistent-region" - env: - - - name: "APPLICATION_INIT_DELAY" - value: "${APPLICATION_INIT_DELAY}" - - - name: "DATABASE_SERVICE_NAME" - value: "${DATABASE_SERVICE_NAME}" - - - name: "DATABASE_REGION" - value: "${DATABASE_REGION}" - - - name: "MEMCACHED_SERVICE_NAME" - value: "${MEMCACHED_SERVICE_NAME}" - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - valueFrom: - secretKeyRef: - name: "${NAME}-secrets" - key: "pg-password" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${APPLICATION_MEM_REQ}" - cpu: "${APPLICATION_CPU_REQ}" - limits: - memory: "${APPLICATION_MEM_LIMIT}" - lifecycle: - preStop: - exec: - command: - - /opt/manageiq/container-scripts/sync-pv-data - volumes: - - - name: "${NAME}-region" - persistentVolumeClaim: - claimName: ${NAME}-region - volumeClaimTemplates: - - metadata: - name: "${NAME}-server" - annotations: - # Uncomment this if using dynamic volume provisioning. 
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html - # volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ ReadWriteOnce ] - resources: - requests: - storage: "${APPLICATION_VOLUME_CAPACITY}" - - apiVersion: v1 - kind: "Service" - metadata: - name: "${MEMCACHED_SERVICE_NAME}" - annotations: - description: "Exposes the memcached server" - spec: - ports: - - - name: "memcached" - port: 11211 - targetPort: 11211 - selector: - name: "${MEMCACHED_SERVICE_NAME}" - - apiVersion: v1 - kind: "DeploymentConfig" - metadata: - name: "${MEMCACHED_SERVICE_NAME}" - annotations: - description: "Defines how to deploy memcached" - spec: - strategy: - type: "Recreate" - triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "memcached" - from: - kind: "ImageStreamTag" - name: "miq-memcached:${MEMCACHED_IMG_TAG}" - - - type: "ConfigChange" - replicas: 1 - selector: - name: "${MEMCACHED_SERVICE_NAME}" - template: - metadata: - name: "${MEMCACHED_SERVICE_NAME}" - labels: - name: "${MEMCACHED_SERVICE_NAME}" - spec: - volumes: [] - containers: - - - name: "memcached" - image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" - ports: - - - containerPort: 11211 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 5 - tcpSocket: - port: 11211 - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 30 - tcpSocket: - port: 11211 - volumeMounts: [] - env: - - - name: "MEMCACHED_MAX_MEMORY" - value: "${MEMCACHED_MAX_MEMORY}" - - - name: "MEMCACHED_MAX_CONNECTIONS" - value: "${MEMCACHED_MAX_CONNECTIONS}" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - value: "${MEMCACHED_SLAB_PAGE_SIZE}" - resources: - requests: - memory: "${MEMCACHED_MEM_REQ}" - cpu: "${MEMCACHED_CPU_REQ}" - limits: - memory: "${MEMCACHED_MEM_LIMIT}" - - apiVersion: v1 - kind: "Service" - metadata: - name: "${DATABASE_SERVICE_NAME}" - annotations: - description: "Exposes the database server" - spec: - ports: - - - name: "postgresql" - port: 5432 - targetPort: 5432 - selector: - name: "${DATABASE_SERVICE_NAME}" - - apiVersion: v1 - kind: "DeploymentConfig" - metadata: - name: "${DATABASE_SERVICE_NAME}" - annotations: - description: "Defines how to deploy the database" - spec: - strategy: - type: "Recreate" - triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "postgresql" - from: - kind: "ImageStreamTag" - name: "miq-postgresql:${POSTGRESQL_IMG_TAG}" - - - type: "ConfigChange" - replicas: 1 - selector: - name: "${DATABASE_SERVICE_NAME}" - template: - metadata: - name: "${DATABASE_SERVICE_NAME}" - labels: - name: "${DATABASE_SERVICE_NAME}" - spec: - volumes: - - - name: "miq-pgdb-volume" - persistentVolumeClaim: - claimName: "${NAME}-${DATABASE_SERVICE_NAME}" - containers: - - - name: "postgresql" - image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" - ports: - - - containerPort: 5432 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 15 - exec: - command: - - "/bin/sh" - - "-i" - - "-c" - - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 60 - tcpSocket: - port: 5432 - volumeMounts: - - - name: "miq-pgdb-volume" - mountPath: "/var/lib/pgsql/data" - env: - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - valueFrom: - secretKeyRef: - name: "${NAME}-secrets" - key: "pg-password" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - 
name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${POSTGRESQL_MEM_REQ}" - cpu: "${POSTGRESQL_CPU_REQ}" - limits: - memory: "${POSTGRESQL_MEM_LIMIT}" - - parameters: - - - name: "NAME" - displayName: Name - required: true - description: "The name assigned to all of the frontend objects defined in this template." - value: manageiq - - - name: "DATABASE_SERVICE_NAME" - displayName: "PostgreSQL Service Name" - required: true - description: "The name of the OpenShift Service exposed for the PostgreSQL container." - value: "postgresql" - - - name: "DATABASE_USER" - displayName: "PostgreSQL User" - required: true - description: "PostgreSQL user that will access the database." - value: "root" - - - name: "DATABASE_PASSWORD" - displayName: "PostgreSQL Password" - required: true - description: "Password for the PostgreSQL user." - from: "[a-zA-Z0-9]{8}" - generate: expression - - - name: "DATABASE_NAME" - required: true - displayName: "PostgreSQL Database Name" - description: "Name of the PostgreSQL database accessed." - value: "vmdb_production" - - - name: "DATABASE_REGION" - required: true - displayName: "Application Database Region" - description: "Database region that will be used for application." - value: "0" - - - name: "MEMCACHED_SERVICE_NAME" - required: true - displayName: "Memcached Service Name" - description: "The name of the OpenShift Service exposed for the Memcached container." - value: "memcached" - - - name: "MEMCACHED_MAX_MEMORY" - displayName: "Memcached Max Memory" - description: "Memcached maximum memory for memcached object storage in MB." - value: "64" - - - name: "MEMCACHED_MAX_CONNECTIONS" - displayName: "Memcached Max Connections" - description: "Memcached maximum number of connections allowed." - value: "1024" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - displayName: "Memcached Slab Page Size" - description: "Memcached size of each slab page." - value: "1m" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - displayName: "PostgreSQL Max Connections" - description: "PostgreSQL maximum number of database connections allowed." - value: "100" - - - name: "POSTGRESQL_SHARED_BUFFERS" - displayName: "PostgreSQL Shared Buffer Amount" - description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "256MB" - - - name: "APPLICATION_CPU_REQ" - displayName: "Application Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." - value: "1000m" - - - name: "POSTGRESQL_CPU_REQ" - displayName: "PostgreSQL Min CPU Requested" - required: true - description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." - value: "500m" - - - name: "MEMCACHED_CPU_REQ" - displayName: "Memcached Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." - value: "200m" - - - name: "APPLICATION_MEM_REQ" - displayName: "Application Min RAM Requested" - required: true - description: "Minimum amount of memory the Application container will need." - value: "6144Mi" - - - name: "POSTGRESQL_MEM_REQ" - displayName: "PostgreSQL Min RAM Requested" - required: true - description: "Minimum amount of memory the PostgreSQL container will need." 
- value: "1024Mi" - - - name: "MEMCACHED_MEM_REQ" - displayName: "Memcached Min RAM Requested" - required: true - description: "Minimum amount of memory the Memcached container will need." - value: "64Mi" - - - name: "APPLICATION_MEM_LIMIT" - displayName: "Application Max RAM Limit" - required: true - description: "Maximum amount of memory the Application container can consume." - value: "16384Mi" - - - name: "POSTGRESQL_MEM_LIMIT" - displayName: "PostgreSQL Max RAM Limit" - required: true - description: "Maximum amount of memory the PostgreSQL container can consume." - value: "8192Mi" - - - name: "MEMCACHED_MEM_LIMIT" - displayName: "Memcached Max RAM Limit" - required: true - description: "Maximum amount of memory the Memcached container can consume." - value: "256Mi" - - - name: "POSTGRESQL_IMG_NAME" - displayName: "PostgreSQL Image Name" - description: "This is the PostgreSQL image name requested to deploy." - value: "docker.io/manageiq/manageiq-pods" - - - name: "POSTGRESQL_IMG_TAG" - displayName: "PostgreSQL Image Tag" - description: "This is the PostgreSQL image tag/version requested to deploy." - value: "postgresql-latest-fine" - - - name: "MEMCACHED_IMG_NAME" - displayName: "Memcached Image Name" - description: "This is the Memcached image name requested to deploy." - value: "docker.io/manageiq/manageiq-pods" - - - name: "MEMCACHED_IMG_TAG" - displayName: "Memcached Image Tag" - description: "This is the Memcached image tag/version requested to deploy." - value: "memcached-latest-fine" - - - name: "APPLICATION_IMG_NAME" - displayName: "Application Image Name" - description: "This is the Application image name requested to deploy." - value: "docker.io/manageiq/manageiq-pods" - - - name: "APPLICATION_IMG_TAG" - displayName: "Application Image Tag" - description: "This is the Application image tag/version requested to deploy." - value: "app-latest-fine" - - - name: "APPLICATION_DOMAIN" - displayName: "Application Hostname" - description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." - value: "" - - - name: "APPLICATION_REPLICA_COUNT" - displayName: "Application Replica Count" - description: "This is the number of Application replicas requested to deploy." - value: "1" - - - name: "APPLICATION_INIT_DELAY" - displayName: "Application Init Delay" - required: true - description: "Delay in seconds before we attempt to initialize the application." - value: "15" - - - name: "APPLICATION_VOLUME_CAPACITY" - displayName: "Application Volume Capacity" - required: true - description: "Volume space available for application data." - value: "5Gi" - - - name: "APPLICATION_REGION_VOLUME_CAPACITY" - displayName: "Application Region Volume Capacity" - required: true - description: "Volume space available for region application data." - value: "5Gi" - - - name: "DATABASE_VOLUME_CAPACITY" - displayName: "Database Volume Capacity" - required: true - description: "Volume space available for database." 
-  value: "15Gi"
diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports
deleted file mode 100644
index 5457d41fc..000000000
--- a/roles/openshift_cfme/files/openshift_cfme.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/miq-pv01 *(rw,no_root_squash,no_wdelay)
-/exports/miq-pv02 *(rw,no_root_squash,no_wdelay)
-/exports/miq-pv03 *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml
deleted file mode 100644
index 7e90b09a4..000000000
--- a/roles/openshift_cfme/handlers/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-######################################################################
-# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml
-#
-# TODO: Use the consolidated 'openshift_handlers' role once it's ready
-# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782
-######################################################################
-
-- name: restart master api
-  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-  notify: Verify API Server
-
-- name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {% else %}
-    --cacert {{ openshift.common.config_base }}/master/ca.crt
-    {% endif %}
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    #     Consider using get_url or uri module rather than running curl
-    warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false
diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png
deleted file mode 100644
index a89c1e325..000000000
--- a/roles/openshift_cfme/img/CFMEBasicDeployment.png
+++ /dev/null
Binary files differ
diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml
deleted file mode 100644
index 7fa7d3997..000000000
--- a/roles/openshift_cfme/tasks/create_pvs.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existence and then conditionally:
-#  - evaluate templates
-#  - create PVs
-#
-# These tasks idempotently create required CFME PV objects. Do not
-# call this file directly. This file is intended to be run as an
-# include that has a 'with_items' attached to it.
Hence the use below -# of variables like "{{ item.pv_label }}" - -- name: "Check if the {{ item.pv_label }} template has been created already" - oc_obj: - namespace: "{{ openshift_cfme_project }}" - state: list - kind: pv - name: "{{ item.pv_name }}" - register: miq_pv_check - -# Skip all of this if the PV already exists -- block: - - name: "Ensure the {{ item.pv_label }} template is evaluated" - template: - src: "{{ item.pv_template }}.j2" - dest: "{{ template_dir }}/{{ item.pv_template }}" - - - name: "Ensure {{ item.pv_label }} is created" - oc_obj: - namespace: "{{ openshift_cfme_project }}" - kind: pv - name: "{{ item.pv_name }}" - state: present - delete_after: True - files: - - "{{ template_dir }}/{{ item.pv_template }}" - when: - - not miq_pv_check.results.results.0 diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml deleted file mode 100644 index 74ae16d91..000000000 --- a/roles/openshift_cfme/tasks/main.yml +++ /dev/null @@ -1,117 +0,0 @@ ---- -###################################################################### -# Users, projects, and privileges - -- name: Ensure the CFME user exists - oc_user: - state: present - username: "{{ openshift_cfme_user }}" - -- name: Ensure the CFME namespace exists with CFME user as admin - oc_project: - state: present - name: "{{ openshift_cfme_project }}" - display_name: "{{ openshift_cfme_project_description }}" - admin: "{{ openshift_cfme_user }}" - -- name: Ensure the CFME namespace service account is privileged - oc_adm_policy_user: - namespace: "{{ openshift_cfme_project }}" - user: "{{ openshift_cfme_service_account }}" - resource_kind: scc - resource_name: privileged - state: present - -###################################################################### -# NFS -# In the case that we are not running on a cloud provider, volumes must be statically provisioned - -- include: nfs.yml - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -###################################################################### -# CFME App Template -# -# Note, this is different from the create_pvs.yml tasks in that the -# application template does not require any jinja2 evaluation. -# -# TODO: Handle the case where the server template is updated in -# openshift-ansible and the change needs to be landed on the managed -# cluster. 
- -- name: Check if the CFME Server template has been created already - oc_obj: - namespace: "{{ openshift_cfme_project }}" - state: list - kind: template - name: manageiq - register: miq_server_check - -- name: Copy over CFME Server template - copy: - src: miq-template.yaml - dest: "{{ template_dir }}/miq-template.yaml" - -- name: Ensure the server template was read from disk - debug: - var=r_openshift_cfme_miq_template_content - -- name: Ensure CFME Server Template exists - oc_obj: - namespace: "{{ openshift_cfme_project }}" - kind: template - name: "manageiq" - state: present - content: "{{ r_openshift_cfme_miq_template_content }}" - -###################################################################### -# Let's do this - -- name: Ensure the CFME Server is created - oc_process: - namespace: "{{ openshift_cfme_project }}" - template_name: manageiq - create: True - params: - APPLICATION_IMG_NAME: "{{ openshift_cfme_application_img_name }}" - POSTGRESQL_IMG_NAME: "{{ openshift_cfme_postgresql_img_name }}" - MEMCACHED_IMG_NAME: "{{ openshift_cfme_memcached_img_name }}" - APPLICATION_IMG_TAG: "{{ openshift_cfme_application_img_tag }}" - POSTGRESQL_IMG_TAG: "{{ openshift_cfme_postgresql_img_tag }}" - MEMCACHED_IMG_TAG: "{{ openshift_cfme_memcached_img_tag }}" - register: cfme_new_app_process - run_once: True - when: - # User said to install CFME in their inventory - - openshift_cfme_install_app | bool - # # The server app doesn't exist already - # - not miq_server_check.results.results.0 - -- debug: - var: cfme_new_app_process - -###################################################################### -# Various cleanup steps - -# TODO: Not sure what to do about this right now. Might be able to -# just delete it? This currently warns about "Unable to find -# '<TEMP_DIR>' in expected paths." -- name: Ensure the temporary PV/App templates are erased - file: - path: "{{ item }}" - state: absent - with_fileglob: - - "{{ template_dir }}/*.yaml" - -- name: Ensure the temporary PV/app template directory is erased - file: - path: "{{ template_dir }}" - state: absent - -###################################################################### - -- name: Status update - debug: - msg: > - CFME has been deployed. Note that there will be a delay before - it is fully initialized. diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml deleted file mode 100644 index ca04628a8..000000000 --- a/roles/openshift_cfme/tasks/nfs.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# Tasks to statically provision NFS volumes -# Include if not using dynamic volume provisioning - -- name: Set openshift_cfme_nfs_server fact - when: openshift_cfme_nfs_server is not defined - set_fact: - # Hostname/IP of the NFS server. 
Currently defaults to first master - openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}" - -- name: Ensure the /exports/ directory exists - file: - path: /exports/ - state: directory - mode: 0755 - owner: root - group: root - -- name: Ensure the miq-pv0X export directories exist - file: - path: "/exports/{{ item }}" - state: directory - mode: 0775 - owner: root - group: root - with_items: "{{ openshift_cfme_pv_exports }}" - -- name: Ensure the NFS exports for CFME PVs exist - copy: - src: openshift_cfme.exports - dest: /etc/exports.d/openshift_cfme.exports - register: nfs_exports_updated - -- name: Ensure the NFS export table is refreshed if exports were added - command: exportfs -ar - when: - - nfs_exports_updated.changed - - -###################################################################### -# Create the required CFME PVs. Check out these online docs if you -# need a refresher on includes looping with items: -# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0 -# * http://stackoverflow.com/a/35128533 -# -# TODO: Handle the case where a PV template is updated in -# openshift-ansible and the change needs to be landed on the managed -# cluster. - -- include: create_pvs.yml - with_items: "{{ openshift_cfme_pv_data }}" diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml deleted file mode 100644 index 02b0f10bf..000000000 --- a/roles/openshift_cfme/tasks/tune_masters.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Ensure bulk image import limit is tuned - yedit: - src: /etc/origin/master/master-config.yaml - key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository' - value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}" - state: present - backup: True - notify: - - restart master - -- meta: flush_handlers diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml deleted file mode 100644 index 406b59364..000000000 --- a/roles/openshift_cfme/tasks/uninstall.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- include_role: - name: lib_openshift - -- name: Uninstall CFME - ManageIQ - debug: - msg: Uninstalling Cloudforms Management Engine - ManageIQ - -- name: Ensure the CFME project is removed - oc_project: - state: absent - name: "{{ openshift_cfme_project }}" - -- name: Ensure the CFME template is removed - oc_obj: - namespace: "{{ openshift_cfme_project }}" - state: absent - kind: template - name: manageiq - -- name: Ensure the CFME PVs are removed - oc_obj: - state: absent - all_namespaces: True - kind: pv - name: "{{ item }}" - with_items: "{{ openshift_cfme_pv_exports }}" - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -- name: Ensure the CFME user is removed - oc_user: - state: absent - username: "{{ openshift_cfme_user }}" - -- name: Ensure the CFME NFS Exports are removed - file: - path: /etc/exports.d/openshift_cfme.exports - state: absent - register: nfs_exports_removed - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -- name: Ensure the NFS export table is refreshed if exports were removed - command: exportfs -ar - when: - - nfs_exports_removed.changed - - not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 
b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
deleted file mode 100644
index 280f3e97a..000000000
--- a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: miq-pv01
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: {{ openshift_cfme_nfs_directory }}/miq-pv01
-    server: {{ openshift_cfme_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
deleted file mode 100644
index fe80dffa5..000000000
--- a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: miq-pv02
-spec:
-  capacity:
-    storage: 5Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: {{ openshift_cfme_nfs_directory }}/miq-pv02
-    server: {{ openshift_cfme_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
deleted file mode 100644
index f84b67ea9..000000000
--- a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: miq-pv03
-spec:
-  capacity:
-    storage: 5Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: {{ openshift_cfme_nfs_directory }}/miq-pv03
-    server: {{ openshift_cfme_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
index 198163127..57e732f37 100644
--- a/roles/openshift_default_storage_class/README.md
+++ b/roles/openshift_default_storage_class/README.md
@@ -1,7 +1,7 @@
 openshift_master_storage_class
 =========
-A role that deploys configuratons for Openshift StorageClass
+A role that deploys configurations for OpenShift StorageClass
 
 Requirements
 ------------
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 11ef9fa97..215ff4b72 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1669,7 +1669,9 @@ def set_container_facts_if_unset(facts):
         facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
     # If openshift_docker_use_system_container is set and is True ....
     if 'use_system_container' in list(facts['docker'].keys()):
-        if facts['docker']['use_system_container']:
+        # use safe_get_bool as the inventory variable may not be a
+        # valid boolean on its own.
+        if safe_get_bool(facts['docker']['use_system_container']):
            # ...
set the service name to container-engine facts['docker']['service_name'] = 'container-engine' diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml new file mode 100644 index 000000000..18fc453b2 --- /dev/null +++ b/roles/openshift_gcp/defaults/main.yml @@ -0,0 +1,58 @@ +--- +openshift_gcp_prefix: '' + +openshift_gcp_create_network: True +openshift_gcp_create_registry_bucket: True +openshift_gcp_kubernetes_cluster_status: owned # or shared +openshift_gcp_node_group_type: master + +openshift_gcp_ssh_private_key: '' + +openshift_gcp_project: '' +openshift_gcp_clusterid: default +openshift_gcp_region: us-central1 +openshift_gcp_zone: us-central1-a + +openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network" + +openshift_gcp_iam_service_account: '' +openshift_gcp_iam_service_account_keyfile: '' + +openshift_gcp_master_lb_timeout: 2m + +openshift_gcp_infra_network_instance_group: ig-i + +openshift_gcp_image: 'rhel-7' +openshift_gcp_base_image: rhel-7 + +openshift_gcp_registry_bucket_keyfile: '' +openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry" + +openshift_gcp_node_group_config: + - name: master + suffix: m + tags: ocp-master + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: infra + suffix: i + tags: ocp-infra-node ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: node + suffix: n + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 3 + - name: node-flex + suffix: nf + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 0 + +openshift_gcp_startup_script_file: '' +openshift_gcp_user_data_file: '' diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh index eacf84b4d..a7475aaf5 100644 --- a/roles/openshift_gcp/templates/dns.j2.sh +++ b/roles/openshift_gcp/templates/dns.j2.sh @@ -2,12 +2,12 @@ set -euo pipefail -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist -if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null +if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null fi # Always output the expected nameservers as a comma delimited list -gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' +gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index e68e9683f..d72a11de1 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -2,36 +2,38 @@ set -euo pipefail -# Create SSH key for GCE -if [ ! 
-f "{{ gce_ssh_private_key }}" ]; then - ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N '' - ssh-add "{{ gce_ssh_private_key }}" || true -fi +if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then + # Create SSH key for GCE + if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then + ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N '' + ssh-add "{{ openshift_gcp_ssh_private_key }}" || true + fi -# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there -pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub") -key_tmp_file='/tmp/ocp-gce-keys' -if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then - if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then - gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + key_tmp_file='/tmp/ocp-gce-keys' + if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then + if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then + gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + fi + echo -n 'cloud-user:' >> "$key_tmp_file" + cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" + rm -f "$key_tmp_file" fi - echo -n 'cloud-user:' >> "$key_tmp_file" - cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file" - gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" - rm -f "$key_tmp_file" fi metadata="" -if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then - if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then - echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)" +if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then + if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then + echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)" exit 1 fi - metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}" + metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}" fi -if [[ -n "{{ provision_gce_user_data_file }}" ]]; then - if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then - echo "User data file missing at {{ provision_gce_user_data_file }}" +if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then + if [[ ! 
-f "{{ openshift_gcp_user_data_file }}" ]]; then + echo "User data file missing at {{ openshift_gcp_user_data_file }}" exit 1 fi if [[ -n "${metadata}" ]]; then @@ -39,14 +41,14 @@ if [[ -n "{{ provision_gce_user_data_file }}" ]]; then else metadata="--metadata-from-file=" fi - metadata+="user-data={{ provision_gce_user_data_file }}" + metadata+="user-data={{ openshift_gcp_user_data_file }}" fi # Select image or image family -image="{{ provision_gce_registered_image }}" -if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then - if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then - echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'" +image="{{ openshift_gcp_image }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then + if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then + echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'" exit 1 fi image="family/${image}" @@ -54,19 +56,19 @@ fi ### PROVISION THE INFRASTRUCTURE ### -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers -if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then +if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script" exit 1 fi # Create network -if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto" +if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto" else - echo "Network '{{ gce_network_name }}' already exists" + echo "Network '{{ openshift_gcp_network_name }}' already exists" fi # Firewall rules in a form: @@ -87,56 +89,56 @@ declare -A FW_RULES=( ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node" ) for rule in "${!FW_RULES[@]}"; do - ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]} + ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]} else - echo "Firewall rule '{{ provision_prefix }}${rule}' already exists" + echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists" fi ) & done # Master IP -( if ! 
gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global else - echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists" fi ) & # Internal master IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" else - echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists" fi ) & # Router IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" else - echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists" fi ) & -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # configure {{ node_group.name }} ( - if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \ - --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \ - --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \ + if ! 
gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \ + --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \ + --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \ --image "${image}" ${metadata} else - echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists" + echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists" fi # Create instance group - if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \ - --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" + if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \ + --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" else - echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists" + echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists" fi ) & {% endfor %} @@ -147,36 +149,36 @@ for i in `jobs -p`; do wait $i; done # Configure the master external LB rules ( # Master health check -if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists" fi -gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \ - --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}" +gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \ + --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}" # Master backend service -if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}" - gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}" + gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}" else - echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists" + echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists" fi # Master tcp proxy target -if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend" else - echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists" + echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists" fi # Master forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target" +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target" else - echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists" fi ) & @@ -184,25 +186,25 @@ fi # Configure the master internal LB rules ( # Internal master health check -if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" +if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists" fi # Internal master target pool -if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}" +if ! 
@@ -184,25 +186,25 @@ fi
 # Configure the master internal LB rules
 (
 # Internal master health check
-if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
+    gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
 else
-    echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+    echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
 fi

 # Internal master target pool
-if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
-    gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+    gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
 else
-    echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+    echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
 fi

 # Internal master forwarding rule
-if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
-    IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
-    gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
 else
-    echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+    echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
 fi
 ) &

@@ -210,25 +212,25 @@ fi
 # Configure the infra node rules
 (
 # Router health check
-if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
+    gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
 else
-    echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+    echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
 fi

 # Router target pool
-if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
-    gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+    gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
 else
-    echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+    echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
 fi

 # Router forwarding rule
-if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
-    IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
-    gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
 else
-    echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+    echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
 fi
 ) &

@@ -236,11 +238,11 @@ for i in `jobs -p`; do wait $i; done

 # set the target pools
 (
-if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
-    gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
+    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
 else
-    gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
-    gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
+    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
 fi
 ) &

@@ -252,42 +254,42 @@ while true; do
     rm -f $dns

     # DNS record for master lb
-    if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
-        IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
         if [[ ! -f $dns ]]; then
-            gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
         fi
-        gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
     else
         echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
     fi

     # DNS record for internal master lb
-    if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
-        IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
         if [[ ! -f $dns ]]; then
-            gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
         fi
-        gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
     else
         echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
     fi

     # DNS record for router lb
-    if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
-        IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
         if [[ ! -f $dns ]]; then
-            gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
         fi
-        gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
-        gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
     else
         echo "DNS record for '{{ wildcard_zone }}' already exists"
     fi

     # Commit all DNS changes, retrying if preconditions are not met
     if [[ -f $dns ]]; then
-        if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+        if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
             rc=$?
             if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
                 continue
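The commit step above treats Cloud DNS as a shared resource: record changes are batched into a transaction file and executed atomically, and an `HTTPError 412: Precondition not met` response simply restarts the loop so the records are re-read and re-applied. A rough Python sketch of that retry contract (the subprocess wrapper is an illustrative assumption; only the error-string handling mirrors the script):

    import subprocess

    def execute_transaction(project, zone, txn_file):
        """Returns True on success, False when the 412 retry case applies."""
        proc = subprocess.run(
            ["gcloud", "--project", project, "dns", "record-sets", "transaction",
             "--transaction-file=" + txn_file, "execute", "-z", zone],
            capture_output=True, text=True)
        if proc.returncode == 0:
            return True
        if "HTTPError 412: Precondition not met" in proc.stdout + proc.stderr:
            return False  # another writer changed the zone; rebuild and try again
        raise RuntimeError(proc.stderr.strip())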
@@ -301,17 +303,17 @@ done

 # Create bucket for registry
 (
-if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
-    gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
+    gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
 else
-    echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+    echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
 fi
 ) &

 # wait until all node groups are stable
-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
 # wait for stable {{ node_group.name }}
-( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) &
 {% endfor %}
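Both the provision and remove templates get their speed from the same shell concurrency idiom: each independent block runs in a background subshell with `( ... ) &`, and a `wait` loop over `jobs -p` joins every background job before the next phase begins. For comparison, the equivalent fan-out/join shape in Python (purely illustrative; the roles themselves stay in bash):

    from concurrent.futures import ThreadPoolExecutor

    def run_phase(steps):
        """Run independent provisioning steps concurrently, then join them all."""
        with ThreadPoolExecutor() as pool:
            futures = [pool.submit(step) for step in steps]
        # leaving the `with` block waits for completion, like `wait` on each job
        for future in futures:
            future.result()  # re-raise any failure from a background step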
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
index 41ceab2b5..a1e0affec 100644
--- a/roles/openshift_gcp/templates/remove.j2.sh
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -18,8 +18,8 @@ function teardown_cmd() {
     if [[ -z "${found}" ]]; then
         flag=$((flag+1))
     fi
-    if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
-        gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+    if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+        gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
     fi
 }

@@ -33,11 +33,11 @@ function teardown() {
 }

 # Preemptively spin down the instances
-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
 # scale down {{ node_group.name }}
 (
     # performs a delete and scale down as one operation to ensure maximum parallelism
-    if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+    if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then
         exit 0
     fi
     instances="${instances%?}"
@@ -45,7 +45,7 @@
         echo "warning: No instances in {{ node_group.name }}" 1>&2
         exit 0
     fi
-    if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then
         echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
         exit 0
     fi
@@ -54,15 +54,15 @@

 # Bucket for registry
 (
-if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
-    gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
+    gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}"
 fi
 ) &

 # DNS
 (
-dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
-if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
     # Retry DNS changes until they succeed since this may be a shared resource
     while true; do
         dns="${TMPDIR:-/tmp}/dns.yaml"
@@ -70,16 +70,16 @@ if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zon

         # export all dns records that match into a zone format, and turn each line into a set of args for
         # record-sets transaction.
-        gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+        gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}"
         if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
                 awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
         then
             rm -f "${dns}"
-            gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-            cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+            cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"

             # Commit all DNS changes, retrying if preconditions are not met
-            if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+            if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
                 rc=$?
                 if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
                     continue
@@ -95,25 +95,25 @@ fi

 (
 # Router network rules
-teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
-teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"

 # Internal master network rules
-teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
-teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
 ) &

 (
 # Master SSL network rules
-teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
-teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
-teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
-teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
-teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks
 ) &

 #Firewall rules
@@ -130,10 +130,10 @@ declare -A FW_RULES=(
     ['infra-node-external']=""
 )
 for rule in "${!FW_RULES[@]}"; do
-    ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+    ( if gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
         # retry a few times because this call can be flaky
         for i in `seq 1 3`; do
-            if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+            if gcloud -q --project "{{ openshift_gcp_project }}" compute firewall-rules delete "{{ openshift_gcp_prefix }}$rule"; then
                 break
             fi
         done
@@ -142,15 +142,15 @@
 for i in `jobs -p`; do wait $i; done

-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
 # teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
 (
-    teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
-    teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+    teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}"
+    teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
 ) &
 {% endfor %}

 for i in `jobs -p`; do wait $i; done

 # Network
-teardown "{{ gce_network_name }}" compute networks
+teardown "{{ openshift_gcp_network_name }}" compute networks
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index cdf56e959..7956559c6 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -15,31 +15,31 @@ class DiskAvailability(OpenShiftCheck):
     # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
     recommended_disk_space_bytes = {
         '/var': {
-            'masters': 40 * 10**9,
-            'nodes': 15 * 10**9,
-            'etcd': 20 * 10**9,
+            'oo_masters_to_config': 40 * 10**9,
+            'oo_nodes_to_config': 15 * 10**9,
+            'oo_etcd_to_config': 20 * 10**9,
         },
         # Used to copy client binaries into,
         # see roles/openshift_cli/library/openshift_container_binary_sync.py.
         '/usr/local/bin': {
-            'masters': 1 * 10**9,
-            'nodes': 1 * 10**9,
-            'etcd': 1 * 10**9,
+            'oo_masters_to_config': 1 * 10**9,
+            'oo_nodes_to_config': 1 * 10**9,
+            'oo_etcd_to_config': 1 * 10**9,
         },
         # Used as temporary storage in several cases.
         tempfile.gettempdir(): {
-            'masters': 1 * 10**9,
-            'nodes': 1 * 10**9,
-            'etcd': 1 * 10**9,
+            'oo_masters_to_config': 1 * 10**9,
+            'oo_nodes_to_config': 1 * 10**9,
+            'oo_etcd_to_config': 1 * 10**9,
         },
     }

     # recommended disk space for each location under an upgrade context
     recommended_disk_upgrade_bytes = {
         '/var': {
-            'masters': 10 * 10**9,
-            'nodes': 5 * 10 ** 9,
-            'etcd': 5 * 10 ** 9,
+            'oo_masters_to_config': 10 * 10**9,
+            'oo_nodes_to_config': 5 * 10 ** 9,
+            'oo_etcd_to_config': 5 * 10 ** 9,
         },
     }

@@ -61,9 +61,9 @@ class DiskAvailability(OpenShiftCheck):
             number = float(user_config)
             user_config = {
                 '/var': {
-                    'masters': number,
-                    'nodes': number,
-                    'etcd': number,
+                    'oo_masters_to_config': number,
+                    'oo_nodes_to_config': number,
+                    'oo_etcd_to_config': number,
                 },
             }
         except TypeError:
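After the rename, both recommendation tables are keyed by the oo_* inventory group names that the playbooks actually apply, so a host's `group_names` fact can index them directly. One plausible way such a table is consumed, sketched for illustration (the max-over-groups policy here is an assumption, not a claim about the check's exact logic):

    RECOMMENDED_GB = {
        '/var': {
            'oo_masters_to_config': 40,
            'oo_nodes_to_config': 15,
            'oo_etcd_to_config': 20,
        },
    }

    def required_gb(path, group_names):
        """Strictest (largest) recommendation among the groups a host belongs to."""
        per_group = RECOMMENDED_GB.get(path, {})
        return max((per_group[g] for g in group_names if g in per_group), default=0)

    # a host that is both a master and an etcd member gets the master-sized /var
    assert required_gb('/var', ['oo_masters_to_config', 'oo_etcd_to_config']) == 40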
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 93a5973d4..7c8ac78fe 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,5 +1,7 @@
 """Check that required Docker images are available."""

+from pipes import quote
+from ansible.module_utils import six
 from openshift_checks import OpenShiftCheck
 from openshift_checks.mixins import DockerHostMixin

@@ -32,10 +34,39 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
     # we use python-docker-py to check local docker for images, and skopeo
     # to look for images available remotely without waiting to pull them.
     dependencies = ["python-docker-py", "skopeo"]
-    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+    # command for checking if remote registries have an image, without docker pull
+    skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+    skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"

     def __init__(self, *args, **kwargs):
         super(DockerImageAvailability, self).__init__(*args, **kwargs)
+
+        self.registries = dict(
+            # set of registries that need to be checked insecurely (note: not accounting for CIDR entries)
+            insecure=set(self.ensure_list("openshift_docker_insecure_registries")),
+            # set of registries that should never be queried even if given in the image
+            blocked=set(self.ensure_list("openshift_docker_blocked_registries")),
+        )
+
+        # ordered list of registries (according to inventory vars) that docker will try for unscoped images
+        regs = self.ensure_list("openshift_docker_additional_registries")
+        # currently one of these registries is added whether the user wants it or not.
+        deployment_type = self.get_var("openshift_deployment_type")
+        if deployment_type == "origin" and "docker.io" not in regs:
+            regs.append("docker.io")
+        elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
+            regs.append("registry.access.redhat.com")
+        self.registries["configured"] = regs
+
+        # for the oreg_url registry there may be credentials specified
+        components = self.get_var("oreg_url", default="").split('/')
+        self.registries["oreg"] = "" if len(components) < 3 else components[0]
+        self.skopeo_command_creds = ""
+        oreg_auth_user = self.get_var('oreg_auth_user', default='')
+        oreg_auth_password = self.get_var('oreg_auth_password', default='')
+        if oreg_auth_user != '' and oreg_auth_password != '':
+            self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+
         # record whether we could reach a registry or not (and remember results)
         self.reachable_registries = {}

@@ -61,26 +92,25 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
         if not missing_images:
             return {}

-        registries = self.known_docker_registries()
-        if not registries:
-            return {"failed": True, "msg": "Unable to retrieve any docker registries."}
-
-        available_images = self.available_images(missing_images, registries)
+        available_images = self.available_images(missing_images)
         unavailable_images = set(missing_images) - set(available_images)

         if unavailable_images:
-            registries = [
-                reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
-                for reg in registries
-            ]
+            unreachable = [reg for reg, reachable in self.reachable_registries.items() if not reachable]
+            unreachable_msg = "Failed connecting to: {}\n".format(", ".join(unreachable))
+            blocked_msg = "Blocked registries: {}\n".format(", ".join(self.registries["blocked"]))
             msg = (
-                "One or more required Docker images are not available:\n    {}\n"
-                "Configured registries: {}\n"
-                "Checked by: {}"
+                "One or more required container images are not available:\n    {missing}\n"
+                "Checked with: {cmd}\n"
+                "Default registries searched: {registries}\n"
+                "{blocked}"
+                "{unreachable}"
             ).format(
-                ",\n    ".join(sorted(unavailable_images)),
-                ", ".join(registries),
-                self.skopeo_img_check_command
+                missing=",\n    ".join(sorted(unavailable_images)),
+                cmd=self.skopeo_example_command,
+                registries=", ".join(self.registries["configured"]),
+                blocked=blocked_msg if self.registries["blocked"] else "",
+                unreachable=unreachable_msg if unreachable else "",
             )

             return dict(failed=True, msg=msg)
@@ -113,7 +143,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
         # template for images that run on top of OpenShift
         image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
         image_url = self.get_var("oreg_url", default="") or image_url
-        if 'nodes' in host_groups:
+        if 'oo_nodes_to_config' in host_groups:
             for suffix in NODE_IMAGE_SUFFIXES:
                 required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
             # The registry-console is for some reason not prefixed with ose- like the other components.
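Note how the oreg credentials are run through pipes.quote before being embedded: the skopeo probe is rendered into a raw shell command line, so a password containing spaces or shell metacharacters must be quoted to avoid breaking (or injecting into) the command. For example (values are made up):

    from pipes import quote  # shlex.quote is the Python 3 spelling

    user, password = "svc-account", "p@ss word$"
    creds = "--creds={}:{}".format(quote(user), quote(password))
    # --creds=svc-account:'p@ss word$'  -- survives the shell as a single argument
    print("timeout 10 skopeo inspect --tls-verify=true {} docker://reg.example.com/ns/image:v1".format(creds))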
@@ -124,24 +154,23 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
         # images for containerized components
         if self.get_var("openshift", "common", "is_containerized"):
             components = set()
-            if 'nodes' in host_groups:
+            if 'oo_nodes_to_config' in host_groups:
                 components.update(["node", "openvswitch"])
-            if 'masters' in host_groups:  # name is "origin" or "ose"
+            if 'oo_masters_to_config' in host_groups:  # name is "origin" or "ose"
                 components.add(image_info["name"])
             for component in components:
                 required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
-        if 'etcd' in host_groups:  # special case, note it is the same for origin/enterprise
+        if 'oo_etcd_to_config' in host_groups:  # special case, note it is the same for origin/enterprise
             required.add("registry.access.redhat.com/rhel7/etcd")  # and no image tag

         return required

     def local_images(self, images):
         """Filter a list of images and return those available locally."""
-        registries = self.known_docker_registries()
         found_images = []
         for image in images:
             # docker could have the image name as-is or prefixed with any registry
-            imglist = [image] + [reg + "/" + image for reg in registries]
+            imglist = [image] + [reg + "/" + image for reg in self.registries["configured"]]
             if self.is_image_local(imglist):
                 found_images.append(image)
         return found_images
@@ -151,29 +180,27 @@
         result = self.execute_module("docker_image_facts", {"name": image})
         return bool(result.get("images")) and not result.get("failed")

-    def known_docker_registries(self):
-        """Build a list of docker registries available according to inventory vars."""
-        regs = list(self.get_var("openshift_docker_additional_registries", default=[]))
-
-        deployment_type = self.get_var("openshift_deployment_type")
-        if deployment_type == "origin" and "docker.io" not in regs:
-            regs.append("docker.io")
-        elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
-            regs.append("registry.access.redhat.com")
-
-        return regs
-
-    def available_images(self, images, default_registries):
+    def ensure_list(self, registry_param):
+        """Return the task var as a list."""
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1497274
+        # If the result was a string type, place it into a list. We must do this
+        # as using list() on a string will split the string into its characters.
+        # Otherwise cast to a list as was done previously.
+        registry = self.get_var(registry_param, default=[])
+        if not isinstance(registry, six.string_types):
+            return list(registry)
+        return self.normalize(registry)
+
+    def available_images(self, images):
         """Search remotely for images. Returns: list of images found."""
         return [
             image for image in images
-            if self.is_available_skopeo_image(image, default_registries)
+            if self.is_available_skopeo_image(image)
         ]

-    def is_available_skopeo_image(self, image, default_registries):
+    def is_available_skopeo_image(self, image):
         """Use Skopeo to determine if required image exists in known registry(s)."""
-        registries = default_registries
-
+        registries = self.registries["configured"]
         # If image already includes a registry, only use that.
         # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
         # registry.access.redhat.com/rhel7 as if the registry were a namespace.
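The reason ensure_list exists (and the linked BZ 1497274) is a classic Python footgun: an inventory value may arrive as a plain string, and list() on a string explodes it into characters instead of wrapping it. A two-line demonstration:

    # naive cast: splits the string into its characters
    assert list("docker.io") == ['d', 'o', 'c', 'k', 'e', 'r', '.', 'i', 'o']
    # whereas the check needs the whole string kept as a single registry entry,
    # so string-typed vars are routed through self.normalize() instead of list()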
@@ -184,13 +211,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
             registries = [registry]

         for registry in registries:
+            if registry in self.registries["blocked"]:
+                continue  # blocked will never be consulted
             if registry not in self.reachable_registries:
                 self.reachable_registries[registry] = self.connect_to_registry(registry)
             if not self.reachable_registries[registry]:
-                continue
+                continue  # do not keep trying unreachable registries
+
+            args = dict(registry=registry, image=image)
+            args["tls"] = "false" if registry in self.registries["insecure"] else "true"
+            args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""

-            args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
-            result = self.execute_module_with_retries("command", args)
+            result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
             if result.get("rc", 0) == 0 and not result.get("failed"):
                 return True
             if result.get("rc") == 124:  # RC 124 == timed out; mark unreachable
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index b4c8957e9..8b20ccb49 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -12,7 +12,7 @@ class EtcdTraffic(OpenShiftCheck):
     def is_active(self):
         """Skip hosts that do not have etcd in their group names."""
         group_names = self.get_var("group_names", default=[])
-        valid_group_names = "etcd" in group_names
+        valid_group_names = "oo_etcd_to_config" in group_names

         version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
         valid_version = version in ((3, 4), (3, 5))
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index 79955cb2f..3d75da6f9 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -15,7 +15,11 @@ class EtcdVolume(OpenShiftCheck):
     etcd_mount_path = "/var/lib/etcd"

     def is_active(self):
-        etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
+        etcd_hosts = (
+            self.get_var("groups", "oo_etcd_to_config", default=[]) or
+            self.get_var("groups", "oo_masters_to_config", default=[]) or
+            []
+        )
         is_etcd_host = self.get_var("ansible_host") in etcd_hosts
         return super(EtcdVolume, self).is_active() and is_etcd_host
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
index d783e6760..e93cc9028 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
@@ -46,7 +46,7 @@ class FluentdConfig(LoggingCheck):
         # if check is running on a master, retrieve all running pods
         # and check any pod's container for the env var "USE_JOURNAL"
         group_names = self.get_var("group_names")
-        if "masters" in group_names:
+        if "oo_masters_to_config" in group_names:
             use_journald = self.check_fluentd_env_var()

             docker_info = self.execute_module("docker_info", {})
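The rewritten skopeo loop encodes a clear per-registry policy: blocked registries are skipped outright, reachability results are memoized so each registry is probed at most once, TLS verification is relaxed only for registries listed as insecure, and credentials are attached only for the registry named by oreg_url. The same decision table, distilled into standalone Python (data values are made up for illustration):

    def skopeo_args(registry, image, registries, oreg_creds):
        """Per-registry skopeo arguments following the check's policy."""
        if registry in registries["blocked"]:
            return None  # a blocked registry is never consulted
        return dict(
            registry=registry,
            image=image,
            tls="false" if registry in registries["insecure"] else "true",
            creds=oreg_creds if registry == registries["oreg"] else "",
        )

    registries = dict(blocked={"bad.example.com"}, insecure={"dev.example.com"}, oreg="reg.example.com")
    assert skopeo_args("bad.example.com", "ns/img:v1", registries, "--creds=u:p") is None
    assert skopeo_args("dev.example.com", "ns/img:v1", registries, "--creds=u:p")["tls"] == "false"
    assert skopeo_args("reg.example.com", "ns/img:v1", registries, "--creds=u:p")["creds"] == "--creds=u:p"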
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 765ba072d..e7a8ec976 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -14,9 +14,9 @@ class MemoryAvailability(OpenShiftCheck):
     # Values taken from the official installation documentation:
     # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
     recommended_memory_bytes = {
-        "masters": 16 * GIB,
-        "nodes": 8 * GIB,
-        "etcd": 8 * GIB,
+        "oo_masters_to_config": 16 * GIB,
+        "oo_nodes_to_config": 8 * GIB,
+        "oo_etcd_to_config": 8 * GIB,
     }
     # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
     memtotal_adjustment = 1 * GIB
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index b90ebf6dd..cfbdea303 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -21,9 +21,11 @@ class DockerHostMixin(object):

     def is_active(self):
         """Only run on hosts that depend on Docker."""
-        is_containerized = self.get_var("openshift", "common", "is_containerized")
-        is_node = "nodes" in self.get_var("group_names", default=[])
-        return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
+        group_names = set(self.get_var("group_names", default=[]))
+        needs_docker = set(["oo_nodes_to_config"])
+        if self.get_var("openshift.common.is_containerized"):
+            needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
+        return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))

     def ensure_dependencies(self):
         """
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 363c12def..416805c4d 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -24,7 +24,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
     def is_active(self):
         """Skip hosts that do not have package requirements."""
         group_names = self.get_var("group_names", default=[])
-        master_or_node = 'masters' in group_names or 'nodes' in group_names
+        master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
         return super(OvsVersion, self).is_active() and master_or_node

     def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 21355c2f0..090e438ff 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -20,9 +20,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):

         packages = set()

-        if "masters" in group_names:
+        if "oo_masters_to_config" in group_names:
             packages.update(self.master_packages(rpm_prefix))
-        if "nodes" in group_names:
+        if "oo_nodes_to_config" in group_names:
             packages.update(self.node_packages(rpm_prefix))

         args = {"packages": sorted(set(packages))}
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index d4aec3ed8..2f09b22fc 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
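The new DockerHostMixin.is_active expresses "which hosts need Docker" as set arithmetic: nodes always do, and on containerized installs masters and etcd hosts do as well, so activation reduces to a non-empty intersection with the host's groups. A quick worked example of that logic in isolation:

    def needs_docker(group_names, is_containerized):
        wanted = {"oo_nodes_to_config"}
        if is_containerized:
            wanted |= {"oo_masters_to_config", "oo_etcd_to_config"}
        return bool(set(group_names) & wanted)

    assert needs_docker(["oo_nodes_to_config"], False)      # nodes always need Docker
    assert not needs_docker(["oo_etcd_to_config"], False)   # rpm-based etcd does not
    assert needs_docker(["oo_etcd_to_config"], True)        # containerized etcd does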
@@ -36,7 +36,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
     def is_active(self):
         """Skip hosts that do not have package requirements."""
         group_names = self.get_var("group_names", default=[])
-        master_or_node = 'masters' in group_names or 'nodes' in group_names
+        master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
         return super(PackageVersion, self).is_active() and master_or_node

     def run(self):
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index f14887303..40ad27d5d 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -94,6 +94,7 @@ def skipped(result):
     {},
 ])
 def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])

     result = plugin.run(tmp=None, task_vars=task_vars)
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 9ae679b79..29a325a17 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckE

 @pytest.mark.parametrize('group_names,is_active', [
-    (['masters'], True),
-    (['nodes'], True),
-    (['etcd'], True),
-    (['masters', 'nodes'], True),
-    (['masters', 'etcd'], True),
+    (['oo_masters_to_config'], True),
+    (['oo_nodes_to_config'], True),
+    (['oo_etcd_to_config'], True),
+    (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+    (['oo_masters_to_config', 'oo_etcd_to_config'], True),
     ([], False),
     (['lb'], False),
     (['nfs'], False),
@@ -39,7 +39,7 @@ def test_is_active(group_names, is_active):
 ])
 def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
     task_vars = dict(
-        group_names=['masters'],
+        group_names=['oo_masters_to_config'],
         ansible_mounts=ansible_mounts,
     )

@@ -52,7 +52,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):

 @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
     (
-        ['masters'],
+        ['oo_masters_to_config'],
         0,
         [{
             'mount': '/',
@@ -60,7 +60,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         0,
         [{
             'mount': '/',
@@ -68,7 +68,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{
             'mount': '/',
@@ -76,7 +76,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         1,  # configure lower threshold
         [{
             'mount': '/',
@@ -84,7 +84,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
    ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{  # not enough space on / ...
@@ -112,7 +112,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib

 @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
     (
         'test with no space available',
-        ['masters'],
+        ['oo_masters_to_config'],
         0,
         [{
             'mount': '/',
@@ -122,7 +122,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with a higher configured required value',
-        ['masters'],
+        ['oo_masters_to_config'],
         100,  # set a higher threshold
         [{
             'mount': '/',
@@ -132,7 +132,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with 1GB available, but "0" GB space requirement',
-        ['nodes'],
+        ['oo_nodes_to_config'],
         0,
         [{
             'mount': '/',
@@ -142,7 +142,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with no space available, but "0" GB space requirement',
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{
             'mount': '/',
@@ -152,7 +152,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with enough space for a node, but not for a master',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         0,
         [{
             'mount': '/',
@@ -162,7 +162,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test failure with enough space on "/", but not enough on "/var"',
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{  # enough space on / ...
@@ -194,7 +194,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a

 @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
     (
         'test without enough space for master under "upgrade" context',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         "upgrade",
         [{
             'mount': '/',
@@ -206,7 +206,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
     ),
     (
         'test with enough space for master under "upgrade" context',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         "upgrade",
         [{
             'mount': '/',
@@ -218,7 +218,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
     ),
     (
         'test with not enough space for master, and non-upgrade context',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         "health",
         [{
             'mount': '/',
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index c523ffd5c..dec99e5db 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -16,19 +16,19 @@ def task_vars():
         ),
         openshift_deployment_type='origin',
         openshift_image_tag='',
-        group_names=['nodes', 'masters'],
+        group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
     )

 @pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
-    ("origin", True, [], True),
-    ("openshift-enterprise", True, [], True),
     ("invalid", True, [], False),
     ("", True, [], False),
     ("origin", False, [], False),
     ("openshift-enterprise", False, [], False),
-    ("origin", False, ["nodes", "masters"], True),
-    ("openshift-enterprise", False, ["etcd"], False),
+    ("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True),
+    ("openshift-enterprise", False, ["oo_etcd_to_config"], False),
+    ("origin", True, ["nfs"], False),
+    ("openshift-enterprise", True, ["lb"], False),
 ])
 def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
     task_vars['openshift_deployment_type'] = deployment_type
@@ -98,40 +98,7 @@ def test_all_images_unavailable(task_vars):

     actual = check.run()
     assert actual['failed']
-    assert "required Docker images are not available" in actual['msg']
-
-
-def test_no_known_registries():
-    def execute_module(module_name=None, *_):
-        if module_name == "command":
-            return {
-                'failed': True,
-            }
-
-        return {
-            'changed': False,
-        }
-
-    def mock_known_docker_registries():
-        return []
-
-    dia = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            )
-        ),
-        openshift_docker_additional_registries=["docker.io"],
-        openshift_deployment_type="openshift-enterprise",
-        openshift_image_tag='latest',
-        group_names=['nodes', 'masters'],
-    ))
-    dia.known_docker_registries = mock_known_docker_registries
-    actual = dia.run()
-    assert actual['failed']
-    assert "Unable to retrieve any docker registries." in actual['msg']
+    assert "required container images are not available" in actual['msg']

 @pytest.mark.parametrize("message,extra_words", [
@@ -172,13 +139,13 @@ def test_skopeo_update_failure(task_vars, message, extra_words):
         "spam/eggs:v1", ["test.reg"],
         True, True, False,
-        {"test.reg": False},
+        {"test.reg": False, "docker.io": False},
     ),
     (
         "spam/eggs:v1", ["test.reg"],
         False, True, False,
-        {"test.reg": True},
+        {"test.reg": True, "docker.io": True},
     ),
     (
         "eggs.reg/spam/eggs:v1", ["test.reg"],
@@ -195,17 +162,19 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
     elif module_name == "command":
         return dict(msg="msg", failed=skopeo_failed)

-    check = DockerImageAvailability(execute_module, task_vars())
+    tv = task_vars()
+    tv.update({"openshift_docker_additional_registries": registries})
+    check = DockerImageAvailability(execute_module, tv)
     check._module_retry_interval = 0

-    available = check.is_available_skopeo_image(image, registries)
+    available = check.is_available_skopeo_image(image)
     assert available == expect_success
     assert expect_registries_reached == check.reachable_registries

 @pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
     (  # standard set of stuff required on nodes
-        "origin", False, ['nodes'], None,
+        "origin", False, ['oo_nodes_to_config'], "",
         set([
             'openshift/origin-pod:vtest',
             'openshift/origin-deployer:vtest',
@@ -215,7 +184,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
         ])
     ),
     (  # set a different URL for images
-        "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+        "origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}',
         set([
             'foo.io/openshift/origin-pod:vtest',
             'foo.io/openshift/origin-deployer:vtest',
@@ -225,7 +194,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
         ])
     ),
     (
-        "origin", True, ['nodes', 'masters', 'etcd'], None,
+        "origin", True, ['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], "",
         set([
             # images running on top of openshift
             'openshift/origin-pod:vtest',
@@ -241,7 +210,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
         ])
     ),
     (  # enterprise images
-        "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+        "openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45',
         set([
             'foo.io/openshift3/ose-pod:f13ac45',
             'foo.io/openshift3/ose-deployer:f13ac45',
@@ -255,7 +224,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
         ])
     ),
     (
-        "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+        "openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
         set([
             'registry.access.redhat.com/rhel7/etcd',
             # lb does not yet come in a containerized version
@@ -288,7 +257,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
             ),
         ),
         openshift_deployment_type="origin",
-        group_names=['etcd'],
+        group_names=['oo_etcd_to_config'],
     )
     expected = set(['registry.access.redhat.com/rhel7/etcd'])
     assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index e0dccc062..8fa68c378 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -5,9 +5,9 @@ from openshift_checks.docker_storage import DockerStorage

 @pytest.mark.parametrize('is_containerized, group_names, is_active', [
-    (False, ["masters", "etcd"], False),
-    (False, ["masters", "nodes"], True),
-    (True, ["etcd"], True),
+    (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
+    (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
+    (True, ["oo_etcd_to_config"], True),
 ])
 def test_is_active(is_containerized, group_names, is_active):
     task_vars = dict(
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index fae3e578d..dd6f4ad81 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -4,14 +4,14 @@ from openshift_checks.etcd_traffic import EtcdTraffic

 @pytest.mark.parametrize('group_names,version,is_active', [
-    (['masters'], "3.5", False),
-    (['masters'], "3.6", False),
-    (['nodes'], "3.4", False),
-    (['etcd'], "3.4", True),
-    (['etcd'], "1.5", True),
-    (['etcd'], "3.1", False),
-    (['masters', 'nodes'], "3.5", False),
-    (['masters', 'etcd'], "3.5", True),
+    (['oo_masters_to_config'], "3.5", False),
+    (['oo_masters_to_config'], "3.6", False),
+    (['oo_nodes_to_config'], "3.4", False),
+    (['oo_etcd_to_config'], "3.4", True),
+    (['oo_etcd_to_config'], "1.5", True),
+    (['oo_etcd_to_config'], "3.1", False),
+    (['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False),
+    (['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True),
     ([], "3.4", False),
 ])
 def test_is_active(group_names, version, is_active):
@@ -23,9 +23,9 @@ def test_is_active(group_names, version, is_active):

 @pytest.mark.parametrize('group_names,matched,failed,extra_words', [
-    (["masters"], True, True, ["Higher than normal", "traffic"]),
-    (["masters", "etcd"], False, False, []),
-    (["etcd"], False, False, []),
+    (["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]),
+    (["oo_masters_to_config", "oo_etcd_to_config"], False, False, []),
+    (["oo_etcd_to_config"], False, False, []),
 ])
 def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
     def execute_module(module_name, *_):
diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py
index 10db253bc..b5b4858d6 100644
--- a/roles/openshift_health_checker/test/fluentd_config_test.py
+++ b/roles/openshift_health_checker/test/fluentd_config_test.py
@@ -82,7 +82,7 @@ def test_check_logging_config_non_master(name, use_journald, logging_driver, ext
         return {}

     task_vars = dict(
-        group_names=["nodes", "etcd"],
+        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
         openshift_logging_fluentd_use_journal=use_journald,
         openshift=dict(
             common=dict(config_base=""),
@@ -128,7 +128,7 @@ def test_check_logging_config_non_master_failed(name, use_journald, logging_driv
         return {}

     task_vars = dict(
-        group_names=["nodes", "etcd"],
+        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
         openshift_logging_fluentd_use_journal=use_journald,
         openshift=dict(
             common=dict(config_base=""),
@@ -192,7 +192,7 @@ def test_check_logging_config_master(name, pods, logging_driver, extra_words):
         return {}

     task_vars = dict(
-        group_names=["masters"],
+        group_names=["oo_masters_to_config"],
         openshift=dict(
             common=dict(config_base=""),
         ),
@@ -274,7 +274,7 @@ def test_check_logging_config_master_failed(name, pods, logging_driver, words):
         return {}

     task_vars = dict(
-        group_names=["masters"],
+        group_names=["oo_masters_to_config"],
         openshift=dict(
             common=dict(config_base=""),
         ),
@@ -331,7 +331,7 @@ def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods,
         return {}

     task_vars = dict(
-        group_names=["masters"],
+        group_names=["oo_masters_to_config"],
         openshift=dict(
             common=dict(config_base=""),
         ),
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index aee2f0416..5ec83dd79 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.memory_availability import MemoryAvailability

 @pytest.mark.parametrize('group_names,is_active', [
-    (['masters'], True),
-    (['nodes'], True),
-    (['etcd'], True),
-    (['masters', 'nodes'], True),
-    (['masters', 'etcd'], True),
+    (['oo_masters_to_config'], True),
+    (['oo_nodes_to_config'], True),
+    (['oo_etcd_to_config'], True),
+    (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+    (['oo_masters_to_config', 'oo_etcd_to_config'], True),
     ([], False),
     (['lb'], False),
     (['nfs'], False),
@@ -22,32 +22,32 @@ def test_is_active(group_names, is_active):

 @pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
     (
-        ['masters'],
+        ['oo_masters_to_config'],
         0,
         17200,
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         0,
         8200,
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         1,  # configure lower threshold
         2000,  # too low for recommended but not for configured
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         2,  # configure threshold where adjustment pushes it over
         1900,
     ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         8200,
     ),
     (
-        ['masters', 'nodes'],
+        ['oo_masters_to_config', 'oo_nodes_to_config'],
         0,
         17000,
     ),
@@ -66,43 +66,43 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m

 @pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [
     (
-        ['masters'],
+        ['oo_masters_to_config'],
         0,
         0,
         ['0.0 GiB'],
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         0,
         100,
         ['0.1 GiB'],
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         24,  # configure higher threshold
         20 * 1024,  # enough to meet recommended but not configured
         ['20.0 GiB'],
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         24,  # configure higher threshold
         22 * 1024,  # not enough for adjustment to push over threshold
         ['22.0 GiB'],
     ),
     (
-        ['etcd'],
['oo_etcd_to_config'], 0, 6 * 1024, ['6.0 GiB'], ), ( - ['etcd', 'masters'], + ['oo_etcd_to_config', 'oo_masters_to_config'], 0, 9 * 1024, # enough memory for etcd, not enough for a master ['9.0 GiB'], ), ( - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], 0, # enough memory for a node, not enough for a master 11 * 1024, diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index 602f32989..5a82a43bf 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -67,14 +67,14 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): @pytest.mark.parametrize('group_names,is_containerized,is_active', [ - (['masters'], False, True), + (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs - (['masters'], True, False), - (['nodes'], False, True), - (['masters', 'nodes'], False, True), - (['masters', 'etcd'], False, True), + (['oo_masters_to_config'], True, False), + (['oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_etcd_to_config'], False, True), ([], False, False), - (['etcd'], False, False), + (['oo_etcd_to_config'], False, False), (['lb'], False, False), (['nfs'], False, False), ]) diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index b34e8fbfc..9815acb38 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -26,7 +26,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='origin')), - group_names=['masters'], + group_names=['oo_masters_to_config'], ), set(['origin-master']), set(['origin-node']), @@ -34,7 +34,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='atomic-openshift')), - group_names=['nodes'], + group_names=['oo_nodes_to_config'], ), set(['atomic-openshift-node']), set(['atomic-openshift-master']), @@ -42,7 +42,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='atomic-openshift')), - group_names=['masters', 'nodes'], + group_names=['oo_masters_to_config', 'oo_nodes_to_config'], ), set(['atomic-openshift-master', 'atomic-openshift-node']), set(), diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index 8564cd4db..3cf4ce033 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -97,14 +97,14 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc @pytest.mark.parametrize('group_names,is_containerized,is_active', [ - (['masters'], False, True), + (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs - (['masters'], True, False), - (['nodes'], False, True), - (['masters', 'nodes'], False, True), - (['masters', 'etcd'], False, True), + (['oo_masters_to_config'], True, False), + (['oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_etcd_to_config'], False, True), ([], False, False), - (['etcd'], False, 
False), + (['oo_etcd_to_config'], False, False), (['lb'], False, False), (['nfs'], False, False), ]) diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 2aeecc943..2aceef9e4 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -52,9 +52,9 @@ certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}" - - # End Block - when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {} + when: + - openshift_hosted_router_create_certificate | bool + - openshift_hosted_router_certificate == {} - name: Create the router service account(s) oc_serviceaccount: diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2 index eae8b328e..222b63b8a 100644 --- a/roles/openshift_hosted/templates/registry_config.j2 +++ b/roles/openshift_hosted/templates/registry_config.j2 @@ -53,7 +53,7 @@ storage: {% if openshift_hosted_registry_storage_swift_domain is defined %} domain: {{ openshift_hosted_registry_storage_swift_domain }} {% endif -%} -{% if openshift_hosted_registry_storage_swift_domainid %} +{% if openshift_hosted_registry_storage_swift_domainid is defined %} domainid: {{ openshift_hosted_registry_storage_swift_domainid }} {% endif -%} {% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %} @@ -63,7 +63,7 @@ storage: keyfile: /etc/registry/gcs.json {% endif -%} {% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %} - rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }} + rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory | default('/registry') }} {% endif -%} {% endif -%} auth: diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 53d1a8bc7..47dc9171d 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,9 +1,11 @@ --- +# openshift_*_selector variables have been deprecated in favor of +# openshift_hosted_*_selector variables. - set_fact: - openshift_hosted_router_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined - set_fact: - openshift_hosted_registry_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - name: Set hosted facts diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 45477f60d..829c78728 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -169,7 +169,7 @@ Elasticsearch OPS too, if using an OPS cluster: send the raw logs to mux for processing. We do not currently recommend using this mode, and ansible will warn you about this. - `openshift_logging_mux_hostname`: Default is "mux." + - `openshift_master_default_subdomain`. This is the hostname *external*_ + `openshift_master_default_subdomain`. 
This is the hostname *external* clients will use to connect to mux, and will be used in the TLS server cert subject. - `openshift_logging_mux_port`: 24284 @@ -201,24 +201,24 @@ Elasticsearch OPS too, if using an OPS cluster: Defaults to '65534'. ### remote syslog forwarding -`openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` -`openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server -`openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514` -`openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug` -`openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0` -`openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) -`openshift_logging_fluentd_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message -`openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` -`openshift_logging_fluentd_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message - -The corresponding openshift_logging_mux_ parameters are below. - -`openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` -`openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server -`openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514` -`openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug` -`openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0` -`openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) -`openshift_logging_mux_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message -`openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` -`openshift_logging_mux_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message +- `openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +- `openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server +- `openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514` +- `openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug` +- `openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0` +- `openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +- `openshift_logging_fluentd_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message +- `openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +- `openshift_logging_fluentd_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message + +The corresponding openshift\_logging\_mux\_* parameters are below. 
+ +- `openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +- `openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server +- `openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514` +- `openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug` +- `openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0` +- `openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +- `openshift_logging_mux_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message +- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +- `openshift_logging_mux_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 5574a1446..6e7e2557f 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -8,7 +8,6 @@ openshift_logging_labels: {} openshift_logging_label_key: "" openshift_logging_label_value: "" openshift_logging_install_logging: False -openshift_logging_uninstall_logging: False openshift_logging_purge_logging: False openshift_logging_image_pull_secret: "" @@ -95,7 +94,7 @@ openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynami openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}" openshift_logging_es_recover_after_time: 5m -openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_storage_group: "65534" openshift_logging_es_nodeselector: {} # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml openshift_logging_es_config: {} @@ -134,7 +133,7 @@ openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pv openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}" openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" openshift_logging_es_ops_recover_after_time: 5m -openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_ops_storage_group: "65534" openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index eac086e81..330e7e59a 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -45,6 +45,21 @@ def map_from_pairs(source, delim="="): return dict(item.split(delim) for item in source.split(",")) +def serviceaccount_name(qualified_sa): + ''' Returns the simple name from a fully qualified name ''' + return qualified_sa.split(":")[-1] + + +def serviceaccount_namespace(qualified_sa, default=None): + ''' Returns the namespace from a fully qualified name ''' + seg = 
qualified_sa.split(":") + if len(seg) > 1: + return seg[-2] + if default: + return default + return seg[-1] + + # pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' @@ -56,5 +71,7 @@ class FilterModule(object): 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, 'map_from_pairs': map_from_pairs, - 'es_storage': es_storage + 'es_storage': es_storage, + 'serviceaccount_name': serviceaccount_name, + 'serviceaccount_namespace': serviceaccount_namespace } diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 35accfb78..f10df8da5 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -171,22 +171,25 @@ class OpenshiftLoggingFacts(OCBaseCommand): if comp is not None: spec = dc_item["spec"]["template"]["spec"] facts = dict( + name=name, selector=dc_item["spec"]["selector"], replicas=dc_item["spec"]["replicas"], serviceAccount=spec["serviceAccount"], containers=dict(), volumes=dict() ) + if "nodeSelector" in spec: + facts["nodeSelector"] = spec["nodeSelector"] + if "supplementalGroups" in spec["securityContext"]: + facts["storageGroups"] = spec["securityContext"]["supplementalGroups"] + facts["spec"] = spec if "volumes" in spec: for vol in spec["volumes"]: clone = copy.deepcopy(vol) clone.pop("name", None) facts["volumes"][vol["name"]] = clone for container in spec["containers"]: - facts["containers"][container["name"]] = dict( - image=container["image"], - resources=container["resources"], - ) + facts["containers"][container["name"]] = container self.add_facts_for(comp, "deploymentconfigs", name, facts) def facts_for_services(self, namespace): diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 3040d15ca..ffed956a4 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -92,6 +92,7 @@ with_items: - rolebinding-reader - daemonset-admin + - prometheus-metrics-viewer # delete our configmaps - name: delete configmaps diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 9c8f0986a..f526fd734 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -139,10 +139,10 @@ # TODO: make idempotent - name: Generate proxy session - set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} + set_fact: session_secret={{ 200 | oo_random_word}} check_mode: no # TODO: make idempotent - name: Generate oauth client secret - set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} + set_fact: oauth_secret={{ 64 | oo_random_word}} check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 2695ef030..21fd79c28 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -69,17 +69,18 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" + openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}" 
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" - openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}" + openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}" + _es_containers: "{{item.0.containers}}" with_together: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" - "{{ openshift_logging_facts.elasticsearch.pvcs }}" - "{{ es_indices }}" when: @@ -95,8 +96,6 @@ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" - openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} @@ -123,7 +122,7 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" + openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}" openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" @@ -134,16 +133,18 @@ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" - openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}" + openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}" openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}" openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}" openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}" + 
_es_containers: "{{item.0.containers}}" with_together: - - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" - "{{ es_ops_indices }}" when: diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 0da9771c7..15f6a23e6 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -36,7 +36,7 @@ - include: delete_logging.yaml when: - - openshift_logging_uninstall_logging | default(false) | bool + - not openshift_logging_install_logging | default(false) | bool - name: Cleaning up local temp dir local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 49e8a18af..f60fa8d7d 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.6') }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 75bd479be..554aa5bb2 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -6,7 +6,7 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_ openshift_logging_elasticsearch_namespace: logging openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}" -openshift_logging_elasticsearch_cpu_limit: 1000m +openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('1000m') }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}" openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}" @@ -33,13 +33,19 @@ openshift_logging_elasticsearch_pvc_size: "" openshift_logging_elasticsearch_pvc_dynamic: false openshift_logging_elasticsearch_pvc_pv_selector: {} openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce'] -openshift_logging_elasticsearch_storage_group: '65534' +openshift_logging_elasticsearch_storage_group: ['65534'] openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" # config the es plugin to write kibana index based on the index mode openshift_logging_elasticsearch_kibana_index_mode: 'unique' +openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy" +openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" +openshift_logging_elasticsearch_proxy_cpu_limit: "100m" +openshift_logging_elasticsearch_proxy_memory_limit: "64Mi" +openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus" + # this is used to determine if this is an operations deployment or a non-ops deployment # simply used for naming purposes openshift_logging_elasticsearch_ops_deployment: false diff --git 
a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 1e800b1d6..df2c17aa0 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -37,6 +37,7 @@ # we want to make sure we have all the necessary components here # service account + - name: Create ES service account oc_serviceaccount: state: present @@ -77,6 +78,38 @@ resource_name: rolebinding-reader user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch" +- oc_adm_policy_user: + state: present + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + resource_kind: cluster-role + resource_name: system:auth-delegator + user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace}}:aggregated-logging-elasticsearch" + +# logging-metrics-reader role +- template: + src: logging-metrics-role.j2 + dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml" + vars: + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + role_namespace: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_namespace(openshift_logging_elasticsearch_namespace) }}" + role_user: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_name }}" + +- name: Create logging-metrics-reader-role + command: > + {{ openshift.common.client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + -n "{{ openshift_logging_elasticsearch_namespace }}" + create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml" + register: prometheus_out + check_mode: no + ignore_errors: yes + +- fail: + msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}" + when: + - "prometheus_out.stderr | length > 0" + - "'already exists' not in prometheus_out.stderr" + # View role and binding - name: Generate logging-elasticsearch-view-role template: @@ -206,6 +239,32 @@ - port: 9200 targetPort: "restapi" +- name: Set logging-{{ es_component}}-prometheus service + oc_service: + state: present + name: "logging-{{es_component}}-prometheus" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + labels: + logging-infra: 'support' + ports: + - name: proxy + port: 443 + targetPort: 4443 + selector: + component: "{{ es_component }}-prometheus" + provider: openshift + +- oc_edit: + kind: service + name: "logging-{{es_component}}-prometheus" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + separator: '#' + content: + metadata#annotations#service.alpha.openshift.io/serving-cert-secret-name: "prometheus-tls" + metadata#annotations#prometheus.io/scrape: "true" + metadata#annotations#prometheus.io/scheme: "https" + metadata#annotations#prometheus.io/path: "_prometheus/metrics" + - name: Check to see if PVC already exists oc_obj: state: list @@ -260,7 +319,7 @@ delete_after: true - set_fact: - es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}" + es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" when: openshift_logging_elasticsearch_deployment_name == "" - set_fact: @@ -281,6 +340,8 @@ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}" es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" es_node_selector: "{{ 
openshift_logging_elasticsearch_nodeselector | default({}) }}" + es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}" + es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}" deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" es_replicas: 1 diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 3c8f390c4..1ed886627 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -29,7 +29,9 @@ spec: serviceAccountName: aggregated-logging-elasticsearch securityContext: supplementalGroups: - - {{openshift_logging_elasticsearch_storage_group}} +{% for group in es_storage_groups %} + - {{group}} +{% endfor %} {% if es_node_selector is iterable and es_node_selector | length > 0 %} nodeSelector: {% for key, value in es_node_selector.iteritems() %} @@ -37,6 +39,40 @@ spec: {% endfor %} {% endif %} containers: + - name: proxy + image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}} + imagePullPolicy: Always + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | oo_random_word | b64encode }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_limit }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - name: "elasticsearch" image: {{image}} @@ -49,6 +85,9 @@ spec: {% endif %} requests: memory: "{{es_memory_limit}}" +{% if es_container_security_context %} + securityContext: {{ es_container_security_context | to_yaml }} +{% endif %} ports: - containerPort: 9200 @@ -94,7 +133,7 @@ spec: value: "30" - name: "POD_LABEL" - value: "component={{component}}" + value: "component={{component}}" - name: "IS_MASTER" value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" @@ -102,6 +141,9 @@ spec: - name: "HAS_DATA" value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" + - + name: "PROMETHEUS_USER" + value: "{{openshift_logging_elasticsearch_prometheus_sa}}" volumeMounts: - name: elasticsearch @@ -120,6 +162,9 @@ spec: timeoutSeconds: 30 periodSeconds: 5 volumes: + - name: proxy-tls + secret: + secretName: prometheus-tls - name: elasticsearch secret: secretName: logging-elasticsearch 
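For reference, here is a quick standalone check of the two new filter plugins added in `roles/openshift_logging/filter_plugins/openshift_logging.py` above. The function bodies are copied from the diff; the qualified service-account strings are illustrative values only:

```python
# Standalone sanity check for the new serviceaccount_name /
# serviceaccount_namespace filters; bodies copied from the diff above.

def serviceaccount_name(qualified_sa):
    '''Returns the simple name from a fully qualified name'''
    return qualified_sa.split(":")[-1]


def serviceaccount_namespace(qualified_sa, default=None):
    '''Returns the namespace from a fully qualified name'''
    seg = qualified_sa.split(":")
    if len(seg) > 1:
        return seg[-2]
    if default:
        return default
    return seg[-1]


# Fully qualified form, as used for openshift_logging_elasticsearch_prometheus_sa:
sa = "system:serviceaccount:openshift-metrics:prometheus"
assert serviceaccount_name(sa) == "prometheus"
assert serviceaccount_namespace(sa) == "openshift-metrics"

# A bare name has no namespace segment, so the caller's default wins:
assert serviceaccount_namespace("prometheus", default="logging") == "logging"
```

This mirrors how the `logging-metrics-role.j2` templating task above derives `role_user` and `role_namespace` from `openshift_logging_elasticsearch_prometheus_sa`.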
diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 new file mode 100644 index 000000000..d9800e5a5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: List +items: +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: prometheus-metrics-viewer + namespace: {{ namespace }} + rules: + - apiGroups: + - metrics.openshift.io + resources: + - prometheus + verbs: + - view +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + name: prometheus-metrics-viewer + namespace: {{ namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-metrics-viewer + subjects: + - kind: ServiceAccount + namespace: {{ role_namespace }} + name: {{ role_user }} diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 7789d2232..088d0b171 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -1,8 +1,4 @@ --- -- fail: - msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." - when: not openshift.common.version_gte_3_1_or_1_1 | bool - - name: Add Management Infrastructure project oc_project: name: management-infra @@ -61,4 +57,3 @@ resource_kind: "{{ item.resource_kind }}" user: "{{ item.user }}" with_items: "{{manage_iq_openshift_3_2_tasks}}" - when: openshift.common.version_gte_3_2_or_1_2 | bool diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md new file mode 100644 index 000000000..3a71d9211 --- /dev/null +++ b/roles/openshift_management/README.md @@ -0,0 +1,475 @@ +# CloudForms Availability + +As noted in [Limitations - Product Choice](#product-choice), +[CloudForms](https://www.redhat.com/en/technologies/management/cloudforms) +(CFME) 4.6 is not yet released. Until such time, this role is limited +to installing [ManageIQ](http://manageiq.org) (MIQ), the open source +project that CFME is based on. 
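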
+ +After CFME 4.6 is available to customers this role will enable +(optional) logic which will install CFME or MIQ based on your +deployment type (`openshift_deployment_type`): + +* `openshift-enterprise` → CloudForms +* `origin` → ManageIQ + + +# Table of Contents + + * [Introduction](#introduction) + * [Important Notes](#important-notes) + * [Requirements](#requirements) + * [Role Variables](#role-variables) + * [Getting Started](#getting-started) + * [All Defaults](#all-defaults) + * [External NFS Storage](#external-nfs-storage) + * [Override PV sizes](#override-pv-sizes) + * [Override Memory Requirements](#override-memory-requirements) + * [External PostgreSQL Database](#external-postgresql-database) + * [Limitations](#limitations) + * [Product Choice](#product-choice) + * [Configuration](#configuration) + * [Database](#database) + * [Podified](#podified) + * [External](#external) + * [Storage Classes](#storage-classes) + * [NFS (Default)](#nfs-default) + * [NFS External](#nfs-external) + * [Cloud Provider](#cloud-provider) + * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only) + * [Customization](#customization) + * [Uninstall](#uninstall) + * [Additional Information](#additional-information) + +# Introduction + +This role will allow a user to install CFME 4.6 or MIQ on an OCP +3.7 cluster. The role provides customization options for overriding +default deployment parameters. This role allows the user to deploy +different installation flavors: + +* **Fully Podified** - In this way all application services are run as + pods in the container platform. +* **External Database** - In this way the application utilizes an + externally hosted database server. All other services are run in the + container platform. + +This role includes the following storage class options: + +* NFS - **Default** - local, on cluster +* NFS External - NFS somewhere else, like a storage appliance +* Cloud Provider - Use automatic storage provisioning from your cloud + provider (*gce* or *aws*) +* Preconfigured - **expert only**, assumes you created everything ahead + of time + +You may skip ahead to the [Getting Started](#getting-started) section +now for examples of how to set up your Ansible inventory for various +deployment configurations. However, you are **strongly urged** to +first read through the [Configuration](#configuration) and +[Customization](#customization) sections as well as the following +[Important Notes](#important-notes). + +## Important Notes + +Not all parameters are present in **both** template versions (podified +db and external db). For example, while the podified database template +has a `POSTGRESQL_MEM_REQ` parameter, no such parameter is present in +the external db template, as there is no need for this information due +to there being no databases that require pods. + +*Be extra careful* if you are overriding template +parameters. Including parameters not defined in a template **will +cause errors**. + +**Container Provider Integration** - If you want to add your container +platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you +must ensure that the infrastructure management hooks are installed. + +* During your OCP/Origin install, ensure that you have the + `openshift_use_manageiq` parameter set to `true` in your inventory + at install time. This will create a `management-infra` project and a + service account user. +* After CFME/MIQ is installed, obtain the `management-admin` service + account token and copy it somewhere safe. 
+ +```bash +$ oc serviceaccounts get-token -n management-infra management-admin +eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig +``` + +* In the CFME/MIQ web interface, navigate to `Compute` → + `Containers` → `Providers` and select `⚙ Configuration` → `⊕ + Add a new Containers Provider` + +*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.* + + + +# Requirements + +The **default** requirements are listed in the table below. These can +be overridden through customization parameters (See +[Customization](#customization), below). + +**Note** that the application performance will suffer, or possibly +even fail to deploy, if these requirements are not satisfied. + + +| Item | Requirement | Description | Customization Parameter | +|---------------------|---------------|----------------------------------------------|-------------------------------| +| Application Memory | `≥ 4.0 Gi` | Minimum required memory for the application | `APPLICATION_MEM_REQ` | +| Application Storage | `≥ 5.0 Gi` | Minimum PV size required for the application | `APPLICATION_VOLUME_CAPACITY` | +| PostgreSQL Memory | `≥ 6.0 Gi` | Minimum required memory for the database | `POSTGRESQL_MEM_REQ` | +| PostgreSQL Storage | `≥ 15.0 Gi` | Minimum PV size required for the database | `DATABASE_VOLUME_CAPACITY` | +| Cluster Hosts | `≥ 3` | Number of hosts in your cluster | | + +The implications of this table are summarized below: + +* You need several cluster nodes +* Your cluster nodes must have lots of memory available +* You will need several GiB's of storage available, either locally or + on your cloud provider +* PV sizes can be changed by providing override values to template + parameters (see also: [Customization](#customization)) + +# Role Variables + +The following is a table of the publicly exposed variables that may be +used in your Ansible inventory to control the behavior of this +installer. + + +| Variable | Required | Default | Description | +|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------| +| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. | +| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. | +| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application | +| **PRODUCT CHOICE** | | | | | +| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. 
Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> | +| **STORAGE CLASSES** | | | | | +| `openshift_management_storage_class` | **No** | `nfs` | Storage type to use, choices: <ul><li>`nfs` - Best used for proof-of-concept installs. Will set up NFS on a cluster host (defaults to your first master in the inventory file) to back the required PVCs. The application requires a PVC and the database (which may be hosted externally) may require a second. PVC minimum required sizes are 5GiB for the MIQ application, and 15GiB for the PostgreSQL database (20GiB minimum available space on a volume/partition if used specifically for NFS purposes)</li> <li>`nfs_external` - You are using an external NFS server, such as a netapp appliance. See the [Configuration - Storage Classes](#storage-classes) section below for required information.</li> <li>`preconfigured` - This CFME role will do NOTHING to modify storage settings. This option assumes expert knowledge and that you have done everything required ahead of time.</li> <li>`cloudprovider` - You are using an OCP cloudprovider integration for your storage class. For this to work you must have already configured the required inventory parameters for your cloud provider. Ensure `openshift_cloudprovider_kind` is defined (aws or gce) and that the applicable cloudprovider parameters are provided.</li></ul> | +| `openshift_management_storage_nfs_external_hostname` | **No** | `false` | If you are using an *external NFS server*, such as a netapp appliance, then you must set the hostname here. Leave the value as `false` if you are not using external NFS. <br /> *Additionally*: **External NFS REQUIRES** that you create the NFS exports that will back the application PV and optionally the database PV. | +| `openshift_management_storage_nfs_base_dir` | **No** | `/exports/` | If you are using **External NFS** then you may set the base path to the exports location here. <br />**Local NFS Note**: You *may* also change this value if you want to change the default path used for local NFS exports. | +| `openshift_management_storage_nfs_local_hostname` | **No** | `false` | If you do not have an `[nfs]` group in your inventory, or want to simply manually define the local NFS host in your cluster, set this parameter to the hostname of the preferred NFS server. The server must be a part of your OCP/Origin cluster. | +| **CUSTOMIZATION OPTIONS** | | | | | +| `openshift_management_template_parameters` | **No** | `{}` | A dictionary of any parameters you want to override in the application/pv templates. | + +* <sup>[1]</sup> The `cfme-template`s will be available and + automatically detected once CFME 4.6 is released + + +# Getting Started + +Below are some inventory snippets that can help you get started right +away. + +If you want to install CFME/MIQ at the same time you install your +OCP/Origin cluster, ensure that `openshift_management_install_management` is set +to `true` in your inventory. Call the standard +`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ +installation. 
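Whether you install alongside the cluster (as just described) or on an already provisioned cluster (next), it can help to sanity-check any planned `openshift_management_template_parameters` overrides against the minimums in the Requirements table above. The following is a minimal, illustrative Python sketch only: the thresholds come from that table, while the `Gi`/`Mi` quantity parsing is an assumption and not part of this role:

```python
# Hypothetical pre-flight check: warn when template parameter overrides
# fall below the minimums documented in the Requirements table above.
MINIMUMS_GI = {
    'APPLICATION_MEM_REQ': 4.0,
    'APPLICATION_VOLUME_CAPACITY': 5.0,
    'POSTGRESQL_MEM_REQ': 6.0,
    'DATABASE_VOLUME_CAPACITY': 15.0,
}


def to_gi(quantity):
    """Convert a quantity string like '4Gi' or '512Mi' into GiB."""
    if quantity.endswith('Gi'):
        return float(quantity[:-2])
    if quantity.endswith('Mi'):
        return float(quantity[:-2]) / 1024
    raise ValueError("unsupported quantity: %s" % quantity)


# Example overrides, taken from the Override Memory Requirements example below:
overrides = {'APPLICATION_MEM_REQ': '3000Mi', 'POSTGRESQL_MEM_REQ': '1Gi'}
for key, value in overrides.items():
    minimum = MINIMUMS_GI.get(key)
    if minimum is not None and to_gi(value) < minimum:
        print("WARNING: %s=%s is below the documented minimum (%s Gi)"
              % (key, value, minimum))
```

Running this against the example overrides prints two warnings, which is exactly the trade-off the Override Memory Requirements section below warns about.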
+ +If you are installing CFME/MIQ on an *already provisioned cluster* +then you can call the CFME/MIQ playbook directly: + +``` +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml +``` + +*Note: Use `miq-template` in the following examples for ManageIQ installs* + +## All Defaults + +This example is the simplest. All of the default values and choices +are used. This will result in a fully podified CFME installation. All +application components, as well as the PostgreSQL database, will be +created as pods in the container platform. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +``` + +## External NFS Storage + +This is the same as the previous example, except that instead of using local +NFS services in the cluster it will use an external NFS server (such +as a storage appliance). Note the two new parameters: + +* `openshift_management_storage_class` - set to `nfs_external` +* `openshift_management_storage_nfs_external_hostname` - set to the hostname + of the NFS server + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +openshift_management_storage_class=nfs_external +openshift_management_storage_nfs_external_hostname=nfs.example.com +``` + +If the external NFS host exports directories under a different parent +directory, such as `/exports/hosted/prod`, then we would add an +additional parameter, `openshift_management_storage_nfs_base_dir`: + +```ini +# ... +openshift_management_storage_nfs_base_dir=/exports/hosted/prod +``` + +## Override PV sizes + +This example will override the PV sizes. Note that we set the PV sizes +in the template parameters, `openshift_management_template_parameters`. This +ensures that the application/db will be able to make claims on created +PVs without clobbering each other. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +openshift_management_template_parameters={'APPLICATION_VOLUME_CAPACITY': '10Gi', 'DATABASE_VOLUME_CAPACITY': '25Gi'} +``` + +## Override Memory Requirements + +In a test or proof-of-concept installation you may need to reduce the +application/database memory requirements to fit within your +capacity. Note that reducing memory limits can result in reduced +performance or a complete failure to initialize the application. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +openshift_management_template_parameters={'APPLICATION_MEM_REQ': '3000Mi', 'POSTGRESQL_MEM_REQ': '1Gi', 'ANSIBLE_MEM_REQ': '512Mi'} +``` + +Here we have instructed the installer to process the application +template with the parameter `APPLICATION_MEM_REQ` set to `3000Mi`, +`POSTGRESQL_MEM_REQ` set to `1Gi`, and `ANSIBLE_MEM_REQ` set to +`512Mi`. + +These parameters can be combined with the PV size override parameters +displayed in the previous example. + +## External PostgreSQL Database + +To use an external database you must change the +`openshift_management_app_template` parameter value to `miq-template-ext-db` +or `cfme-template-ext-db`. + +Additionally, database connection information **must** be supplied in +the `openshift_management_template_parameters` customization parameter. See +[Customization - Database - External](#external) for more +information. 
+ +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template-ext-db +openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'} +``` + +# Limitations + +This release is the first OpenShift CFME release in the OCP 3.7 +series. It is not complete yet. + +## Product Choice + +Due to staggered release dates, **CFME support is not +integrated**. Presently this role will only deploy a ManageIQ +installation. This role will be updated once CFME 4.6 is released and +this limitation note will be removed. + +# Configuration + +Before you can deploy CFME you must decide *how* you want to deploy +it. There are two major decisions to make: + +1. Do you want an external, or a podified database? +1. Which storage class will back your PVs? + +## Database + +### Podified + +Any `POSTGRES_*` or `DATABASE_*` template parameters in +[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or +[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml) +may be customized through the `openshift_management_template_parameters` +hash. + +### External + +Any `POSTGRES_*` or `DATABASE_*` template parameters in +[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml) +or +[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml) +may be customized through the `openshift_management_template_parameters` +hash. + +External PostgreSQL databases require you to provide database +connection parameters. You must set the required connection keys in +the `openshift_management_template_parameters` parameter in your +inventory. The following keys are required: + +* `DATABASE_USER` +* `DATABASE_PASSWORD` +* `DATABASE_IP` +* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`* +* `DATABASE_NAME` + +Your inventory would contain a line similar to this: + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template-ext-db +openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'} +``` + +**Note** the new value for the `openshift_management_app_template` +parameter, `cfme-template-ext-db` (ManageIQ installations would use +`miq-template-ext-db` instead). + +At run time you may run into errors similar to this: + +``` +TASK [openshift_management : Ensure the CFME App is created] *********************************** +task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74 +Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056) 0:00:12.278 ******* +{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""} +``` + +Or like this: + +``` +TASK [openshift_management : Ensure the CFME App is created] *********************************** +task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74 +Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052) 0:00:18.948 ******* +fatal: [m01.example.com]: FAILED! 
=> {"changed": true, "failed": true, "msg": +{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""}, +``` + +While intimidating at first, there are useful bits of information in +here. Examine the error output closely and we can tell exactly what is +wrong. + +In the first example we see `Endpoints in version \"v1\" cannot be +handled as a Endpoints: [pos 218]: json: decNum: got first char +...`. This is because in my example I used the value `foo` for the +parameter `DATABASE_PORT`. + +In the second example we see `The Endpoints \"postgresql\" is invalid: +subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP +address ...`. This is because in my example I used the value `doo` in +the `DATABASE_IP` field. + +Luckily for us when the templates are processed behind the scenes they +are also running type checking validation. So, don't worry, just look +closely at the errors and ensure you are providing the correct values +for each parameter. + +## Storage Classes + +OpenShift CFME supports several storage class options. + +### NFS (Default) + +The NFS storage class is best suited for proof-of-concept and +test/demo deployments. It is also the **default** storage class for +deployments. No additional configuration is required for this +choice. + +Customization is provided through the following role variables: + +* `openshift_management_storage_nfs_base_dir` +* `openshift_management_storage_nfs_local_hostname` + +### NFS External + +External NFS leans on pre-configured NFS servers to provide exports +for the required PVs. For external NFS you must have: + +* For CFME: a `cfme-app` and optionally a `cfme-db` (for podified database) exports +* For ManageIQ: an `miq-app` and optionally an `miq-db` (for podified database) exports + +Configuration is provided through the following role variables: + +* `openshift_management_storage_nfs_external_hostname` +* `openshift_management_storage_nfs_base_dir` + +The `openshift_management_storage_nfs_external_hostname` parameter must be +set to the hostname or IP of your external NFS server. + +If `/exports` is not the parent directory to your exports then you +must set the base directory via the +`openshift_management_storage_nfs_base_dir` parameter. + +For example, if your server export is `/exports/hosted/prod/cfme-app` +then you must set +`openshift_management_storage_nfs_base_dir=/exports/hosted/prod`. + +### Cloud Provider + +CFME can also use a cloud provider storage to back required PVs. For +this functionality to work you must have also configured the +`openshift_cloudprovider_kind` variable and all associated parameters +specific to your chosen cloud provider. + +Using this storage class, when the application is created the required +PVs will automatically be provisioned using the configured cloud +provider storage integration. + +There are no additional variables to configure the behavior of this +storage class. + +### Preconfigured (Expert Configuration Only) + +The *preconfigured* storage class implies that you know exactly what +you're doing and that all storage requirements have been taken care +ahead of time. Typically this means that you've already created the +correctly sized PVs. + +There are no additional variables to configure the behavior of this +storage class. 
+ +# Customization + +Application and database parameters may be customized by means of the +`openshift_management_template_parameters` inventory parameter. + +**For example**, if you wanted to reduce the memory requirement of the +PostgreSQL pod then you could configure the parameter like this: + +`openshift_management_template_parameters={'POSTGRESQL_MEM_REQ': '1Gi'}` + +When the CFME template is processed `1Gi` will be used for the value +of the `POSTGRESQL_MEM_REQ` template parameter. + +Any parameter in the `parameters` section of the +[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or +[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml) +may be overridden through the `openshift_management_template_parameters` +hash. This applies to **CloudForms** installations as well: +[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml), +[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml). + + +# Uninstall + +This role includes a playbook to uninstall and erase the CFME/MIQ +installation: + +* `playbooks/byo/openshift-management/uninstall.yml` + +# Additional Information + +The upstream project, +[@manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods), +contains a wealth of additional information useful for managing and +operating your CFME installation. Topics include: + +* [Verifying Successful Installation](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful) +* [Disabling Image Change Triggers](https://github.com/ManageIQ/manageiq-pods#disable-image-change-triggers) +* [Scaling CFME](https://github.com/ManageIQ/manageiq-pods#scale-miq) +* [Backing up and Restoring the DB](https://github.com/ManageIQ/manageiq-pods#backup-and-restore-of-the-miq-database) +* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting) diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml new file mode 100644 index 000000000..ebb56313f --- /dev/null +++ b/roles/openshift_management/defaults/main.yml @@ -0,0 +1,90 @@ +--- +# Namespace for the CFME project +openshift_management_project: openshift-management +# Namespace/project description +openshift_management_project_description: CloudForms Management Engine + +###################################################################### +# BASE TEMPLATE AND DATABASE OPTIONS +###################################################################### +# Which flavor of CFME would you like? You may install CFME using a +# podified PostgreSQL server, or you may choose to use an existing +# PostgreSQL server. +# +# Choose 'miq-template' for a podified database install +# Choose 'miq-template-ext-db' for an external database install +openshift_management_app_template: miq-template +# If you are using the miq-template-ext-db template then you must add +# the required database parameters to the +# openshift_management_template_parameters variable. + +###################################################################### +# STORAGE OPTIONS +###################################################################### +# DEFAULT - 'nfs' +# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. +openshift_management_storage_class: nfs +# * nfs - Best used for proof-of-concept installs. Will setup NFS on a +# cluster host (defaults to your first master in the inventory file) +# to back the required PVCs. 
+# The application requires a PVC, and the database (which may be
+# hosted externally) may require a second. Minimum required PVC sizes
+# are 5GiB for the MIQ application and 15GiB for the PostgreSQL
+# database (20GiB minimum available space on a volume/partition if
+# used specifically for NFS purposes)
+#
+# * nfs_external - You are using an external NFS server, such as a
+# NetApp appliance. See the STORAGE - NFS OPTIONS section below for
+# required information.
+#
+# * preconfigured - This CFME role will do NOTHING to modify storage
+# settings. This option assumes expert knowledge and that you have
+# done everything required ahead of time.
+#
+# * cloudprovider - You are using an OCP cloudprovider integration for
+# your storage class. For this to work you must have already
+# configured the required inventory parameters for your cloud
+# provider.
+#
+# Ensure 'openshift_cloudprovider_kind' is defined (aws or gce) and
+# that the applicable cloudprovider parameters are provided.
+
+#---------------------------------------------------------------------
+# STORAGE - NFS OPTIONS
+#---------------------------------------------------------------------
+# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
+# NetApp appliance, then you must set the hostname here. Leave the
+# value as 'false' if you are not using external NFS.
+openshift_management_storage_nfs_external_hostname: false
+# [OPTIONAL] - If you are using external NFS then you must set the
+# base path to the exports location here.
+#
+# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the NFS exports
+# that will back the application PV and, optionally, the database
+# PV. Export paths are defined relative to
+# {{ openshift_management_storage_nfs_base_dir }}
+#
+# LOCAL NFS NOTE:
+#
+# You may also change this value if you want to change the default
+# path used for local NFS exports.
+openshift_management_storage_nfs_base_dir: /exports
+#
+# LOCAL NFS NOTE:
+#
+# You may override the automatically selected LOCAL NFS server by
+# setting this variable. Useful for testing specific task files.
+openshift_management_storage_nfs_local_hostname: false
+
+######################################################################
+# SCAFFOLDING - These are parameters we pre-seed that a user may or
+# may not set later
+######################################################################
+# A hash of parameters you want to override or set in the
+# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
+# your inventory file as a simple hash.
Acceptable values are defined +# under the .parameters list in files/miq-template{-ext-db}.yaml +# Example: +# +# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} +openshift_management_template_parameters: {} diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml new file mode 100644 index 000000000..c3bc1d20c --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-backup +spec: + template: + metadata: + name: cloudforms-backup + spec: + containers: + - name: postgresql + image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml new file mode 100644 index 000000000..92598ce82 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloudforms-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml new file mode 100644 index 000000000..4fe349897 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: "/exports/cfme-pv03" + server: "<your-nfs-host-here>" + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml new file mode 100644 index 000000000..0cdd821b5 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-db-pv +metadata: + name: cloudforms-db-pv + annotations: + description: PV Template for CFME PostgreSQL DB + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/cfme-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the CFME DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git 
a/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml new file mode 100644 index 000000000..527090ae8 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-app-pv +metadata: + name: cloudforms-app-pv + annotations: + description: PV Template for CFME Server + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/cfme-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the CFME APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml new file mode 100644 index 000000000..8b23f8a33 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-restore +spec: + template: + metadata: + name: cloudforms-restore + spec: + containers: + - name: postgresql + image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + - name: cfme-prod-vol + mountPath: "/restore" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + - name: cfme-prod-vol + persistentVolumeClaim: + claimName: cloudforms-postgresql + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml new file mode 100644 index 000000000..d2ece9298 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml @@ -0,0 +1,38 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: v1 +defaultAddCapabilities: +- SYS_ADMIN +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required scc for Pods requiring to run with systemd and the message bus. 
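+# NOTE: the SYS_ADMIN capability added via defaultAddCapabilities
+# above is what allows systemd and the message bus to run in pods
+# admitted under this SCC.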
+ creationTimestamp: + name: cfme-sysadmin +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +- SYS_CHROOT +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- secret diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml new file mode 100644 index 000000000..4a04f3372 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml @@ -0,0 +1,763 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-ext-db +metadata: + name: cloudforms-ext-db + annotations: + description: CloudForms appliance with persistent storage using a external DB host + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: 
V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + 
livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. 
+ Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. 
+ value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible continer + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. 
+ value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files. 
+ value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml new file mode 100644 index 000000000..d7c9f5af7 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml @@ -0,0 +1,940 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms +metadata: + name: cloudforms + annotations: + description: CloudForms appliance with persistent storage + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 + wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING 
+ #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + #------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + #------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "${NAME}-${DATABASE_SERVICE_NAME}" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${DATABASE_VOLUME_CAPACITY}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + 
volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + 
selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Exposes the database server + spec: + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + selector: + name: "${DATABASE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the database + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${DATABASE_SERVICE_NAME}" + template: + metadata: + name: "${DATABASE_SERVICE_NAME}" + labels: + name: "${DATABASE_SERVICE_NAME}" + spec: + volumes: + - name: cfme-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" + containers: + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: cfme-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: cfme-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: 
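+    # Pod template for the embedded Ansible appliance. Note that
+    # replicas is 0 above, so no Ansible pod runs until this
+    # deployment is scaled up.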
+ metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. 
+ value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible continer + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: POSTGRESQL_CONFIG_DIR + displayName: PostgreSQL Configuration Overrides + description: Directory used to store PostgreSQL configuration overrides. + value: "/var/lib/pgsql/conf.d" +- name: POSTGRESQL_MAX_CONNECTIONS + displayName: PostgreSQL Max Connections + description: PostgreSQL maximum number of database connections allowed. + value: '1000' +- name: POSTGRESQL_SHARED_BUFFERS + displayName: PostgreSQL Shared Buffer Amount + description: Amount of memory dedicated for PostgreSQL shared memory buffers. + value: 1GB +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). 
+ value: 1000m +- name: POSTGRESQL_CPU_REQ + displayName: PostgreSQL Min CPU Requested + required: true + description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores). + value: 500m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: POSTGRESQL_MEM_REQ + displayName: PostgreSQL Min RAM Requested + required: true + description: Minimum amount of memory the PostgreSQL container will need. + value: 4Gi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: POSTGRESQL_MEM_LIMIT + displayName: PostgreSQL Max RAM Limit + required: true + description: Maximum amount of memory the PostgreSQL container can consume. + value: 8Gi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: POSTGRESQL_IMG_NAME + displayName: PostgreSQL Image Name + description: This is the PostgreSQL image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql +- name: POSTGRESQL_IMG_TAG + displayName: PostgreSQL Image Tag + description: This is the PostgreSQL image tag/version requested to deploy. + value: latest +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. 
+ value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: DATABASE_VOLUME_CAPACITY + displayName: Database Volume Capacity + required: true + description: Volume space available for database. + value: 15Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. 
+ value: 8192Mi diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml new file mode 100644 index 000000000..044cb73a5 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: manageiq-backup +spec: + template: + metadata: + name: manageiq-backup + spec: + containers: + - name: postgresql + image: docker.io/manageiq/postgresql:latest + command: + - "/opt/manageiq/container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: manageiq-secrets + key: database-url + volumeMounts: + - name: miq-backup-vol + mountPath: "/backups" + volumes: + - name: miq-backup-vol + persistentVolumeClaim: + claimName: manageiq-backup + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml new file mode 100644 index 000000000..25696ef23 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: manageiq-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml new file mode 100644 index 000000000..a5cf54d4e --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: miq-pv03 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: "/exports/miq-pv03" + server: "<your-nfs-host-here>" + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml new file mode 100644 index 000000000..a803bebe2 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq-db-pv +metadata: + name: manageiq-db-pv + annotations: + description: PV Template for MIQ PostgreSQL DB + tags: PVS, MIQ +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: miq-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/miq-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the MIQ DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml new file mode 100644 index 000000000..1288544d1 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq-app-pv +metadata: 
+ name: manageiq-app-pv + annotations: + description: PV Template for MIQ Server + tags: PVS, MIQ +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: miq-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/miq-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the MIQ APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml new file mode 100644 index 000000000..eea284dd4 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: manageiq-restore +spec: + template: + metadata: + name: manageiq-restore + spec: + containers: + - name: postgresql + image: docker.io/manageiq/postgresql:latest + command: + - "/opt/manageiq/container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: manageiq-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: miq-backup-vol + mountPath: "/backups" + - name: miq-prod-vol + mountPath: "/restore" + volumes: + - name: miq-backup-vol + persistentVolumeClaim: + claimName: manageiq-backup + - name: miq-prod-vol + persistentVolumeClaim: + claimName: manageiq-postgresql + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml new file mode 100644 index 000000000..82cd5d49e --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml @@ -0,0 +1,771 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq-ext-db +metadata: + name: manageiq-ext-db + annotations: + description: ManageIQ appliance with persistent storage using an external DB host + tags: instant-app,manageiq,miq + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances ManageIQ pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: MEMCACHED_SERVICE_NAME + value: "${MEMCACHED_SERVICE_NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for ManageIQ backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: 
database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: 
"${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: miq-privileged + serviceAccountName: miq-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: 
"${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: miq-anyuid + serviceAccountName: miq-anyuid +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: manageiq +- name: V2_KEY + displayName: ManageIQ Encryption Key + required: true + description: Encryption Key for ManageIQ Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. + value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible continer + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. 
+ value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: docker.io/manageiq/memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: APPLICATION_IMG_NAME + displayName: Application Image Name + description: This is the Application image name requested to deploy. + value: docker.io/manageiq/manageiq-pods +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the ManageIQ Frontend Application image tag/version requested to deploy. + value: frontend-latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the ManageIQ Backend Application image tag/version requested to deploy. + value: backend-latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. 
+ value: docker.io/manageiq/embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: docker.io/manageiq/httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. 
+ value: 8192Mi diff --git a/roles/openshift_management/files/templates/manageiq/miq-template.yaml b/roles/openshift_management/files/templates/manageiq/miq-template.yaml new file mode 100644 index 000000000..3f5a12205 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-template.yaml @@ -0,0 +1,948 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq +metadata: + name: manageiq + annotations: + description: ManageIQ appliance with persistent storage + tags: instant-app,manageiq,miq + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 + wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING + #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + #------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + 
#------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances ManageIQ pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "${NAME}-${DATABASE_SERVICE_NAME}" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${DATABASE_VOLUME_CAPACITY}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: MEMCACHED_SERVICE_NAME + value: "${MEMCACHED_SERVICE_NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: 
+ secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for ManageIQ backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: 
+ timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Exposes the database server + spec: + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + selector: + name: "${DATABASE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the database + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${DATABASE_SERVICE_NAME}" + template: + metadata: + name: "${DATABASE_SERVICE_NAME}" + labels: + name: "${DATABASE_SERVICE_NAME}" + spec: + volumes: + - name: miq-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: miq-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" + containers: + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: miq-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: miq-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - 
containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: miq-privileged + serviceAccountName: miq-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: miq-anyuid + serviceAccountName: miq-anyuid +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: manageiq +- name: V2_KEY + displayName: ManageIQ Encryption Key + required: true + description: Encryption Key for ManageIQ Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. 
+ value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible container + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: POSTGRESQL_CONFIG_DIR + displayName: PostgreSQL Configuration Overrides + description: Directory used to store PostgreSQL configuration overrides. + value: "/var/lib/pgsql/conf.d" +- name: POSTGRESQL_MAX_CONNECTIONS + displayName: PostgreSQL Max Connections + description: PostgreSQL maximum number of database connections allowed. + value: '1000' +- name: POSTGRESQL_SHARED_BUFFERS + displayName: PostgreSQL Shared Buffer Amount + description: Amount of memory dedicated for PostgreSQL shared memory buffers. + value: 1GB +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: POSTGRESQL_CPU_REQ + displayName: PostgreSQL Min CPU Requested + required: true + description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores). + value: 500m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: POSTGRESQL_MEM_REQ + displayName: PostgreSQL Min RAM Requested + required: true + description: Minimum amount of memory the PostgreSQL container will need. + value: 4Gi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: POSTGRESQL_MEM_LIMIT + displayName: PostgreSQL Max RAM Limit + required: true + description: Maximum amount of memory the PostgreSQL container can consume. + value: 8Gi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: POSTGRESQL_IMG_NAME + displayName: PostgreSQL Image Name + description: This is the PostgreSQL image name requested to deploy. + value: docker.io/manageiq/postgresql +- name: POSTGRESQL_IMG_TAG + displayName: PostgreSQL Image Tag + description: This is the PostgreSQL image tag/version requested to deploy. + value: latest +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: docker.io/manageiq/memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: APPLICATION_IMG_NAME + displayName: Application Image Name + description: This is the Application image name requested to deploy. + value: docker.io/manageiq/manageiq-pods +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the ManageIQ Frontend Application image tag/version requested to deploy. + value: frontend-latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the ManageIQ Backend Application image tag/version requested to deploy. + value: backend-latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: docker.io/manageiq/embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. 
+ value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: DATABASE_VOLUME_CAPACITY + displayName: Database Volume Capacity + required: true + description: Volume space available for database. + value: 15Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: docker.io/manageiq/httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_management/handlers/main.yml b/roles/openshift_management/handlers/main.yml new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/roles/openshift_management/handlers/main.yml diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_management/meta/main.yml index 162d817f0..07ad51126 100644 --- a/roles/openshift_cfme/meta/main.yml +++ b/roles/openshift_management/meta/main.yml @@ -16,4 +16,3 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils -- role: openshift_master_facts diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml new file mode 100644 index 000000000..e45ea8d43 --- /dev/null +++ b/roles/openshift_management/tasks/accounts.yml @@ -0,0 +1,28 @@ +--- +# This role task file is responsible for user/system account creation, +# and ensuring correct access is provided as required. 
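+#
+# As a rough, illustrative sketch only: with a 'miq' flavor and an
+# 'openshift-management' project (both assumptions here; the real values
+# come from openshift_management_flavor_short, openshift_management_project
+# and the __openshift_system_account_sccs defaults), the tasks below behave
+# much like this CLI sequence:
+#
+#   oc create serviceaccount miq-anyuid -n openshift-management
+#   oc adm policy add-scc-to-user anyuid \
+#       system:serviceaccount:openshift-management:miq-anyuid
+#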
+- name: Ensure the CFME system accounts exist + oc_serviceaccount: + namespace: "{{ openshift_management_project }}" + state: present + name: "{{ openshift_management_flavor_short }}{{ item.name }}" + with_items: + - "{{ __openshift_system_account_sccs }}" + +- name: Ensure the CFME system accounts have all the required SCCs + oc_adm_policy_user: + namespace: "{{ openshift_management_project }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + resource_kind: scc + resource_name: "{{ item.resource_name }}" + with_items: + - "{{ __openshift_system_account_sccs }}" + +- name: Ensure the CFME system accounts have the required roles + oc_adm_policy_user: + namespace: "{{ openshift_management_project }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + resource_kind: role + resource_name: "{{ item.resource_name }}" + with_items: + - "{{ __openshift_management_system_account_roles }}" diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml new file mode 100644 index 000000000..86c4d0010 --- /dev/null +++ b/roles/openshift_management/tasks/main.yml @@ -0,0 +1,79 @@ +--- +###################################################################### +# Users, projects, and privileges + +- name: Run pre-install CFME validation checks + include: validate.yml + +- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists" + oc_project: + state: present + name: "{{ openshift_management_project }}" + display_name: "{{ openshift_management_project_description }}" + +- name: Create and Authorize CFME Accounts + include: accounts.yml + +###################################################################### +# STORAGE - Initialize basic storage class +#--------------------------------------------------------------------- +# * nfs - set up NFS shares on the first master for a proof of concept +- name: Create required NFS exports for CFME app storage + include: storage/nfs.yml + when: openshift_management_storage_class == 'nfs' + +#--------------------------------------------------------------------- +# * external - NFS again, but pointing to a pre-configured NFS server +- name: Note Storage Type - External NFS + debug: + msg: "Setting up external NFS storage, openshift_management_storage_class is {{ openshift_management_storage_class }}" + when: openshift_management_storage_class == 'nfs_external' + +#--------------------------------------------------------------------- +# * cloudprovider - use an existing cloudprovider based storage +- name: Note Storage Type - Cloud Provider + debug: + msg: Validating cloud provider storage type, openshift_management_storage_class is 'cloudprovider' + when: openshift_management_storage_class == 'cloudprovider' + +#--------------------------------------------------------------------- +# * preconfigured - don't do anything, assume it's all there ready to go +- name: Note Storage Type - Preconfigured + debug: + msg: Skipping storage configuration, openshift_management_storage_class is 'preconfigured' + when: openshift_management_storage_class == 'preconfigured' + +###################################################################### +# APPLICATION TEMPLATE +- name: Install the CFME app and PV templates + include: template.yml + +###################################################################### +# APP & DB Storage + +# For local/external NFS backed installations +-
name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}" + include: storage/create_nfs_pvs.yml + when: + - openshift_management_storage_class in ['nfs', 'nfs_external'] + +###################################################################### +# CREATE APP +- name: Note the correct ext-db template name + set_fact: + openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" + when: + - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + +- name: Note the correct podified db template name + set_fact: + openshift_management_template_name: "{{ openshift_management_flavor }}" + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + +- name: Ensure the CFME App is created + oc_process: + namespace: "{{ openshift_management_project }}" + template_name: "{{ openshift_management_template_name }}" + create: True + params: "{{ openshift_management_template_parameters }}" diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml new file mode 100644 index 000000000..31c845725 --- /dev/null +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -0,0 +1,69 @@ +--- +# Create the required PVs for the App and the DB +- name: Note the App PV Size from Template Parameters + set_fact: + openshift_management_app_pv_size: "{{ openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY }}" + when: + - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is defined + +- name: Note the App PV Size from defaults + set_fact: + openshift_management_app_pv_size: "{{ __openshift_management_app_pv_size }}" + when: + - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined + +- when: openshift_management_app_template in ['miq-template', 'cfme-template'] + block: + - name: Note the DB PV Size from Template Parameters + set_fact: + openshift_management_db_pv_size: "{{ openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY }}" + when: + - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is defined + + - name: Note the DB PV Size from defaults + set_fact: + openshift_management_db_pv_size: "{{ __openshift_management_db_pv_size }}" + when: + - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined + +- name: Check if the CFME App PV has been created + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: pv + name: "{{ openshift_management_flavor_short }}-app" + register: miq_app_pv_check + +- name: Check if the CFME DB PV has been created + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: pv + name: "{{ openshift_management_flavor_short }}-db" + register: miq_db_pv_check + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + +- name: Ensure the CFME App PV is created + oc_process: + namespace: "{{ openshift_management_project }}" + template_name: "{{ openshift_management_flavor }}-app-pv" + create: True + params: + PV_SIZE: "{{ openshift_management_app_pv_size }}" + BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" + NFS_HOST: "{{ openshift_management_nfs_server }}" + when: miq_app_pv_check.results.results == [{}] + +- name: Ensure the CFME DB PV is created + oc_process: + namespace: "{{ openshift_management_project }}" + template_name: "{{ openshift_management_flavor }}-db-pv" + create: True + params: + PV_SIZE: "{{ 
openshift_management_db_pv_size }}" + BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" + NFS_HOST: "{{ openshift_management_nfs_server }}" + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + - miq_db_pv_check.results.results == [{}] diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml new file mode 100644 index 000000000..696808328 --- /dev/null +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -0,0 +1,67 @@ +--- +# Tasks to statically provision NFS volumes +# Include if not using dynamic volume provisioning + +- name: Ensure we save the local NFS server if one is provided + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" + when: + - openshift_management_storage_nfs_local_hostname is defined + - openshift_management_storage_nfs_local_hostname != False + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the local NFS server + set_fact: + openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" + when: + - openshift_management_nfs_server is not defined + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the external NFS server + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" + when: + - openshift_management_storage_class == "nfs_external" + +- name: Failed NFS server detection + assert: + that: + - openshift_management_nfs_server is defined + msg: | + "Unable to detect an NFS server. The 'nfs_external' + openshift_management_storage_class option requires that you set + openshift_management_storage_nfs_external_hostname. NFS hosts detected + for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" + +- name: Setting up NFS storage + block: + - name: Include the NFS Setup role tasks + include_role: + role: openshift_nfs + tasks_from: setup + vars: + l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" + + - name: Create the App export + include_role: + role: openshift_nfs + tasks_from: create_export + vars: + l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" + l_nfs_export_config: "{{ openshift_management_flavor_short }}" + l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" + l_nfs_options: "*(rw,no_root_squash,no_wdelay)" + + - name: Create the DB export + include_role: + role: openshift_nfs + tasks_from: create_export + vars: + l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" + l_nfs_export_config: "{{ openshift_management_flavor_short }}" + l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" + l_nfs_options: "*(rw,no_root_squash,no_wdelay)" + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + + delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml new file mode 100644 index 000000000..d8bf7aa3e --- /dev/null +++ b/roles/openshift_management/tasks/storage/storage.yml @@ -0,0 +1,3 @@ +--- +- include: nfs.yml + when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml new file mode 100644 index 000000000..299158ac4 --- /dev/null +++ b/roles/openshift_management/tasks/template.yml @@ -0,0 +1,128 @@ +--- 
+# Tasks for ensuring the correct CFME templates are landed on the remote system
+
+######################################################################
+# CFME App Template
+#
+# Note, this is different from the create_nfs_pvs.yml tasks in that
+# the application template does not require any jinja2 evaluation.
+#
+# TODO: Handle the case where the server or PV templates are updated
+# in openshift-ansible and the change needs to be landed on the
+# managed cluster.
+
+######################################################################
+# STANDARD PODIFIED DATABASE TEMPLATE
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+  block:
+  - name: Check if the CFME Server template has been created already
+    oc_obj:
+      namespace: "{{ openshift_management_project }}"
+      state: list
+      kind: template
+      name: "{{ openshift_management_flavor }}"
+    register: miq_server_check
+
+  - when: miq_server_check.results.results == [{}]
+    block:
+    - name: Copy over CFME Server template
+      copy:
+        src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
+        dest: "{{ template_dir }}/"
+
+    - name: Ensure CFME Server Template is created
+      oc_obj:
+        namespace: "{{ openshift_management_project }}"
+        name: "{{ openshift_management_flavor }}"
+        state: present
+        kind: template
+        files:
+        - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml"
+
+######################################################################
+# EXTERNAL DATABASE TEMPLATE
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+  block:
+  - name: Check if the CFME Ext-DB Server template has been created already
+    oc_obj:
+      namespace: "{{ openshift_management_project }}"
+      state: list
+      kind: template
+      name: "{{ openshift_management_flavor }}-ext-db"
+    register: miq_ext_db_server_check
+
+  - when: miq_ext_db_server_check.results.results == [{}]
+    block:
+    - name: Copy over CFME Ext-DB Server template
+      copy:
+        src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml"
+        dest: "{{ template_dir }}/"
+
+    - name: Ensure CFME Ext-DB Server Template is created
+      oc_obj:
+        namespace: "{{ openshift_management_project }}"
+        name: "{{ openshift_management_flavor }}-ext-db"
+        state: present
+        kind: template
+        files:
+        - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml"
+
+# End app template creation.
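+#
+# Each template block above uses a check-then-create pattern to stay
+# idempotent. A rough CLI equivalent of one pass (flavor 'manageiq'
+# is the current default in vars/main.yml; the path is illustrative):
+#
+#   oc get template manageiq -n <project> ||
+#       oc create -n <project> -f templates/manageiq/miq-template.yaml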
+###################################################################### + +###################################################################### +# Begin conditional PV template creations + +# Required for the application server +- name: Check if the CFME App PV template has been created already + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: template + name: "{{ openshift_management_flavor }}-app-pv" + register: miq_app_pv_check + +- when: miq_app_pv_check.results.results == [{}] + block: + - name: Copy over CFME App PV template + copy: + src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + dest: "{{ template_dir }}/" + + - name: Ensure CFME App PV Template is created + oc_obj: + namespace: "{{ openshift_management_project }}" + name: "{{ openshift_management_flavor }}-app-pv" + state: present + kind: template + files: + - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + +#--------------------------------------------------------------------- + +# Required for database if the installation is fully podified +- when: openshift_management_app_template in ['miq-template', 'cfme-template'] + block: + - name: Check if the CFME DB PV template has been created already + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: template + name: "{{ openshift_management_flavor }}-db-pv" + register: miq_db_pv_check + + - when: miq_db_pv_check.results.results == [{}] + block: + - name: Copy over CFME DB PV template + copy: + src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + dest: "{{ template_dir }}/" + + - name: Ensure CFME DB PV Template is created + oc_obj: + namespace: "{{ openshift_management_project }}" + name: "{{ openshift_management_flavor }}-db-pv" + state: present + kind: template + files: + - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/uninstall.yml b/roles/openshift_management/tasks/uninstall.yml new file mode 100644 index 000000000..09fbc609f --- /dev/null +++ b/roles/openshift_management/tasks/uninstall.yml @@ -0,0 +1,23 @@ +--- +- name: Start removing all the objects + command: "oc delete -n {{ openshift_management_project }} {{ item }} --all" + with_items: + - rc + - dc + - po + - svc + - pv + - pvc + - statefulsets + - routes + +- name: Remove the project + command: "oc delete -n {{ openshift_management_project }} project {{ openshift_management_project }}" + +- name: Verify project has been destroyed + command: "oc get project {{ openshift_management_project }}" + ignore_errors: True + register: project_terminated + until: project_terminated.stderr.find("NotFound") != -1 + delay: 5 + retries: 30 diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml new file mode 100644 index 000000000..8b20bdc5e --- /dev/null +++ b/roles/openshift_management/tasks/validate.yml @@ -0,0 +1,90 @@ +--- +# Validate configuration parameters passed to the openshift_management role + +###################################################################### +# CORE PARAMETERS +- name: Ensure openshift_management_app_template is valid + assert: + that: + - openshift_management_app_template in __openshift_management_app_templates + + msg: | + "openshift_management_app_template must be one of {{ + __openshift_management_app_templates | join(', ') }}" + +- name: 
Ensure openshift_management_storage_class is a valid type
+  assert:
+    that:
+    - openshift_management_storage_class in __openshift_management_storage_classes
+    msg: |
+      "openshift_management_storage_class must be one of {{
+      __openshift_management_storage_classes | join(', ') }}"
+
+######################################################################
+# STORAGE PARAMS - NFS
+- name: Ensure external NFS storage has a valid NFS server hostname defined
+  assert:
+    that:
+    - openshift_management_storage_nfs_external_hostname | default(False)
+    msg: |
+      The selected storage class 'nfs_external' requires a valid
+      hostname for the openshift_management_storage_nfs_external_hostname
+      parameter
+  when:
+  - openshift_management_storage_class == 'nfs_external'
+
+- name: Ensure local NFS storage has a valid NFS server to use
+  fail:
+    msg: |
+      No NFS hosts detected or defined but storage class is set to
+      'nfs'. Add hosts to your [nfs] group or define one manually with
+      the 'openshift_management_storage_nfs_local_hostname' parameter
+  when:
+  - openshift_management_storage_class == 'nfs'
+  # You haven't created any NFS groups
+  - (groups.nfs is defined and groups.nfs | length == 0) or (groups.nfs is not defined)
+  # You did not manually specify a host to use
+  - (openshift_management_storage_nfs_local_hostname is not defined) or (openshift_management_storage_nfs_local_hostname == false)
+
+######################################################################
+# STORAGE PARAMS - CLOUD PROVIDER
+- name: Validate Cloud Provider storage class
+  assert:
+    that:
+    - openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'
+    msg: |
+      openshift_management_storage_class is 'cloudprovider' but you have an
+      invalid kind defined, '{{ openshift_cloudprovider_kind }}'. See
+      'openshift_cloudprovider_kind' in the example inventories for
+      the required parameters for your selected cloud
+      provider. Working providers: 'aws' and 'gce'.
+  when:
+  - openshift_management_storage_class == 'cloudprovider'
+  - openshift_cloudprovider_kind is defined
+
+- name: Validate 'cloudprovider' Storage Class has required parameters defined
+  assert:
+    that:
+    - openshift_cloudprovider_kind is defined
+    msg: |
+      openshift_management_storage_class is 'cloudprovider' but you do not
+      have 'openshift_cloudprovider_kind' defined; this is
+      required. Search the example inventories for
+      'openshift_cloudprovider_kind'. The required parameters for your
+      selected cloud provider must be defined in your inventory as
+      well. Working providers: 'aws' and 'gce'.
+  when:
+  - openshift_management_storage_class == 'cloudprovider'
+
+######################################################################
+# DATABASE CONNECTION VALIDATION
+- name: Validate all required database parameters were provided for ext-db template
+  assert:
+    that:
+    - item in openshift_management_template_parameters
+    msg: |
+      "You are using external database services but a required
+      database parameter {{ item }} was not found in
+      'openshift_management_template_parameters'"
+  with_items: "{{ __openshift_management_required_db_conn_params }}"
+  when:
+  - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml
new file mode 100644
index 000000000..da3ad0af7
--- /dev/null
+++ b/roles/openshift_management/vars/main.yml
@@ -0,0 +1,76 @@
+---
+# Misc enumerated values
+#---------------------------------------------------------------------
+# Allowed choices for the storage class parameter
+__openshift_management_storage_classes:
+  - nfs
+  - nfs_external
+  - preconfigured
+  - cloudprovider
+
+#---------------------------------------------------------------------
+# DEFAULT PV SIZES
+# How large to make the MIQ application PV
+__openshift_management_app_pv_size: 5Gi
+# How large to make the MIQ PostgreSQL PV
+__openshift_management_db_pv_size: 15Gi
+
+# Name of the application templates with object/parameter definitions
+__openshift_management_app_templates:
+  - miq-template-ext-db
+  - miq-template
+  - cfme-template-ext-db
+  - cfme-template
+
+# PostgreSQL database connection parameters
+__openshift_management_db_parameters:
+  - DATABASE_USER
+  - DATABASE_PASSWORD
+  - DATABASE_IP
+  - DATABASE_PORT
+  - DATABASE_NAME
+
+# # Commented out until we can support both CFME and MIQ
+# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}"
+#openshift_management_flavor: cloudforms
+openshift_management_flavor: manageiq
+# TODO: Make this conditional as well based on the prior variable
+# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}"
+# openshift_management_flavor_short: cfme
+openshift_management_flavor_short: miq
+
+######################################################################
+# ACCOUNTING
+######################################################################
+# Service Account SCCs
+__openshift_system_account_sccs:
+  - name: -anyuid
+    resource_name: anyuid
+  - name: -orchestrator
+    resource_name: anyuid
+  - name: -privileged
+    resource_name: privileged
+  - name: -httpd
+    resource_name: anyuid
+
+# Service Account Roles
+__openshift_management_system_account_roles:
+  - name: -orchestrator
+    resource_name: view
+  - name: -orchestrator
+    resource_name: edit
+
+######################################################################
+# DEFAULTS
+######################################################################
+# User only has to provide parameters they need to override; we will
+# do a hash update method with the provided user parameters to create
+# the final connection structure.
+# +# TODO: Update user provided configs with this if they are missing fields +__openshift_management_required_db_conn_params: + - DATABASE_USER + - DATABASE_PASSWORD + - DATABASE_IP + - DATABASE_PORT + - DATABASE_NAME diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index d045b402b..9b3fbcf49 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -251,11 +251,7 @@ servingInfo: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} bindNetwork: tcp4 certFile: master.server.crt -{% if openshift.common.version_gte_3_2_or_1_2 | bool %} - clientCA: ca-bundle.crt -{% else %} clientCA: ca.crt -{% endif %} keyFile: master.server.key maxRequestsInFlight: {{ openshift.master.max_requests_inflight }} requestTimeoutSeconds: 3600 diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml index 018186e86..300b2cbff 100644 --- a/roles/openshift_master_certificates/meta/main.yml +++ b/roles/openshift_master_certificates/meta/main.yml @@ -12,6 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_master_facts -- role: openshift_ca +dependencies: [] diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml index a80313505..d0dcdae4b 100644 --- a/roles/openshift_master_facts/defaults/main.yml +++ b/roles/openshift_master_facts/defaults/main.yml @@ -1,5 +1,5 @@ --- -openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}" +openshift_master_default_subdomain: "router.default.svc.cluster.local" openshift_master_admission_plugin_config: openshift.io/ImagePolicy: configuration: diff --git a/roles/openshift_master_facts/lookup_plugins/oo_option.py b/roles/openshift_master_facts/lookup_plugins/oo_option.py deleted file mode 120000 index 5ae43f8dd..000000000 --- a/roles/openshift_master_facts/lookup_plugins/oo_option.py +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins/oo_option.py
\ No newline at end of file diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index fa228af2a..a95570d38 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -1,5 +1,4 @@ --- - # Ensure the default sub-domain is set: - name: Migrate legacy osm_default_subdomain fact set_fact: diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index ed0182ba8..8da74430f 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,7 +1,6 @@ --- openshift_metrics_start_cluster: True openshift_metrics_install_metrics: False -openshift_metrics_uninstall_metrics: False openshift_metrics_startup_timeout: 500 openshift_metrics_hawkular_replicas: 1 @@ -61,3 +60,6 @@ openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_mod openshift_metrics_hawkular_user_write_access: False openshift_metrics_heapster_allowed_users: system:master-proxy + +openshift_metrics_cassandra_enable_prometheus_endpoint: True +openshift_metrics_hawkular_enable_prometheus_endpoint: True diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index c92458c50..10509fc1e 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -49,7 +49,7 @@ - include: uninstall_metrics.yaml when: - - openshift_metrics_uninstall_metrics | bool + - not openshift_metrics_install_metrics | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 index fc82f49b1..6f341bcfb 100644 --- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 @@ -56,6 +56,8 @@ spec: value: "/cassandra_data" - name: JVM_OPTS value: "-Dcassandra.commitlog.ignorereplayerrors=true" + - name: ENABLE_PROMETHEUS_ENDPOINT + value: "{{ openshift_metrics_cassandra_enable_prometheus_endpoint }}" - name: TRUSTSTORE_NODES_AUTHORITIES value: "/hawkular-cassandra-certs/tls.peer.truststore.crt" - name: TRUSTSTORE_CLIENT_AUTHORITIES diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index 9a9363075..59f7fb44a 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -55,6 +55,7 @@ spec: - "-Dcom.datastax.driver.FORCE_NIO=true" - "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}" - "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}" + - "-Dhawkular.metrics.jmx-reporting-enabled" env: - name: POD_NAMESPACE valueFrom: @@ -66,6 +67,8 @@ spec: value: "{{ 17 | oo_random_word }}" - name: TRUSTSTORE_AUTHORITIES value: "/hawkular-metrics-certs/tls.truststore.crt" + - name: ENABLE_PROMETHEUS_ENDPOINT + value: "{{ openshift_metrics_hawkular_enable_prometheus_endpoint }}" - name: OPENSHIFT_KUBE_PING_NAMESPACE valueFrom: fieldRef: diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index 68cdf06fe..5a1728de5 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_metrics_image_prefix: 
"registry.access.redhat.com/openshift3/" -__openshift_metrics_image_version: "v3.6" +__openshift_metrics_image_version: "v3.7" diff --git a/roles/openshift_named_certificates/defaults/main.yml b/roles/openshift_named_certificates/defaults/main.yml new file mode 100644 index 000000000..a32e385ec --- /dev/null +++ b/roles/openshift_named_certificates/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" +openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" +openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" +openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" +openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_named_certificates/tasks/named_certificates.yml b/roles/openshift_named_certificates/tasks/named_certificates.yml deleted file mode 100644 index 7b097b443..000000000 --- a/roles/openshift_named_certificates/tasks/named_certificates.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Clear named certificates - file: - path: "{{ named_certs_dir }}" - state: absent - when: overwrite_named_certs | bool - -- name: Ensure named certificate directory exists - file: - path: "{{ named_certs_dir }}" - state: directory - mode: 0700 - -- name: Land named certificates - copy: - src: "{{ item.certfile }}" - dest: "{{ named_certs_dir }}" - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named certificate keys - copy: - src: "{{ item.keyfile }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named CA certificates - copy: - src: "{{ item }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) | oo_collect('cafile') }}" diff --git a/roles/openshift_named_certificates/vars/main.yml b/roles/openshift_named_certificates/vars/main.yml index 368e9bdac..7f891441d 100644 --- a/roles/openshift_named_certificates/vars/main.yml +++ b/roles/openshift_named_certificates/vars/main.yml @@ -1,10 +1,4 @@ --- -openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" -openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" -openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" -openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" -openshift_version: "{{ openshift_pkg_version | default('') }}" - overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}" named_certs_dir: "{{ openshift.common.config_base }}/master/named_certificates/" internal_hostnames: "{{ openshift.common.internal_hostnames }}" diff --git a/roles/openshift_nfs/README.md b/roles/openshift_nfs/README.md new file mode 100644 index 000000000..36ea36385 --- /dev/null +++ b/roles/openshift_nfs/README.md @@ -0,0 +1,17 @@ +OpenShift NFS +============= + +Sets up basic NFS services on a cluster host. + +See [tasks/create_export.yml](tasks/create_export.yml) for +instructions on using the export creation tasks file. 
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Tim Bielawa (tbielawa@redhat.com)
diff --git a/roles/openshift_nfs/defaults/main.yml b/roles/openshift_nfs/defaults/main.yml
new file mode 100644
index 000000000..ee94c7c57
--- /dev/null
+++ b/roles/openshift_nfs/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+r_openshift_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_openshift_nfs_os_firewall_deny: []
+r_openshift_nfs_firewall_allow:
+- service: nfs
+  port: "2049/tcp"
diff --git a/roles/openshift_nfs/meta/main.yml b/roles/openshift_nfs/meta/main.yml
new file mode 100644
index 000000000..d7b5910f2
--- /dev/null
+++ b/roles/openshift_nfs/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Tim Bielawa
+  description: OpenShift Basic NFS Configuration
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: lib_utils
+- role: lib_os_firewall
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
new file mode 100644
index 000000000..39323904f
--- /dev/null
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -0,0 +1,34 @@
+---
+# Makes a new NFS export
+#
+# Include signature
+#
+#  include_role:
+#    role: openshift_nfs
+#    tasks_from: create_export
+#  vars:
+#    l_nfs_base_dir: Base dir for exports
+#    l_nfs_export_config: Name to prefix the .exports file with
+#    l_nfs_export_name: Name of sub-directory of the export
+#    l_nfs_options: Mount Options
+
+- name: "Ensure the {{ l_nfs_export_name }} NFS export directory exists"
+  file:
+    path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
+    state: directory
+    mode: 0777
+    owner: nfsnobody
+    group: nfsnobody
+
+- name: "Create {{ l_nfs_export_name }} NFS export"
+  lineinfile:
+    path: "/etc/exports.d/{{ l_nfs_export_config }}.exports"
+    create: true
+    state: present
+    line: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }} {{ l_nfs_options }}"
+  register: created_export
+
+- name: Re-export NFS filesystems
+  command: exportfs -ar
+  when:
+  - created_export | changed
diff --git a/roles/openshift_nfs/tasks/firewall.yml b/roles/openshift_nfs/tasks/firewall.yml
new file mode 100644
index 000000000..0898b2b5c
--- /dev/null
+++ b/roles/openshift_nfs/tasks/firewall.yml
@@ -0,0 +1,40 @@
+---
+- when: r_openshift_nfs_firewall_enabled | bool and not r_openshift_nfs_use_firewalld | bool
+  block:
+  - name: Add iptables allow rules
+    os_firewall_manage_iptables:
+      name: "{{ item.service }}"
+      action: add
+      protocol: "{{ item.port.split('/')[1] }}"
+      port: "{{ item.port.split('/')[0] }}"
+    when: item.cond | default(True)
+    with_items: "{{ r_openshift_nfs_firewall_allow }}"
+
+  - name: Remove iptables rules
+    os_firewall_manage_iptables:
+      name: "{{ item.service }}"
+      action: remove
+      protocol: "{{ item.port.split('/')[1] }}"
+      port: "{{ item.port.split('/')[0] }}"
+    when: item.cond | default(True)
+    with_items: "{{ r_openshift_nfs_os_firewall_deny }}"
+
+- when: r_openshift_nfs_firewall_enabled | bool and r_openshift_nfs_use_firewalld | bool
+  block:
+  - name: Add firewalld allow rules
+    firewalld:
+      port: "{{ item.port }}"
+      permanent: true
+      immediate: true
+      state: enabled
+    when: item.cond | default(True)
+    with_items: "{{ r_openshift_nfs_firewall_allow }}"
+
+  - name: Remove firewalld allow rules
+    firewalld:
+      port: "{{ item.port }}"
+      permanent: true
+      immediate: 
true + state: disabled + when: item.cond | default(True) + with_items: "{{ r_openshift_nfs_os_firewall_deny }}" diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml new file mode 100644 index 000000000..3070de495 --- /dev/null +++ b/roles/openshift_nfs/tasks/setup.yml @@ -0,0 +1,29 @@ +--- +- name: setup firewall + include: firewall.yml + static: yes + +- name: Install nfs-utils + package: name=nfs-utils state=present + +- name: Configure NFS + lineinfile: + dest: /etc/sysconfig/nfs + regexp: '^RPCNFSDARGS=.*$' + line: 'RPCNFSDARGS="-N 2 -N 3"' + register: nfs_config + +- name: Restart nfs-config + systemd: name=nfs-config state=restarted + when: nfs_config | changed + +- name: Ensure exports directory exists + file: + path: "{{ l_nfs_base_dir }}" + state: directory + +- name: Enable and start NFS services + systemd: + name: nfs-server + state: started + enabled: yes diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index ed3516d04..1214c08e5 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -31,12 +31,9 @@ openshift_node_ami_prep_packages: - python-dbus - PyYAML - yum-utils -- python2-boto -- python2-boto3 - cloud-utils-growpart # gluster - glusterfs-fuse -- heketi-client # nfs - nfs-utils - flannel diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index ce5ecb9d0..5bc7b9869 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -17,7 +17,5 @@ dependencies: - role: lib_os_firewall - role: openshift_clock - role: openshift_docker -- role: openshift_node_certificates - when: not openshift_node_bootstrap - role: openshift_cloud_provider - role: openshift_node_dnsmasq diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 2759188f3..e3898b520 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -46,6 +46,22 @@ notify: - restart node +- name: Configure AWS Cloud Provider Settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: true + with_items: + - regex: '^AWS_ACCESS_KEY_ID=' + line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" + - regex: '^AWS_SECRET_ACCESS_KEY=' + line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" + no_log: True + when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined + notify: + - restart node + # Necessary because when you're on a node that's also a master the master will be # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e82fb42b8..59b8bb76e 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -76,22 +76,6 @@ include: config.yml when: not openshift_node_bootstrap -- name: Configure AWS Cloud Provider Settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - create: true - with_items: - - regex: '^AWS_ACCESS_KEY_ID=' - line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" - 
- regex: '^AWS_SECRET_ACCESS_KEY=' - line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" - no_log: True - when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined - notify: - - restart node - #### Storage class plugins here #### - name: NFS storage plugin configuration include: storage_plugins/nfs.yml diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 0ca44c292..20d7a9539 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -11,4 +11,6 @@ image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" values: - "DNS_DOMAIN={{ openshift.common.dns_domain }}" + - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service" + - "MASTER_SERVICE={{ openshift.common.service_type }}.service" state: latest diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index 3d2831742..de396fb4b 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -21,4 +21,4 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or oreg_auth_credentials_replace.changed) | bool + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 310d8b29d..561aa01f4 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -6,6 +6,7 @@ PartOf={{ openshift.docker.service_name }}.service Requires={{ openshift.docker.service_name }}.service {% if openshift_node_use_openshift_sdn %} Wants=openvswitch.service +PartOf=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service {% endif %} diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml index 93216c1d2..0440bf11a 100644 --- a/roles/openshift_node_certificates/meta/main.yml +++ b/roles/openshift_node_certificates/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_facts +dependencies: [] diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index fd4c49504..0d5fa664c 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -1,10 +1,4 @@ --- -- set_fact: - openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - when: - - openshift_node_debug_level is not defined - - lookup('oo_option', 'openshift_node_debug_level') != "" - - name: Set node facts openshift_facts: role: "{{ item.role }}" @@ -20,7 +14,7 @@ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - labels: "{{ lookup('oo_option', 
'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" + labels: "{{ openshift_node_labels | default(None) }}" registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml index 3d2831742..de396fb4b 100644 --- a/roles/openshift_node_upgrade/tasks/registry_auth.yml +++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml @@ -21,4 +21,4 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or oreg_auth_credentials_replace.changed) | bool + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service index 864e4b5d6..07d1ebc3c 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service @@ -6,6 +6,7 @@ PartOf={{ openshift.docker.service_name }}.service Requires={{ openshift.docker.service_name }}.service {% if openshift_use_openshift_sdn %} Wants=openvswitch.service +PartOf=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service {% endif %} diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 index 0e2d57cb6..b0c036e7c 100644 --- a/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin-testing] name=CentOS OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/ -enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +enabled={{ 1 if openshift_repos_enable_testing else 0 }} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 index 2470931e1..97e855d58 100644 --- a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin14-testing] name=CentOS OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin14/ -enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +enabled={{ 1 if openshift_repos_enable_testing else 0 }} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 index 901f02cf4..5e756e680 100644 --- a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin15-testing] name=CentOS 
OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin15/ -enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +enabled={{ 1 if openshift_repos_enable_testing else 0 }} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 index abc4ad1b5..7050c95f5 100644 --- a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin36-testing] name=CentOS OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin36/ -enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +enabled={{ 1 if openshift_repos_enable_testing else 0 }} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py deleted file mode 100644 index d42c9bdb9..000000000 --- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py +++ /dev/null @@ -1,25 +0,0 @@ -''' - Openshift Logging class that provides useful filters used in Logging. - - This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml -''' - - -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - -# pylint: disable=too-few-public-methods -class FilterModule(object): - ''' OpenShift Logging Filters ''' - - # pylint: disable=no-self-use, too-few-public-methods - def filters(self): - ''' Returns the names of the filters provided by this class ''' - return { - 'map_from_pairs': map_from_pairs - } diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py new file mode 100644 index 000000000..72c47b8ee --- /dev/null +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py @@ -0,0 +1,44 @@ +''' + Openshift Sanitize inventory class that provides useful filters used in Logging. 
+'''
+
+
+import re
+
+
+# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict given the source and delim delimited '''
+    if source == '':
+        return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+def vars_with_pattern(source, pattern=""):
+    ''' Returns a list of variables whose name matches the given pattern '''
+    if source == '':
+        return list()
+
+    var_list = list()
+
+    var_pattern = re.compile(pattern)
+
+    for item in source:
+        if var_pattern.match(item):
+            var_list.append(item)
+
+    return var_list
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+    ''' OpenShift Sanitize Inventory Filters '''
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        ''' Returns the names of the filters provided by this class '''
+        return {
+            'map_from_pairs': map_from_pairs,
+            'vars_with_pattern': vars_with_pattern
+        }
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index 24e44ea85..39bf1780a 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -10,3 +10,25 @@
      Starting in 3.6 openshift_use_dnsmasq must be true or critical features
      will not function. This also means that NetworkManager must be installed
      enabled and responsible for management of the primary interface.
+
+- set_fact:
+    __using_dynamic: True
+  when:
+  - hostvars[inventory_hostname][item] in ['dynamic']
+  with_items:
+  - "{{ hostvars[inventory_hostname] | vars_with_pattern(pattern='openshift_.*_storage_kind') }}"
+
+- name: Ensure that dynamic provisioning is set if using dynamic storage
+  when:
+  - dynamic_volumes_check | default(true) | bool
+  - not openshift_master_dynamic_provisioning_enabled | default(false) | bool
+  - not openshift_cloudprovider_kind is defined
+  - __using_dynamic is defined and __using_dynamic | bool
+  fail:
+    msg: |-
+      Using a storage kind of 'dynamic' without enabling dynamic provisioning
+      or setting a cloud provider will cause generated PVCs to not be able to
+      bind as intended. Either update to not use dynamic storage or set
+      openshift_master_dynamic_provisioning_enabled to True and set an
+      openshift_cloudprovider_kind. You can disable this check with
+      'dynamic_volumes_check=False'.
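For context on the dynamic-storage check above: the new `vars_with_pattern`
filter matches variable *names* against a regex and returns the matching
names. A minimal usage sketch (the variable values are illustrative):

```yaml
# Given inventory variables such as:
#   openshift_metrics_storage_kind: dynamic
#   openshift_logging_storage_kind: nfs
# this prints both variable names, since the filter matches on names
# only; the check above then inspects each value for 'dynamic':
- debug:
    msg: "{{ hostvars[inventory_hostname] | vars_with_pattern(pattern='openshift_.*_storage_kind') }}"
```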
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml index 4df60e9a8..cab9cc7d8 100644 --- a/roles/openshift_service_catalog/vars/openshift-enterprise.yml +++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/ose-" -__openshift_service_catalog_image_version: "v3.6" +__openshift_service_catalog_image_version: "v3.7" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 3f6dab78b..51724f979 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -18,6 +18,17 @@ node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}" when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass +- name: Add namespace service accounts to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" + resource_kind: scc + resource_name: privileged + state: present + with_items: + - 'default' + - 'router' + when: glusterfs_is_native or glusterfs_heketi_is_native + - name: Delete pre-existing heketi resources oc_obj: namespace: "{{ glusterfs_namespace }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 8c3e31fc9..932d06038 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -55,16 +55,6 @@ - glusterfs_wipe - item.stdout_lines | count > 0 -- name: Add service accounts to privileged SCC - oc_adm_policy_user: - user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" - resource_kind: scc - resource_name: privileged - state: present - with_items: - - 'default' - - 'router' - - name: Label GlusterFS nodes oc_label: name: "{{ hostvars[item].openshift.node.nodename }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 54a6dd7c3..074904bec 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json" + command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json" register: setup_storage - name: Copy heketi-storage list diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 9738929d2..fa74c9953 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -7,7 +7,7 @@ when: deployment_type == 'openshift-enterprise' - set_fact: - ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}" + ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}" - fail: msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index c43e5513d..b06f51908 100644 --- 
a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -4,10 +4,10 @@ # to make it able to enable repositories - set_fact: - rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('Red Hat OpenShift Container Platform, Premium*', True) }}" - rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}" - rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}" - rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}" + rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}" + rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}" + rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}" + rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}" - fail: msg: "This role is only supported for Red Hat hosts" diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index a9d22aa06..f5fd6487c 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -27,6 +27,7 @@ with_items: - "{{ __tsb_template_file }}" - "{{ __tsb_rbac_file }}" + - "{{ __tsb_broker_file }}" - name: Apply template file shell: > @@ -42,6 +43,33 @@ src: openshift-ansible-catalog-console.js dest: /etc/origin/master/openshift-ansible-catalog-console.js +# Check that the TSB is running +- name: Verify that TSB is running + command: > + curl -k https://apiserver.openshift-template-service-broker.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_health + until: api_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- set_fact: + openshift_master_config_dir: "{{ openshift.common.config_base }}/master" + when: openshift_master_config_dir is undefined + +- slurp: + src: "{{ openshift_master_config_dir }}/ca.crt" + register: __ca_bundle + +# Register with broker +- name: Register TSB with broker + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | oc apply -f - + - file: state: absent name: "{{ mktemp.stdout }}" diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 207dd9bdb..f3afe65ed 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -6,11 +6,18 @@ - copy: src: "{{ __tsb_files_location }}/{{ item }}" - dest: "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __tsb_template_file }}" + - "{{ __tsb_broker_file }}" + +- name: Delete TSB broker + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete -f - - name: Delete TSB objects shell: > - oc process -f "{{ __tsb_files_location }}/{{ __tsb_template_file }}" | kubectl delete -f - + oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f - - name: empty out tech preview extension file for service console UI 
copy: diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml index 372ab8f6f..a65340f16 100644 --- a/roles/template_service_broker/vars/main.yml +++ b/roles/template_service_broker/vars/main.yml @@ -4,3 +4,4 @@ __tsb_files_location: "../../../files/origin-components/" __tsb_template_file: "apiserver-template.yaml" __tsb_config_file: "apiserver-config.yaml" __tsb_rbac_file: "rbac-template.yaml" +__tsb_broker_file: "template-service-broker-registration.yaml" @@ -165,7 +165,7 @@ class OpenShiftAnsibleYamlLint(Command): has_warnings = True if has_errors or has_warnings: - print('yammlint issues found') + print('yamllint issues found') raise SystemExit(1) |
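The template_service_broker changes above land the registration template,
wait on the TSB health endpoint, and register the broker by processing the
template with the master CA bundle. A hedged follow-up sketch for verifying
the registration (not part of the role; it assumes the service catalog API
is aggregated so `servicebroker` resources are queryable via `oc`):

```yaml
- name: Verify the TSB registration with the service catalog
  command: oc get servicebroker template-service-broker -o name
  register: tsb_broker_check
  until: tsb_broker_check.rc == 0
  retries: 60
  delay: 2
  changed_when: false
```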