168 files changed, 1710 insertions, 1270 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 9a5acc500..b2155c30f 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.126.0 ./ +3.7.0-0.127.0 ./ diff --git a/callback_plugins/default.py b/callback_plugins/default.py deleted file mode 100644 index 97ad77724..000000000 --- a/callback_plugins/default.py +++ /dev/null @@ -1,70 +0,0 @@ -'''Plugin to override the default output logic.''' - -# upstream: https://gist.github.com/cliffano/9868180 - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - - -# For some reason this has to be done -import imp -import os - -ANSIBLE_PATH = imp.find_module('ansible')[1] -DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py') -DEFAULT_MODULE = imp.load_source( - 'ansible.plugins.callback.default', - DEFAULT_PATH -) - -try: - from ansible.plugins.callback import CallbackBase - BASECLASS = CallbackBase -except ImportError: # < ansible 2.1 - BASECLASS = DEFAULT_MODULE.CallbackModule - - -class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-public-methods,no-init - ''' - Override for the default callback module. - - Render std err/out outside of the rest of the result which it prints with - indentation. 
- ''' - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'default' - - def __init__(self, *args, **kwargs): - # pylint: disable=non-parent-init-called - BASECLASS.__init__(self, *args, **kwargs) - - def _dump_results(self, result): - '''Return the text to output for a result.''' - result['_ansible_verbose_always'] = True - - save = {} - for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']: - if key in result: - save[key] = result.pop(key) - - output = BASECLASS._dump_results(self, result) # pylint: disable=protected-access - - for key in ['stdout', 'stderr', 'msg']: - if key in save and save[key]: - output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key]) - - for key, value in save.items(): - result[key] = value - - return output diff --git a/files/origin-components/apiserver-config.yaml b/files/origin-components/apiserver-config.yaml new file mode 100644 index 000000000..e4048d1da --- /dev/null +++ b/files/origin-components/apiserver-config.yaml @@ -0,0 +1,4 @@ +kind: TemplateServiceBrokerConfig +apiVersion: config.templateservicebroker.openshift.io/v1 +templateNamespaces: +- openshift diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml new file mode 100644 index 000000000..1b42597af --- /dev/null +++ b/files/origin-components/apiserver-template.yaml @@ -0,0 +1,122 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-apiserver +parameters: +- name: IMAGE + value: openshift/origin:latest +- name: NAMESPACE + value: openshift-template-service-broker +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG + value: | + kind: TemplateServiceBrokerConfig + apiVersion: config.templateservicebroker.openshift.io/v1 + templateNamespaces: + - openshift +objects: + +# to create the tsb server +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + namespace: ${NAMESPACE} + name: apiserver + labels: + apiserver: "true" + spec: + template: + metadata: + name: apiserver + labels: + apiserver: "true" + spec: + serviceAccountName: apiserver + containers: + - name: c + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/openshift" + - "start" + - "template-service-broker" + - "--secure-port=8443" + - "--audit-log-path=-" + - "--tls-cert-file=/var/serving-cert/tls.crt" + - "--tls-private-key-file=/var/serving-cert/tls.key" + - "--loglevel=${LOGLEVEL}" + - "--config=/var/apiserver-config/apiserver-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/apiserver-config + name: apiserver-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + volumes: + - name: serving-cert + secret: + defaultMode: 420 + secretName: apiserver-serving-cert + - name: apiserver-config + configMap: + defaultMode: 420 + name: apiserver-config + +# to create the config for the TSB +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: apiserver-config + data: + apiserver-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: apiserver + +# to be able to expose TSB inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: apiserver + annotations: + service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert + spec: + selector: + apiserver: "true" + ports: + - 
port: 443 + targetPort: 8443 + +# This service account will be granted permission to call the TSB. +# The token for this SA will be provided to the service catalog for +# use when calling the TSB. +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# This secret will be populated with a copy of the templateservicebroker-client SA's +# auth token. Since this secret has a static name, it can be referenced more +# easily than the auto-generated secret for the service account. +- apiVersion: v1 + kind: Secret + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + annotations: + kubernetes.io/service-account.name: templateservicebroker-client + type: kubernetes.io/service-account-token diff --git a/files/origin-components/rbac-template.yaml b/files/origin-components/rbac-template.yaml new file mode 100644 index 000000000..0937a9065 --- /dev/null +++ b/files/origin-components/rbac-template.yaml @@ -0,0 +1,92 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-rbac +parameters: +- name: NAMESPACE + value: openshift-template-service-broker +- name: KUBE_SYSTEM + value: kube-system +objects: + +# Grant the service account permission to call the TSB +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: templateservicebroker-client + roleRef: + kind: ClusterRole + name: system:openshift:templateservicebroker-client + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# to delegate authentication and authorization +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: auth-delegator-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to have the template service broker powers +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: tsb-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:openshift:controller:template-service-broker + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to read the config for terminating authentication +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${KUBE_SYSTEM} + name: extension-apiserver-authentication-reader-${NAMESPACE} + roleRef: + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# allow the kube service catalog's SA to read the static secret defined +# above, which will contain the token for the SA that can call the TSB. 
+- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + name: templateservicebroker-auth-reader + namespace: ${NAMESPACE} + rules: + - apiGroups: + - "" + resourceNames: + - templateservicebroker-client + resources: + - secrets + verbs: + - get +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-auth-reader + roleRef: + kind: Role + name: templateservicebroker-auth-reader + subjects: + - kind: ServiceAccount + namespace: kube-service-catalog + name: service-catalog-controller diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 902436302..f0f250480 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -716,6 +716,100 @@ def oo_openshift_env(hostvars): # pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements +def oo_component_persistent_volumes(hostvars, groups, component): + """ Generate list of persistent volumes based on oo_openshift_env + storage options set in host variables for a specific component. + """ + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + if not issubclass(type(groups), dict): + raise errors.AnsibleFilterError("|failed expects groups is a dict") + + persistent_volume = None + + if component in hostvars['openshift']: + if 'storage' in hostvars['openshift'][component]: + params = hostvars['openshift'][component]['storage'] + kind = params['kind'] + create_pv = params['create_pv'] + if kind is not None and create_pv: + if kind == 'nfs': + host = params['host'] + if host is None: + if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = params['nfs']['directory'] + volume = params['volume']['name'] + path = directory + '/' + volume + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + + elif kind == 'openstack': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + filesystem = params['openstack']['filesystem'] + volume_id = params['openstack']['volumeID'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + + elif kind == 'glusterfs': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + endpoints = params['glusterfs']['endpoints'] + path = params['glusterfs']['path'] + read_only = params['glusterfs']['readOnly'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + 
raise errors.AnsibleFilterError(msg) + return persistent_volume + + +# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): """ Generate list of persistent volumes based on oo_openshift_env storage options set in host variables. @@ -734,84 +828,122 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): if 'storage' in hostvars['openshift']['hosted'][component]: params = hostvars['openshift']['hosted'][component]['storage'] kind = params['kind'] - create_pv = params['create_pv'] - if kind is not None and create_pv: - if kind == 'nfs': - host = params['host'] - if host is None: - if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] + if 'create_pv' in params: + create_pv = params['create_pv'] + if kind is not None and create_pv: + if kind == 'nfs': + host = params['host'] + if host is None: + if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = params['nfs']['directory'] + volume = params['volume']['name'] + path = directory + '/' + volume + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = params['nfs']['directory'] - volume = params['volume']['name'] - path = directory + '/' + volume - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - nfs=dict( - server=host, - path=path))) - persistent_volumes.append(persistent_volume) - elif kind == 'openstack': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - filesystem = params['openstack']['filesystem'] - volume_id = params['openstack']['volumeID'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - cinder=dict( - fsType=filesystem, - volumeID=volume_id))) - persistent_volumes.append(persistent_volume) - elif kind == 'glusterfs': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - endpoints = params['glusterfs']['endpoints'] - path = params['glusterfs']['path'] - read_only = params['glusterfs']['readOnly'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - glusterfs=dict( - endpoints=endpoints, - path=path, - readOnly=read_only))) - persistent_volumes.append(persistent_volume) - elif not (kind == 'object' or kind == 'dynamic'): - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) + labels = dict() + access_modes = params['access']['modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + 
persistent_volumes.append(persistent_volume) + elif kind == 'openstack': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + filesystem = params['openstack']['filesystem'] + volume_id = params['openstack']['volumeID'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + persistent_volumes.append(persistent_volume) + elif kind == 'glusterfs': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + endpoints = params['glusterfs']['endpoints'] + path = params['glusterfs']['path'] + read_only = params['glusterfs']['readOnly'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + persistent_volumes.append(persistent_volume) + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + raise errors.AnsibleFilterError(msg) + if 'logging' in hostvars['openshift']: + persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging') + if persistent_volume is not None: + persistent_volumes.append(persistent_volume) + if 'loggingops' in hostvars['openshift']: + persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops') + if persistent_volume is not None: + persistent_volumes.append(persistent_volume) + if 'metrics' in hostvars['openshift']: + persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics') + if persistent_volume is not None: + persistent_volumes.append(persistent_volume) return persistent_volumes +def oo_component_pv_claims(hostvars, component): + """ Generate list of persistent volume claims based on oo_openshift_env + storage options set in host variables for a speicific component. + """ + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + + if component in hostvars['openshift']: + if 'storage' in hostvars['openshift'][component]: + params = hostvars['openshift'][component]['storage'] + kind = params['kind'] + create_pv = params['create_pv'] + create_pvc = params['create_pvc'] + if kind not in [None, 'object'] and create_pv and create_pvc: + volume = params['volume']['name'] + size = params['volume']['size'] + access_modes = params['access']['modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + return persistent_volume_claim + return None + + def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): """ Generate list of persistent volume claims based on oo_openshift_env storage options set in host variables. 
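Reviewer note: the new oo_component_persistent_volumes / oo_component_pv_claims filters factor the per-component PV/PVC generation out of the hosted-only code path so that storage facts under openshift.logging, openshift.loggingops and openshift.metrics are handled the same way as the hosted components. A trimmed-down, standalone sketch of just the NFS branch, with invented hostvars, showing the dict shape the filter returns (illustrative only, not the shipped filter; error handling and the other storage kinds are omitted):

    # Simplified rendition of the NFS branch of oo_component_persistent_volumes.
    def component_nfs_pv(hostvars, groups, component):
        params = hostvars['openshift'][component]['storage']
        if params['kind'] != 'nfs' or not params['create_pv']:
            return None
        # Fall back to the first host in the [nfs] group when no host is set.
        host = params['host'] or groups['oo_nfs_to_config'][0]
        volume = params['volume']['name']
        return dict(
            name="{0}-volume".format(volume),
            capacity=params['volume']['size'],
            labels=params.get('labels', {}),
            access_modes=params['access']['modes'],
            storage=dict(nfs=dict(
                server=host,
                path=params['nfs']['directory'] + '/' + volume)))

    # Hypothetical facts, mirroring openshift_logging_storage_* inventory settings.
    hostvars = {'openshift': {'logging': {'storage': {
        'kind': 'nfs', 'create_pv': True, 'host': None,
        'nfs': {'directory': '/exports'},
        'volume': {'name': 'logging', 'size': '10Gi'},
        'access': {'modes': ['ReadWriteOnce']}}}}}
    groups = {'oo_nfs_to_config': ['nfs.example.com']}

    print(component_nfs_pv(hostvars, groups, 'logging'))
    # -> {'name': 'logging-volume', 'capacity': '10Gi', 'labels': {},
    #     'access_modes': ['ReadWriteOnce'],
    #     'storage': {'nfs': {'server': 'nfs.example.com', 'path': '/exports/logging'}}}

oo_component_pv_claims follows the same pattern, returning a "{volume}-claim" dict when both create_pv and create_pvc are set.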
@@ -828,17 +960,31 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): if 'storage' in hostvars['openshift']['hosted'][component]: params = hostvars['openshift']['hosted'][component]['storage'] kind = params['kind'] - create_pv = params['create_pv'] - create_pvc = params['create_pvc'] - if kind not in [None, 'object'] and create_pv and create_pvc: - volume = params['volume']['name'] - size = params['volume']['size'] - access_modes = params['access']['modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - persistent_volume_claims.append(persistent_volume_claim) + if 'create_pv' in params: + if 'create_pvc' in params: + create_pv = params['create_pv'] + create_pvc = params['create_pvc'] + if kind not in [None, 'object'] and create_pv and create_pvc: + volume = params['volume']['name'] + size = params['volume']['size'] + access_modes = params['access']['modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + persistent_volume_claims.append(persistent_volume_claim) + if 'logging' in hostvars['openshift']: + persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging') + if persistent_volume_claim is not None: + persistent_volume_claims.append(persistent_volume_claim) + if 'loggingops' in hostvars['openshift']: + persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops') + if persistent_volume_claim is not None: + persistent_volume_claims.append(persistent_volume_claim) + if 'metrics' in hostvars['openshift']: + persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics') + if persistent_volume_claim is not None: + persistent_volume_claims.append(persistent_volume_claim) return persistent_volume_claims diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 486fe56a0..9d811fcab 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -119,7 +119,7 @@ openshift_release=v3.7 # will be built off of the deployment type and ansible_distribution. Only # use this option if you are sure you know what you are doing! #openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" -#openshift_crio_systemcontainer_image_registry_override="registry.example.com" +#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -491,10 +491,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html # # By default metrics are not automatically deployed, set this to enable them -# openshift_hosted_metrics_deploy=true +#openshift_metrics_install_metrics=true # # Storage Options -# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored +# If openshift_metrics_storage_kind is unset then metrics will be stored # in an EmptyDir volume and will be deleted when the cassandra pod terminates. # Storage options A & B currently support only one cassandra pod which is # generally enough for up to 1000 pods. 
Additional volumes can be created @@ -504,29 +504,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. For example, the volume # path using these options would be "/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. For example, the remote volume path using these # options would be "nfs.example.com:/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_host=nfs.example.com -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. -#openshift_hosted_metrics_storage_kind=dynamic +#openshift_metrics_storage_kind=dynamic # # Other Metrics Options -- Common items you may wish to reconfigure, for the complete # list of options please see roles/openshift_metrics/README.md @@ -535,10 +535,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. 
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics # Configure the prefix and version for the component images -#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_metrics_deployer_version=v3.7.0 +#openshift_metrics_image_prefix=docker.io/openshift/origin- +#openshift_metrics_image_version=v3.7.0 # # StorageClass # openshift_storageclass_name=gp2 @@ -548,36 +548,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Logging deployment # # Currently logging deployment is disabled by default, enable it by setting this -#openshift_hosted_logging_deploy=true +#openshift_logging_install_logging=true # # Logging storage config # Option A - NFS Host Group # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. For example, the volume # path using these options would be "/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. For example, the remote volume path using these # options would be "nfs.example.com:/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_host=nfs.example.com -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. 
-#openshift_hosted_logging_storage_kind=dynamic +#openshift_logging_storage_kind=dynamic # # Option D - none -- Logging will use emptydir volumes which are destroyed when # pods are deleted @@ -587,13 +587,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # # Configure loggingPublicURL in the master config for aggregate logging, defaults # to kibana.{{ openshift_master_default_subdomain }} -#openshift_hosted_logging_hostname=logging.apps.example.com +#openshift_logging_kibana_hostname=logging.apps.example.com # Configure the number of elastic search nodes, unless you're using dynamic provisioning # this value must be 1 -#openshift_hosted_logging_elasticsearch_cluster_size=1 +#openshift_logging_es_cluster_size=1 # Configure the prefix and version for the component images -#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_logging_deployer_version=v3.7.0 +#openshift_logging_image_prefix=docker.io/openshift/origin- +#openshift_logging_image_version=v3.7.0 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 92a0927e5..e6deda4ac 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -119,7 +119,7 @@ openshift_release=v3.7 # will be built off of the deployment type and ansible_distribution. Only # use this option if you are sure you know what you are doing! #openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" -#openshift_crio_systemcontainer_image_registry_override="registry.example.com" +#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -499,10 +499,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html # # By default metrics are not automatically deployed, set this to enable them -# openshift_hosted_metrics_deploy=true +#openshift_metrics_install_metrics=true # # Storage Options -# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored +# If openshift_metrics_storage_kind is unset then metrics will be stored # in an EmptyDir volume and will be deleted when the cassandra pod terminates. # Storage options A & B currently support only one cassandra pod which is # generally enough for up to 1000 pods. Additional volumes can be created @@ -512,29 +512,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. 
For example, the volume # path using these options would be "/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. For example, the remote volume path using these # options would be "nfs.example.com:/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_host=nfs.example.com -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. -#openshift_hosted_metrics_storage_kind=dynamic +#openshift_metrics_storage_kind=dynamic # # Other Metrics Options -- Common items you may wish to reconfigure, for the complete # list of options please see roles/openshift_metrics/README.md @@ -543,10 +543,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. -#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics # Configure the prefix and version for the component images -#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_metrics_deployer_version=3.7.0 +#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/ +#openshift_metrics_image_version=3.7.0 # # StorageClass # openshift_storageclass_name=gp2 @@ -556,36 +556,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Logging deployment # # Currently logging deployment is disabled by default, enable it by setting this -#openshift_hosted_logging_deploy=true +#openshift_logging_install_logging=true # # Logging storage config # Option A - NFS Host Group # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. 
For example, the volume # path using these options would be "/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. For example, the remote volume path using these # options would be "nfs.example.com:/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_host=nfs.example.com -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. 
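Reviewer note: these inventory hunks (here in hosts.ose.example, and the matching ones in hosts.origin.example above) replace the deprecated openshift_hosted_* metrics and logging variables with their openshift_metrics_* / openshift_logging_* equivalents. For quick reference, a sketch of the renames as they appear in this diff (the dict itself is illustrative, not part of the commit):

    # Deprecated inventory variable -> replacement, as renamed in this diff.
    DEPRECATED_TO_NEW = {
        'openshift_hosted_metrics_deploy':            'openshift_metrics_install_metrics',
        'openshift_hosted_metrics_storage_kind':      'openshift_metrics_storage_kind',
        'openshift_hosted_metrics_public_url':        'openshift_metrics_hawkular_hostname',
        'openshift_hosted_metrics_deployer_prefix':   'openshift_metrics_image_prefix',
        'openshift_hosted_metrics_deployer_version':  'openshift_metrics_image_version',
        'openshift_hosted_logging_deploy':            'openshift_logging_install_logging',
        'openshift_hosted_logging_storage_kind':      'openshift_logging_storage_kind',
        'openshift_hosted_logging_hostname':          'openshift_logging_kibana_hostname',
        'openshift_hosted_logging_elasticsearch_cluster_size': 'openshift_logging_es_cluster_size',
        'openshift_hosted_logging_deployer_prefix':   'openshift_logging_image_prefix',
        'openshift_hosted_logging_deployer_version':  'openshift_logging_image_version',
    }
    # The *_storage_access_modes, *_storage_nfs_*, *_storage_volume_* and
    # *_storage_labels options follow the same openshift_hosted_ -> openshift_ prefix change.
    print(DEPRECATED_TO_NEW['openshift_hosted_logging_deploy'])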
-#openshift_hosted_logging_storage_kind=dynamic +#openshift_logging_storage_kind=dynamic # # Option D - none -- Logging will use emptydir volumes which are destroyed when # pods are deleted @@ -595,13 +595,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # # Configure loggingPublicURL in the master config for aggregate logging, defaults # to kibana.{{ openshift_master_default_subdomain }} -#openshift_hosted_logging_hostname=logging.apps.example.com +#openshift_logging_kibana_hostname=logging.apps.example.com # Configure the number of elastic search nodes, unless you're using dynamic provisioning # this value must be 1 -#openshift_hosted_logging_elasticsearch_cluster_size=1 +#openshift_logging_es_cluster_size=1 # Configure the prefix and version for the component images -#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_logging_deployer_version=3.7.0 +#openshift_logging_image_prefix=registry.example.com:8888/openshift3/ +#openshift_logging_image_version=3.7.0 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 3be13145e..b5673cda1 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.126.0%{?dist} +Release: 0.127.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -280,6 +280,96 @@ Atomic OpenShift Utilities includes %changelog +* Thu Sep 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.127.0 +- Updating to always configure api aggregation with installation + (ewolinet@redhat.com) +- Do not reconcile in >= 3.7 (simo@redhat.com) +- Cleanup old deployment types (mgugino@redhat.com) +- crio: ensure no default CNI configuration files are left + (gscrivan@redhat.com) +- node: specify the DNS domain (gscrivan@redhat.com) +- more retries on repoquery_cmd (lmeyer@redhat.com) +- fix etcd back message error (jchaloup@redhat.com) +- openshift_checks: enable providing file outputs (lmeyer@redhat.com) +- Fix registry auth task ordering (mgugino@redhat.com) +- Prometheus role fixes (zgalor@redhat.com) +- papr: Update inventory to include required vars (smilner@redhat.com) +- testing: Skip net vars on integration tests (smilner@redhat.com) +- inventory: Update network variable doc (smilner@redhat.com) +- installer image: use tmp file for vaultpass (lmeyer@redhat.com) +- system container: use ansible root as cwd (lmeyer@redhat.com) +- openshift_sanitize_inventory: Check for required vars (smilner@redhat.com) +- No conversion to boolean and no quoting for include_granted_scopes. + (jpazdziora@redhat.com) +- Correct firewall install for openshift-nfs (rteague@redhat.com) +- inventory: Update versions to 3.7 (smilner@redhat.com) +- Port origin-gce roles for cluster setup to copy AWS provisioning + (ccoleman@redhat.com) +- Bug 1491636 - honor openshift_logging_es_ops_nodeselector + (jwozniak@redhat.com) +- Setup tuned after the node has been restarted. 
(jmencak@redhat.com) +- Only attempt to start iptables on hosts in the current batch + (sdodson@redhat.com) +- Removing setting of pod presets (ewolinet@redhat.com) +- cri-o: Fix Fedora image name (smilner@redhat.com) +- add retry on repoquery_cmd (lmeyer@redhat.com) +- add retries to repoquery module (lmeyer@redhat.com) +- Rework openshift-cluster into deploy_cluster.yml (rteague@redhat.com) +- inventory generate: fix config doc (lmeyer@redhat.com) +- inventory generate: remove refs to openshift_cluster_user (lmeyer@redhat.com) +- inventory generate: always use kubeconfig, no login (lmeyer@redhat.com) +- Scaffold out the entire build defaults hash (tbielawa@redhat.com) +- Use openshift.common.ip rather than ansible_default_ipv4 in etcd migration + playbook. (abutcher@redhat.com) +- Add IMAGE_VERSION to the image stream tag source (sdodson@redhat.com) +- Add loadbalancer config entry point (rteague@redhat.com) +- pull openshift_master deps out into a play (jchaloup@redhat.com) +- Don't assume storage_migration control variables are already boolean + (mchappel@redhat.com) +- upgrade: Updates warning on missing required variables (smilner@redhat.com) +- Update master config with new client urls during etcd scaleup. + (abutcher@redhat.com) +- Increase rate limiting in journald.conf (maszulik@redhat.com) +- Correct logic for openshift_hosted_*_wait (rteague@redhat.com) +- Adding mangagement-admin SC to admin role for management-infra project + (ewolinet@redhat.com) +- Only install base openshift package on masters and nodes (mgugino@redhat.com) +- Workaround Ansible Jinja2 delimiter warning (rteague@redhat.com) +- openshift-checks: add role symlink (lmeyer@redhat.com) +- double the required disk space for etcd backup (jchaloup@redhat.com) +- openshift_health_check: allow disabling all checks (lmeyer@redhat.com) +- docker_image_availability: fix local image search (lmeyer@redhat.com) +- docker_image_availability: probe registry connectivity (lmeyer@redhat.com) +- openshift_checks: add retries in python (lmeyer@redhat.com) +- add inventory-generator under new sub pkg (jvallejo@redhat.com) +- Re-enabling new tuned profile hierarchy (PR5089) (jmencak@redhat.com) +- Add `openshift_node_open_ports` to allow arbitrary firewall exposure + (ccoleman@redhat.com) +- Fix: authenticated registry support for containerized hosts + (mgugino@redhat.com) +- [Proposal] OpenShift-Ansible Proposal Process (rteague@redhat.com) +- Improve searching when conditions for Jinja2 delimiters (rteague@redhat.com) +- Clarify requirement of having etcd group (sdodson@redhat.com) +- add health checks 3_6,3_7 upgrade path (jvallejo@redhat.com) +- container-engine: Allow full image override (smilner@redhat.com) +- Add openshift_public_hostname length check (mgugino@redhat.com) +- Skip failure dedup instead of crashing (rhcarvalho@gmail.com) +- Properly quote "true" and "false" strings for include_granted_scopes. 
+ (jpazdziora@redhat.com) +- Move sysctl.conf customizations to a separate file (jdesousa@redhat.com) +- Fix new_master or new_node fail check (denverjanke@gmail.com) +- [Proposal] OpenShift-Ansible Playbook Consolidation (rteague@redhat.com) +- GlusterFS: Allow option to use or ignore default node selectors + (jarrpa@redhat.com) +- GlusterFS: Clarify heketi URL documentation (jarrpa@redhat.com) +- GlusterFS: Add files/templates for v3.7 (jarrpa@redhat.com) +- Support setting annotations on Hawkular route (hansmi@vshn.ch) +- add additional preflight checks to upgrade path (jvallejo@redhat.com) +- hot fix for env variable resolve (m.judeikis@gmail.com) +- GlusterFS: Correct firewall port names (jarrpa@redhat.com) +- Make RH subscription more resilient to temporary failures + (lhuard@amadeus.com) + * Mon Sep 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.126.0 - Fix rpm version logic for hosts (mgugino@redhat.com) - Revert back to hostnamectl and previous default of not setting hostname diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index a3894e243..073ded6e0 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -7,6 +7,10 @@ tags: - always +- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml + vars: + g_check_expiry_hosts: 'oo_etcd_to_config' + - include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml - include: ../../common/openshift-cluster/redeploy-certificates/masters.yml @@ -14,6 +18,8 @@ - include: ../../common/openshift-cluster/redeploy-certificates/nodes.yml - include: ../../common/openshift-etcd/restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" - include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml index 8516baee8..0f86eb997 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml @@ -7,8 +7,14 @@ tags: - always +- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml + vars: + g_check_expiry_hosts: 'oo_etcd_to_config' + - include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml - include: ../../common/openshift-etcd/restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" - include: ../../common/openshift-master/restart.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index fcceb37b7..804ea8eb8 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -73,3 +73,11 @@ - openshift_enable_service_catalog | default(false) | bool tags: - servicecatalog + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 
0723575c2..517023ba1 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -94,7 +94,7 @@ with_items: - iproute - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}" - - PyYAML + - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - yum-utils - name: Ensure various deps for running system containers are installed diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 75339f6df..0e970f376 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -19,31 +19,15 @@ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - set_fact: - logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" - logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" - logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}" - logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" - logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" + roles: - role: openshift_default_storage_class when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - role: openshift_hosted - role: openshift_metrics - when: openshift_hosted_metrics_deploy | default(false) | bool + when: openshift_metrics_install_metrics | default(false) | bool - role: openshift_logging - when: openshift_hosted_logging_deploy | default(false) | bool - openshift_hosted_logging_hostname: "{{ logging_hostname }}" - openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}" - openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}" - openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}" - openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" - openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}" - openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}" - 
openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}" + when: openshift_logging_install_logging | default(false) | bool - role: cockpit-ui when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) @@ -57,8 +41,6 @@ - hosted pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" - - set_fact: openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" tasks: @@ -66,10 +48,10 @@ - include_role: name: openshift_logging tasks_from: update_master_config - when: openshift_hosted_logging_deploy | default(false) | bool + when: openshift_logging_install_logging | default(false) | bool - block: - include_role: name: openshift_metrics tasks_from: update_master_config - when: openshift_hosted_metrics_deploy | default(false) | bool + when: openshift_metrics_install_metrics | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml new file mode 100644 index 000000000..4a9fbf7eb --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml @@ -0,0 +1,12 @@ +--- +- name: Check cert expirys + hosts: "{{ g_check_expiry_hosts }}" + vars: + openshift_certificate_expiry_show_all: yes + roles: + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml index 6964e8567..3a8e32ed1 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -37,10 +37,17 @@ - name: Generate new etcd CA hosts: oo_first_etcd roles: - - role: openshift_etcd_ca - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + - role: openshift_etcd_facts + tasks: + - include_role: + name: etcd + tasks_from: ca + vars: + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + when: + - etcd_ca_setup | default(True) | bool - name: Create temp directory for syncing certs hosts: localhost @@ -146,13 +153,19 @@ changed_when: false - include: ../../openshift-master/restart.yml - # Do not restart masters when master certificates were previously expired. 
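Reviewer note: the redeploy-certificates playbooks now record per-host certificate health via the openshift_certificate_expiry role (check-expiry.yml above) and skip service restarts when anything was already expired, including etcd. The when clauses added below collect those results with oo_select_keys / oo_collect; a small standalone sketch of the same membership test, using invented check results (illustration only, not the Ansible filters themselves):

    # Illustration of the 'expired' test the playbook performs over hostvars.
    hostvars = {
        'etcd1.example.com': {'check_results': {'check_results': {'etcd': {'health': 'ok'}}}},
        'etcd2.example.com': {'check_results': {'check_results': {'etcd': {'health': 'expired'}}}},
    }
    etcd_group = ['etcd1.example.com', 'etcd2.example.com']

    # oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health')
    health = [hostvars[h]['check_results']['check_results']['etcd']['health'] for h in etcd_group]
    etcd_certificates_expired = 'expired' in health

    print(etcd_certificates_expired)  # True -> restarts of already-expired services are skipped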
- when: ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) - and - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # Do not restart masters when master or etcd certificates were previously expired. + when: + # masters + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml index 6b5c805e6..16f0edb06 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml @@ -45,19 +45,23 @@ - name: Redeploy etcd certificates hosts: oo_etcd_to_config any_errors_fatal: true - roles: - - role: openshift_etcd_server_certificates - etcd_certificates_redeploy: true - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - include_role: + name: etcd + tasks_from: server_certificates + vars: + etcd_certificates_redeploy: true + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Redeploy etcd client certificates for masters hosts: oo_masters_to_config any_errors_fatal: true roles: + - role: openshift_etcd_facts - role: openshift_etcd_client_certificates etcd_certificates_redeploy: true etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 089ae6bbc..b54acae6c 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -7,7 +7,7 @@ when: not openshift.common.version_gte_3_2_or_1_2 | bool - name: Check cert expirys - hosts: oo_nodes_to_config:oo_masters_to_config + hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config vars: openshift_certificate_expiry_show_all: yes roles: @@ -209,16 +209,22 @@ with_items: "{{ 
client_users }}" - include: ../../openshift-master/restart.yml - # Do not restart masters when master certificates were previously expired. - when: ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) - and - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # Do not restart masters when master or etcd certificates were previously expired. + when: + # masters + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) - name: Distribute OpenShift CA certificate to nodes hosts: oo_nodes_to_config @@ -268,13 +274,28 @@ changed_when: false - include: ../../openshift-node/restart.yml - # Do not restart nodes when node certificates were previously expired. - when: ('expired' not in hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) - and - ('expired' not in hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) + # Do not restart nodes when node, master or etcd certificates were previously expired. 
+ when: + # nodes + - ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) + # masters + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml index 7bae70de1..529ee99be 100644 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -4,5 +4,6 @@ roles: - openshift_service_catalog - ansible_service_broker + - template_service_broker vars: first_master: "{{ groups.oo_first_master[0] }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 9b4a8e413..142ce5f3d 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -27,13 +27,17 @@ - name: Set fact avail_openshift_version set_fact: - avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}" + avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}" + - name: Set openshift_pkg_version when not specified + set_fact: + openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}" + when: openshift_pkg_version | default('') == '' - name: Verify OpenShift RPMs are available for upgrade fail: msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" when: - - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<') + - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<') - name: Fail when openshift version does not meet minium requirement for Origin upgrade fail: diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 164baca81..8cc46ab68 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -8,7 +8,6 @@ # TODO: If the sdn package isn't already installed this will install it, we # should fix that - - name: Upgrade master packages package: name={{ master_pkgs | join(',') }} state=present vars: @@ -16,7 +15,7 @@ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - "{{ 
openshift.common.service_type }}-master{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - PyYAML diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index a3d0d6305..30e719d8f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -47,6 +47,10 @@ tags: - pre_upgrade +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -71,10 +75,6 @@ # docker is configured and running. skip_docker_role: True -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 51acd17da..920dc2ffc 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -47,6 +47,14 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -71,14 +79,6 @@ # docker is configured and running. skip_docker_role: True -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 9ec40723a..87621dc85 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -47,6 +47,14 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -71,14 +79,6 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml index e4ab0aa41..06e0607bd 100644 --- a/playbooks/common/openshift-etcd/migrate.yml +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -1,11 +1,13 @@ --- - name: Run pre-checks hosts: oo_etcd_to_migrate - roles: - - role: etcd_migrate - r_etcd_migrate_action: check - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ ansible_default_ipv4.address }}" + tasks: + - include_role: + name: etcd + tasks_from: migrate.pre_check + vars: + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" # TODO: This will be different for release-3.6 branch - name: Prepare masters for etcd data migration @@ -65,25 +67,28 @@ - name: Migrate data on first etcd hosts: oo_etcd_to_migrate[0] gather_facts: no - roles: - - role: etcd_migrate - r_etcd_migrate_action: migrate - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" + tasks: + - include_role: + name: etcd + tasks_from: migrate + vars: + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" - name: Clean data stores on remaining etcd hosts hosts: oo_etcd_to_migrate[1:] gather_facts: no - roles: - - role: etcd_migrate - r_etcd_migrate_action: clean_data - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" - post_tasks: + tasks: + - include_role: + name: etcd + tasks_from: clean_data + vars: + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" - name: Add etcd hosts delegate_to: localhost add_host: @@ -112,21 +117,23 @@ - name: Add TTLs on the first master hosts: oo_first_master[0] - roles: - - role: etcd_migrate - r_etcd_migrate_action: add_ttls - etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" + tasks: + - include_role: + name: etcd + tasks_from: migrate.add_ttls + vars: + etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" when: etcd_migration_failed | length == 0 - name: Configure masters if etcd data migration is succesfull hosts: oo_masters_to_config - roles: - - role: etcd_migrate - r_etcd_migrate_action: configure - when: etcd_migration_failed | length == 0 tasks: + - include_role: + name: etcd + tasks_from: migrate.configure_master + when: etcd_migration_failed | length == 0 - debug: msg: "Skipping master re-configuration since migration failed." 
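
The migrate.yml hunks above replace the action-dispatching etcd_migrate role (selected via r_etcd_migrate_action) with direct tasks_from includes on the consolidated etcd role. The sketch below only collects the resulting mapping in one place; it is assembled from the hunks above, and the variable value shown is illustrative rather than taken from any single play:

```yaml
# Sketch: old r_etcd_migrate_action values -> tasks_from entry points on the
# consolidated etcd role (names as they appear in the hunks above):
#   check      -> migrate.pre_check
#   migrate    -> migrate
#   clean_data -> clean_data
#   add_ttls   -> migrate.add_ttls
#   configure  -> migrate.configure_master
- include_role:
    name: etcd
    tasks_from: migrate.pre_check
  vars:
    etcd_peer: "{{ ansible_default_ipv4.address }}"  # illustrative value
```
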
when: diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index af1ef245a..5eaea5ae8 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -7,3 +7,21 @@ service: name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted + when: + - not g_etcd_certificates_expired | default(false) | bool + +- name: Restart etcd + hosts: oo_etcd_to_config + tasks: + - name: stop etcd + service: + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" + state: stopped + when: + - g_etcd_certificates_expired | default(false) | bool + - name: start etcd + service: + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" + state: started + when: + - g_etcd_certificates_expired | default(false) | bool diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index d3fa48bad..4f83264d0 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -60,15 +60,17 @@ - name: Update master etcd client urls hosts: oo_masters_to_config serial: 1 - tasks: + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" + roles: + - role: openshift_master_facts + post_tasks: - include_role: name: openshift_master tasks_from: update_etcd_client_urls - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" - openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 3decbd973..2e7646372 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -194,6 +194,7 @@ - role: openshift_master_facts - role: openshift_hosted_facts - role: openshift_master_certificates + - role: openshift_etcd_facts - role: openshift_etcd_client_certificates etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" @@ -217,6 +218,10 @@ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: nuage_ca + when: openshift_use_nuage | default(false) | bool + - role: nuage_common + when: openshift_use_nuage | default(false) | bool - role: nuage_master when: openshift_use_nuage | default(false) | bool - role: calico_master diff --git 
a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 17f9ef4bc..8c366e038 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -43,6 +43,8 @@ delay: 1 changed_when: false +- include: ../openshift-master/set_network_facts.yml + - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml new file mode 100644 index 000000000..2ad805858 --- /dev/null +++ b/playbooks/common/openshift-master/set_network_facts.yml @@ -0,0 +1,28 @@ +--- +- name: Read first master\'s config + hosts: oo_first_master + gather_facts: no + tasks: + - stat: + path: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_stat + - slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_slurp + +- name: Set network facts for masters + hosts: oo_masters_to_config + gather_facts: no + tasks: + - block: + - set_fact: + osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" + when: osm_cluster_network_cidr is not defined + - set_fact: + osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}" + when: osm_host_subnet_length is not defined + - set_fact: + openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" + when: openshift_portal_net is not defined + when: + - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 0801c41ff..5207ca9c8 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -65,12 +65,16 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" roles: - - role: flannel - etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + - role: openshift_facts + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates + etcd_cert_prefix: flannel.etcd- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" + - role: flannel + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" when: openshift_use_flannel | default(false) | bool - role: calico when: openshift_use_calico | default(false) | bool diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml index 12929b354..9eb9db316 100644 --- a/roles/ansible_service_broker/defaults/main.yml +++ b/roles/ansible_service_broker/defaults/main.yml @@ -1,6 +1,7 @@ --- ansible_service_broker_remove: false +ansible_service_broker_install: false ansible_service_broker_log_level: info ansible_service_broker_output_request: false ansible_service_broker_recovery: true diff --git 
a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml index b46ce8233..d8695bd3a 100644 --- a/roles/ansible_service_broker/tasks/main.yml +++ b/roles/ansible_service_broker/tasks/main.yml @@ -2,7 +2,7 @@ # do any asserts here - include: install.yml - when: not ansible_service_broker_remove|default(false) | bool + when: ansible_service_broker_install | default(false) | bool - include: remove.yml - when: ansible_service_broker_remove|default(false) | bool + when: ansible_service_broker_remove | default(false) | bool diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml index 39f730462..0e3863304 100644 --- a/roles/calico/tasks/main.yml +++ b/roles/calico/tasks/main.yml @@ -2,10 +2,14 @@ - name: Calico Node | Error if invalid cert arguments fail: msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints" - when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined) + when: + - calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined + - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined) - name: Calico Node | Generate OpenShift-etcd certs - include: ../../../roles/etcd_client_certificates/tasks/main.yml + include_role: + name: etcd + tasks_from: client_certificates when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined vars: etcd_cert_prefix: calico.etcd- @@ -28,18 +32,18 @@ msg: "Invalid etcd configuration for calico." when: item is not defined or item == '' with_items: - - calico_etcd_ca_cert_file - - calico_etcd_cert_file - - calico_etcd_key_file - - calico_etcd_endpoints + - calico_etcd_ca_cert_file + - calico_etcd_cert_file + - calico_etcd_key_file + - calico_etcd_endpoints - name: Calico Node | Assure the calico certs are present stat: path: "{{ item }}" with_items: - - "{{ calico_etcd_ca_cert_file }}" - - "{{ calico_etcd_cert_file }}" - - "{{ calico_etcd_key_file }}" + - "{{ calico_etcd_ca_cert_file }}" + - "{{ calico_etcd_cert_file }}" + - "{{ calico_etcd_key_file }}" - name: Calico Node | Configure Calico service unit file template: diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 7e206ded1..81f3ee9e4 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1,6 +1,6 @@ --- docker_cli_auth_config_path: '/root/.docker' -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +# oreg_url is defined by user input. +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_replace: False diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 8208fa68d..e6fc2db06 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -108,18 +108,22 @@ l_crio_image_name: "cri-o" when: ansible_distribution == "RedHat" - # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504 - - name: Use a testing registry if requested - set_fact: - l_crio_image_prepend: "{{ openshift_crio_systemcontainer_image_registry_override }}" - when: - - openshift_crio_systemcontainer_image_registry_override is defined - - openshift_crio_systemcontainer_image_registry_override != "" - - name: Set the full image name set_fact: l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest" + # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 + - name: Use a specific image if requested + set_fact: + l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}" + when: + - openshift_crio_systemcontainer_image_override is defined + - openshift_crio_systemcontainer_image_override != "" + + # Be nice and let the user see the variable result + - debug: + var: l_crio_image + # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull CRI-O System Container image command: "atomic pull --storage ostree {{ l_crio_image }}" diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 9a955c822..537c01c0e 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -18,5 +18,5 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_os_firewall -- role: etcd_server_certificates +- role: lib_utils - role: etcd_common diff --git a/roles/etcd_migrate/tasks/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml index 95a0e7c0a..95a0e7c0a 100644 --- a/roles/etcd_migrate/tasks/clean_data.yml +++ b/roles/etcd/tasks/auxiliary/clean_data.yml diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml new file mode 100644 index 000000000..7cda49069 --- /dev/null +++ b/roles/etcd/tasks/ca.yml @@ -0,0 +1,2 @@ +--- +- include: ca/deploy.yml diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd/tasks/ca/deploy.yml index b4dea4a07..3d32290a2 100644 --- a/roles/etcd_ca/tasks/main.yml +++ b/roles/etcd/tasks/ca/deploy.yml @@ -1,6 +1,8 @@ --- - name: Install openssl - package: name=openssl state=present + package: + name: openssl + state: present when: not etcd_is_atomic | bool delegate_to: "{{ etcd_ca_host }}" run_once: true diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml new file mode 100644 index 000000000..d131ffd21 --- /dev/null +++ b/roles/etcd/tasks/clean_data.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/clean_data.yml diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml new file mode 100644 index 000000000..2e9c078b9 --- /dev/null +++ b/roles/etcd/tasks/client_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: client_certificates/fetch_from_ca.yml diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd/tasks/client_certificates/fetch_from_ca.yml index bbd29ece1..119071a72 100644 --- a/roles/etcd_client_certificates/tasks/main.yml +++ b/roles/etcd/tasks/client_certificates/fetch_from_ca.yml @@ -9,7 +9,7 @@ - fail: msg: > CA certificate {{ etcd_ca_cert }} doesn't exist on CA host - {{ etcd_ca_host 
}}. Apply 'etcd_ca' role to + {{ etcd_ca_host }}. Apply 'etcd_ca' action from `etcd` role to {{ etcd_ca_host }}. when: not g_ca_cert_stat_result.stat.exists | bool run_once: true diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 78e543ef1..870c11ad4 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,4 +1,6 @@ --- +- include: server_certificates.yml + - name: Set hostname and ip facts set_fact: # Store etcd_hostname and etcd_ip such that they will be available diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml new file mode 100644 index 000000000..bc27e4ea1 --- /dev/null +++ b/roles/etcd/tasks/migrate.add_ttls.yml @@ -0,0 +1,2 @@ +--- +- include: migration/add_ttls.yml diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml new file mode 100644 index 000000000..3ada6e362 --- /dev/null +++ b/roles/etcd/tasks/migrate.configure_master.yml @@ -0,0 +1,2 @@ +--- +- include: migration/configure_master.yml diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml new file mode 100644 index 000000000..124d21561 --- /dev/null +++ b/roles/etcd/tasks/migrate.pre_check.yml @@ -0,0 +1,2 @@ +--- +- include: migration/check.yml diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml new file mode 100644 index 000000000..5d5385873 --- /dev/null +++ b/roles/etcd/tasks/migrate.yml @@ -0,0 +1,2 @@ +--- +- include: migration/migrate.yml diff --git a/roles/etcd_migrate/tasks/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml index c10465af9..14625e49e 100644 --- a/roles/etcd_migrate/tasks/add_ttls.yml +++ b/roles/etcd/tasks/migration/add_ttls.yml @@ -8,6 +8,7 @@ accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}" authroizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}" controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}" + - name: Re-introduce leases (as a replacement for key TTLs) command: > oadm migrate etcd-ttl \ diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd/tasks/migration/check.yml index 0804d9e1c..0804d9e1c 100644 --- a/roles/etcd_migrate/tasks/check.yml +++ b/roles/etcd/tasks/migration/check.yml diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd/tasks/migration/check_cluster_health.yml index 201d83f99..201d83f99 100644 --- a/roles/etcd_migrate/tasks/check_cluster_health.yml +++ b/roles/etcd/tasks/migration/check_cluster_health.yml diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd/tasks/migration/check_cluster_status.yml index b69fb5a52..b69fb5a52 100644 --- a/roles/etcd_migrate/tasks/check_cluster_status.yml +++ b/roles/etcd/tasks/migration/check_cluster_status.yml diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd/tasks/migration/configure_master.yml index a305d5bf3..a305d5bf3 100644 --- a/roles/etcd_migrate/tasks/configure.yml +++ b/roles/etcd/tasks/migration/configure_master.yml diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd/tasks/migration/migrate.yml index 54a9c74ff..54a9c74ff 100644 --- a/roles/etcd_migrate/tasks/migrate.yml +++ b/roles/etcd/tasks/migration/migrate.yml diff --git a/roles/etcd/tasks/server_certificates.yml 
b/roles/etcd/tasks/server_certificates.yml new file mode 100644 index 000000000..f0ba58b6e --- /dev/null +++ b/roles/etcd/tasks/server_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: server_certificates/fetch_from_ca.yml diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd/tasks/server_certificates/fetch_from_ca.yml index 4795188a6..064fe1952 100644 --- a/roles/etcd_server_certificates/tasks/main.yml +++ b/roles/etcd/tasks/server_certificates/fetch_from_ca.yml @@ -1,6 +1,12 @@ --- +- include: ../ca/deploy.yml + when: + - etcd_ca_setup | default(True) | bool + - name: Install etcd - package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present + package: + name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}" + state: present when: not etcd_is_containerized | bool - name: Check status of etcd certificates diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd/templates/openssl_append.j2 index f28316fc2..f28316fc2 100644 --- a/roles/etcd_ca/templates/openssl_append.j2 +++ b/roles/etcd/templates/openssl_append.j2 diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md deleted file mode 100644 index 60a880e30..000000000 --- a/roles/etcd_ca/README.md +++ /dev/null @@ -1,34 +0,0 @@ -etcd_ca -======================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_client_certificates/README.md b/roles/etcd_client_certificates/README.md deleted file mode 100644 index 269d5296d..000000000 --- a/roles/etcd_client_certificates/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Etcd Certificates -=========================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_client_certificates/meta/main.yml b/roles/etcd_client_certificates/meta/main.yml deleted file mode 100644 index efebdb599..000000000 --- a/roles/etcd_client_certificates/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: Etcd Client Certificates - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_common diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md deleted file mode 100644 index 369e78ff2..000000000 --- a/roles/etcd_migrate/README.md +++ /dev/null @@ -1,53 +0,0 @@ -Role Name -========= - -Offline etcd migration of data from v2 to v3 - -Requirements ------------- - -It is expected all consumers of the etcd data are not accessing the data. -Otherwise the migrated data can be out-of-sync with the v2 and can result in unhealthy etcd cluster. 
- -The role itself is responsible for: -- checking etcd cluster health and raft status before the migration -- checking of presence of any v3 data (in that case the migration is stopped) -- migration of v2 data to v3 data (including attaching leases of keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases" string) -- validation of migrated data (all v2 keys and in v3 keys and are set to the identical value) - -The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started. - -Role Variables --------------- - -TBD - -Dependencies ------------- - -- etcd_common -- lib_utils - -Example Playbook ----------------- - -```yaml -- name: Migrate etcd data from v2 to v3 - hosts: oo_etcd_to_config - gather_facts: no - tasks: - - include_role: - name: openshift_etcd_migrate - vars: - etcd_peer: "{{ ansible_default_ipv4.address }}" -``` - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Jan Chaloupka (jchaloup@redhat.com) diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml deleted file mode 100644 index 05cf41fbb..000000000 --- a/roles/etcd_migrate/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# Default action when calling this role, choices: check, migrate, configure -r_etcd_migrate_action: migrate diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml deleted file mode 100644 index f3cabbef6..000000000 --- a/roles/etcd_migrate/meta/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -galaxy_info: - author: Jan Chaloupka - description: Etcd migration - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- { role: etcd_common } -- { role: lib_utils } diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml deleted file mode 100644 index e82f6a6b4..000000000 --- a/roles/etcd_migrate/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Fail if invalid r_etcd_migrate_action provided - fail: - msg: "etcd_migrate role can only be called with 'check', 'migrate', 'configure', 'add_ttls', or 'clean_data'" - when: r_etcd_migrate_action not in ['check', 'migrate', 'configure', 'add_ttls', 'clean_data'] - -- name: Include main action task file - include: "{{ r_etcd_migrate_action }}.yml" - -# 2. migrate v2 datadir into v3: -# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl -# backup the etcd datadir first -# Provide a way for an operator to specify transformer - -# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml -# set storage-backend to “etcd3” -# 4. we could leave the master restart to current logic (there is already the code ready (single vs. 
HA master)) - -# Run -# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health -# to check the cluster health (from the etcdctl.sh aliases file) - -# Another assumption: -# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting) -# - diff --git a/roles/etcd_server_certificates/README.md b/roles/etcd_server_certificates/README.md deleted file mode 100644 index 269d5296d..000000000 --- a/roles/etcd_server_certificates/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Etcd Certificates -=========================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml deleted file mode 100644 index 4b6013a49..000000000 --- a/roles/etcd_server_certificates/meta/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: Etcd Server Certificates - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_ca - when: (etcd_ca_setup | default(True) | bool) diff --git a/roles/flannel/README.md b/roles/flannel/README.md index 0c7347603..b9e15e6e0 100644 --- a/roles/flannel/README.md +++ b/roles/flannel/README.md @@ -27,8 +27,6 @@ Role Variables Dependencies ------------ -openshift_facts - Example Playbook ---------------- diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 35f825586..51128dba6 100644 --- a/roles/flannel/meta/main.yml +++ b/roles/flannel/meta/main.yml @@ -12,7 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_facts -- role: openshift_etcd_client_certificates - etcd_cert_prefix: flannel.etcd- +dependencies: [] diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml index 71c8f38c3..1d0f5df6a 100644 --- a/roles/flannel_register/defaults/main.yaml +++ b/roles/flannel_register/defaults/main.yaml @@ -1,6 +1,6 @@ --- flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}" -flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}" +flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" etcd_conf_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 45d7444a4..1e6eb2386 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -745,7 +745,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 231857cca..8c6a81cc8 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -723,7 
+723,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 44f3f57d8..4a7847e88 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 687cff579..b8af5cad9 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index ddf5d90b7..3364f8de3 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c00eee381..c64d7ffd2 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -835,7 +835,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 0c925ab0b..492494bda 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -860,7 +860,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 567ecfd4e..b412ca8af 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -709,7 +709,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 9515de569..8bbc22c49 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -715,7 +715,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content 
if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index d461e5ae9..ad17051cb 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -759,7 +759,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 22ad58725..74a84ac89 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -726,7 +726,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index b6c6e47d9..eea1516ae 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -699,7 +699,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index f7fc286e0..dc33d3b8a 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -718,7 +718,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 2206878a4..88fd9554d 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -735,7 +735,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 126d7a617..8408f9ebc 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -738,7 +738,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index d20904d0d..d1be0b534 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -670,7 +670,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 
91199d093..9a281e6cd 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -727,7 +727,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index f9b2d81fa..b503c330b 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -724,7 +724,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 895322ba5..7a9e3bf89 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 8f8e46e1e..875e473ad 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -769,7 +769,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 7130cc5fc..ec3635753 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -713,7 +713,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 0c4b99e30..c010607e8 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -765,7 +765,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 7ab139e85..e83a6e26d 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -772,7 +772,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 5d539ced4..0d46bbf96 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = 
yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 97e213f46..662d77ec1 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 9339a85e5..574f109e4 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -729,7 +729,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 2fa349547..e430546ee 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -771,7 +771,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 55e1054e7..a12620968 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -683,7 +683,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 63bad57b4..134b2ad19 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -760,7 +760,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py index 921bca074..cf5c2e423 100644 --- a/roles/lib_utils/library/yedit.py +++ b/roles/lib_utils/library/yedit.py @@ -793,7 +793,7 @@ class Yedit(object): yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py index 957c35a06..0a4fbe07a 100644 --- a/roles/lib_utils/src/class/yedit.py +++ b/roles/lib_utils/src/class/yedit.py @@ -590,7 +590,7 @@ class Yedit(object): yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index 3da340c85..e2f7af5ad 100644 --- 
a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -13,8 +13,5 @@ galaxy_info: - cloud - system dependencies: -- role: nuage_ca -- role: nuage_common -- role: openshift_etcd_client_certificates - role: lib_openshift - role: lib_os_firewall diff --git a/roles/openshift_etcd_ca/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml deleted file mode 100644 index f1d669d6b..000000000 --- a/roles/openshift_etcd_ca/meta/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -galaxy_info: - author: Tim Bielawa - description: Meta role around the etcd_ca role - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: openshift_etcd_facts -- role: etcd_ca - when: (etcd_ca_setup | default(True) | bool) diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml index 3268c390f..fbc72c8a3 100644 --- a/roles/openshift_etcd_client_certificates/meta/main.yml +++ b/roles/openshift_etcd_client_certificates/meta/main.yml @@ -11,6 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_etcd_facts -- role: etcd_client_certificates +dependencies: [] diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml new file mode 100644 index 000000000..7f8b667f0 --- /dev/null +++ b/roles/openshift_etcd_client_certificates/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include_role: + name: etcd + tasks_from: client_certificates diff --git a/roles/openshift_etcd_server_certificates/meta/main.yml b/roles/openshift_etcd_server_certificates/meta/main.yml deleted file mode 100644 index 7750f14af..000000000 --- a/roles/openshift_etcd_server_certificates/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Etcd Server Certificates - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_etcd_facts -- role: etcd_server_certificates diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index a76751e81..1c2c91a5a 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -493,10 +493,10 @@ def set_selectors(facts): facts['hosted']['metrics'] = {} if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: facts['hosted']['metrics']['selector'] = None - if 'logging' not in facts['hosted']: - facts['hosted']['logging'] = {} - if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']: - facts['hosted']['logging']['selector'] = None + if 'logging' not in facts: + facts['logging'] = {} + if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: + facts['logging']['selector'] = None if 'etcd' not in facts['hosted']: facts['hosted']['etcd'] = {} if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: @@ -1785,7 +1785,10 @@ class OpenShiftFacts(object): 'etcd', 'hosted', 'master', - 'node'] + 'node', + 'logging', + 'loggingops', + 'metrics'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. 
# pylint: disable=too-many-arguments,no-value-for-parameter @@ -1902,7 +1905,7 @@ class OpenShiftFacts(object): hostname_f = output.strip() if exit_code == 0 else '' hostname_values = [hostname_f, self.system_facts['ansible_nodename'], self.system_facts['ansible_fqdn']] - hostname = choose_hostname(hostname_values, ip_addr) + hostname = choose_hostname(hostname_values, ip_addr).lower() defaults['common'] = dict(ip=ip_addr, public_ip=ip_addr, @@ -1966,66 +1969,6 @@ class OpenShiftFacts(object): if 'hosted' in roles or self.role == 'hosted': defaults['hosted'] = dict( - metrics=dict( - deploy=False, - duration=7, - resolution='10s', - storage=dict( - kind=None, - volume=dict( - name='metrics', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - loggingops=dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es-ops', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - logging=dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), etcd=dict( storage=dict( kind=None, @@ -2072,6 +2015,69 @@ class OpenShiftFacts(object): router=dict() ) + defaults['logging'] = dict( + storage=dict( + kind=None, + volume=dict( + name='logging-es', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['loggingops'] = dict( + storage=dict( + kind=None, + volume=dict( + name='logging-es-ops', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['metrics'] = dict( + deploy=False, + duration=7, + resolution='10s', + storage=dict( + kind=None, + volume=dict( + name='metrics', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + return defaults def guess_host_provider(self): diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py index 2e60735d6..c72f4c5b3 100644 --- a/roles/openshift_health_checker/library/ocutil.py +++ b/roles/openshift_health_checker/library/ocutil.py @@ -40,18 +40,17 @@ def main(): module = AnsibleModule( argument_spec=dict( - namespace=dict(type="str", required=True), + namespace=dict(type="str", required=False), config_file=dict(type="str", required=True), cmd=dict(type="str", required=True), extra_args=dict(type="list", default=[]), ), ) - cmd = [ - locate_oc_binary(), - '--config', module.params["config_file"], - '-n', module.params["namespace"], - ] + shlex.split(module.params["cmd"]) + cmd = [locate_oc_binary(), '--config', module.params["config_file"]] + if module.params["namespace"]: + cmd += ['-n', module.params["namespace"]] + cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"] failed = True try: diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py 
b/roles/openshift_health_checker/openshift_checks/__init__.py index 28cb53cc5..ce05b44a4 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -13,6 +13,7 @@ from importlib import import_module from ansible.module_utils import six from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin +from ansible.module_utils.six import string_types from ansible.plugins.filter.core import to_bool as ansible_to_bool @@ -110,6 +111,11 @@ class OpenShiftCheck(object): """Returns true if this check applies to the ansible-playbook run.""" return True + def is_first_master(self): + """Determine if running on first master. Returns: bool""" + masters = self.get_var("groups", "oo_first_master", default=None) or [None] + return masters[0] == self.get_var("ansible_host") + @abstractmethod def run(self): """Executes a check against a host and returns a result hash similar to Ansible modules. @@ -283,6 +289,17 @@ class OpenShiftCheck(object): )) @staticmethod + def normalize(name_list): + """Return a clean list of names. + + The input may be a comma-separated string or a sequence. Leading and + trailing whitespace characters are removed. Empty items are discarded. + """ + if isinstance(name_list, string_types): + name_list = name_list.split(',') + return [name.strip() for name in name_list if name.strip()] + + @staticmethod def get_major_minor_version(openshift_image_tag): """Parse and return the deployed version of OpenShift as a tuple.""" if openshift_image_tag and openshift_image_tag[0] == 'v': diff --git a/roles/openshift_health_checker/openshift_checks/diagnostics.py b/roles/openshift_health_checker/openshift_checks/diagnostics.py new file mode 100644 index 000000000..1cfdc1129 --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/diagnostics.py @@ -0,0 +1,62 @@ +""" +A check to run relevant diagnostics via `oc adm diagnostics`. +""" + +import os + +from openshift_checks import OpenShiftCheck, OpenShiftCheckException + + +DIAGNOSTIC_LIST = ( + "AggregatedLogging ClusterRegistry ClusterRoleBindings ClusterRoles " + "ClusterRouter DiagnosticPod NetworkCheck" +).split() + + +class DiagnosticCheck(OpenShiftCheck): + """A check to run relevant diagnostics via `oc adm diagnostics`.""" + + name = "diagnostics" + tags = ["health"] + + def is_active(self): + return super(DiagnosticCheck, self).is_active() and self.is_first_master() + + def run(self): + if self.exec_diagnostic("ConfigContexts"): + # only run the other diagnostics if that one succeeds (otherwise, all will fail) + diagnostics = self.get_var("openshift_check_diagnostics", default=DIAGNOSTIC_LIST) + for diagnostic in self.normalize(diagnostics): + self.exec_diagnostic(diagnostic) + return {} + + def exec_diagnostic(self, diagnostic): + """ + Execute an 'oc adm diagnostics' command on the remote host. + Raises OcNotFound or registers OcDiagFailed. + Returns True on success or False on failure (non-zero rc). 
+ """ + config_base = self.get_var("openshift.common.config_base") + args = { + "config_file": os.path.join(config_base, "master", "admin.kubeconfig"), + "cmd": "adm diagnostics", + "extra_args": [diagnostic], + } + + result = self.execute_module("ocutil", args, save_as_name=diagnostic + ".failure.json") + self.register_file(diagnostic + ".txt", result['result']) + if result.get("failed"): + if result['result'] == '[Errno 2] No such file or directory': + raise OpenShiftCheckException( + "OcNotFound", + "This host is supposed to be a master but does not have the `oc` command where expected.\n" + "Has an installation been run on this host yet?" + ) + + self.register_failure(OpenShiftCheckException( + 'OcDiagFailed', + 'The {diag} diagnostic reported an error:\n' + '{error}'.format(diag=diagnostic, error=result['result']) + )) + return False + return True diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py index e5d93ff3f..79955cb2f 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py @@ -16,7 +16,7 @@ class EtcdVolume(OpenShiftCheck): def is_active(self): etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or [] - is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts + is_etcd_host = self.get_var("ansible_host") in etcd_hosts return super(EtcdVolume, self).is_active() and is_etcd_host def run(self): diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py index 06bdfebf6..05ba73ca1 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/logging.py +++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py @@ -30,14 +30,6 @@ class LoggingCheck(OpenShiftCheck): logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False) return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master() - def is_first_master(self): - """Determine if running on first master. Returns: bool""" - # Note: It would be nice to use membership in oo_first_master group, however for now it - # seems best to avoid requiring that setup and just check this is the first master. 
- hostname = self.get_var("ansible_ssh_host") or [None] - masters = self.get_var("groups", "masters", default=None) or [None] - return masters[0] == hostname - def run(self): return {} diff --git a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py new file mode 100644 index 000000000..800889fa7 --- /dev/null +++ b/roles/openshift_health_checker/test/diagnostics_test.py @@ -0,0 +1,50 @@ +import pytest + +from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException + + +@pytest.fixture() +def task_vars(): + return dict( + openshift=dict( + common=dict(config_base="/etc/origin/") + ) + ) + + +def test_module_succeeds(task_vars): + check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars) + check.is_first_master = lambda: True + assert check.is_active() + check.exec_diagnostic("spam") + assert not check.failures + + +def test_oc_not_there(task_vars): + def exec_module(*_): + return {"failed": True, "result": "[Errno 2] No such file or directory"} + + check = DiagnosticCheck(exec_module, task_vars) + with pytest.raises(OpenShiftCheckException) as excinfo: + check.exec_diagnostic("spam") + assert excinfo.value.name == "OcNotFound" + + +def test_module_fails(task_vars): + def exec_module(*_): + return {"failed": True, "result": "something broke"} + + check = DiagnosticCheck(exec_module, task_vars) + check.exec_diagnostic("spam") + assert check.failures and check.failures[0].name == "OcDiagFailed" + + +def test_names_executed(task_vars): + task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs" + + def exec_module(module, args, *_): + assert "extra_args" in args + assert args["extra_args"][0] in diagnostics + return {"result": "success"} + + DiagnosticCheck(exec_module, task_vars).run() diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py index 1a1c190f6..59c703214 100644 --- a/roles/openshift_health_checker/test/logging_check_test.py +++ b/roles/openshift_health_checker/test/logging_check_test.py @@ -98,21 +98,19 @@ def test_oc_failure(problem, expect): assert expect in str(excinfo) -groups_with_first_master = dict(masters=['this-host', 'other-host']) -groups_with_second_master = dict(masters=['other-host', 'this-host']) -groups_not_a_master = dict(masters=['other-host']) +groups_with_first_master = dict(oo_first_master=['this-host']) +groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host']) @pytest.mark.parametrize('groups, logging_deployed, is_active', [ (groups_with_first_master, True, True), (groups_with_first_master, False, False), (groups_not_a_master, True, False), - (groups_with_second_master, True, False), (groups_not_a_master, True, False), ]) def test_is_active(groups, logging_deployed, is_active): task_vars = dict( - ansible_ssh_host='this-host', + ansible_host='this-host', groups=groups, openshift_hosted_logging_deploy=logging_deployed, ) diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 631bf3e2a..53d1a8bc7 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -8,9 +8,10 @@ - name: Set hosted facts openshift_facts: - role: hosted + role: "{{ item }}" openshift_env: "{{ hostvars | oo_merge_hostvars(vars, inventory_hostname) | oo_openshift_env }}" openshift_env_structures: - 'openshift.hosted.router.*' + with_items: [hosted, logging, 
loggingops, metrics] diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md deleted file mode 100644 index 680303853..000000000 --- a/roles/openshift_hosted_logging/README.md +++ /dev/null @@ -1,40 +0,0 @@ -###Required vars: - -- openshift_hosted_logging_hostname: kibana.example.com -- openshift_hosted_logging_elasticsearch_cluster_size: 1 -- openshift_hosted_logging_master_public_url: https://localhost:8443 - -###Optional vars: -- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/ -- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead -- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software. -- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key -- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3 -- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying. -- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory). -- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead. -- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`. -- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster). -- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534). -- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value) -- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to. -- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true". -- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value) -- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value) -- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs. -- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE* -- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. 
You can use this param to force logging to use a different location. -- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources. - -When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some -additional vars. These work the same as above for their non-ops counterparts, -but apply to the OPS cluster instance: -- openshift_hosted_logging_ops_hostname: kibana-ops.example.com -- openshift_hosted_logging_elasticsearch_ops_cluster_size -- openshift_hosted_logging_elasticsearch_ops_instance_ram -- openshift_hosted_logging_elasticsearch_ops_pvc_size -- openshift_hosted_logging_elasticsearch_ops_pvc_prefix -- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic -- openshift_hosted_logging_elasticsearch_ops_nodeselector -- openshift_hosted_logging_kibana_ops_nodeselector -- openshift_hosted_logging_curator_ops_nodeselector diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml deleted file mode 100644 index a01f24df8..000000000 --- a/roles/openshift_hosted_logging/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml deleted file mode 100644 index d7e83fe9a..000000000 --- a/roles/openshift_hosted_logging/handlers/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
- command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml deleted file mode 100644 index ab07a77c1..000000000 --- a/roles/openshift_hosted_logging/meta/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - { role: openshift_master_facts } diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml deleted file mode 100644 index 70b0d67a4..000000000 --- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: False - -- name: "Checking for logging project" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging" - register: logging_project - failed_when: "'FAILED' in logging_project.stderr" - -- name: "Changing projects" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging" - - -- name: "Cleanup any previous logging infrastructure" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}" - with_items: - - kibana - - fluentd - - elasticsearch - ignore_errors: yes - -- name: "Cleanup existing support infrastructure" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support" - ignore_errors: yes - -- name: "Cleanup existing secrets" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy" - ignore_errors: yes - register: clean_result - failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr - -- name: "Cleanup existing logging deployers" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all" - - -- name: "Cleanup logging project" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging" - - -- name: "Remove deployer template" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift" - register: delete_output - failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr - - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False - -- debug: msg="Success!" 
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml deleted file mode 100644 index 78b624109..000000000 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- -- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead" - when: target_registry is defined and target_registry - -- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size" - when: "openshift_hosted_logging_hostname is not defined or - openshift_hosted_logging_elasticsearch_cluster_size is not defined or - openshift_hosted_logging_master_public_url is not defined" - -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: False - -- name: "Check for logging project already exists" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}' - register: logging_project_result - ignore_errors: True - -- name: "Create logging project" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging - when: logging_project_result.stdout == "" - -- name: "Changing projects" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging - -- name: "Creating logging deployer secret" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }} - register: secret_output - failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr - -- name: "Create templates for logging accounts and the deployer" - command: > - {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig - -f {{ hosted_base }}/logging-deployer.yaml - --config={{ mktemp.stdout }}/admin.kubeconfig - -n logging - register: logging_import_template - failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0" - changed_when: "'created' in logging_import_template.stdout" - -- name: "Process the logging accounts template" - shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig - process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f - - register: process_deployer_accounts - failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr - -- name: "Set permissions for logging-deployer service account" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer - register: permiss_output - failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr - -- name: "Set permissions for fluentd" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy 
add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd - register: fluentd_output - failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr - -- name: "Set additional permissions for fluentd" - command: > - {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig - add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd - register: fluentd2_output - failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr - -- name: "Add rolebinding-reader to aggregated-logging-elasticsearch" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy add-cluster-role-to-user rolebinding-reader \ - system:serviceaccount:logging:aggregated-logging-elasticsearch - register: rolebinding_reader_output - failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr - -- name: "Create ConfigMap for deployer parameters" - command: > - {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} - register: deployer_configmap_output - failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr - -- name: "Process the deployer template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}" - register: process_deployer - failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr - -- name: "Wait for image pull and deployer pod" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed" - register: result - until: result.rc == 0 - retries: 20 - delay: 15 - -- name: "Process imagestream template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - register: process_is - failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr - -- name: "Set insecured registry" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - -- name: "Wait for imagestreams to become available" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - register: result - until: result.rc == 0 - failed_when: result.rc == 1 and 'not found' not in result.stderr - retries: 20 - delay: 5 - -- name: "Wait for component pods to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" - with_items: - - es - - kibana - - curator - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- name: "Wait for ops component pods to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" 
- with_items: - - es-ops - - kibana-ops - - curator-ops - when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- name: "Wait for fluentd DaemonSet to exist" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd" - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 5 - -- name: "Deploy fluentd by labeling the node" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}" - -- name: "Wait for fluentd to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running" - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- include: update_master_config.yaml - -- debug: - msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually" - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml deleted file mode 100644 index 42568597a..000000000 --- a/roles/openshift_hosted_logging/tasks/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Cleanup logging deployment - include: "{{ role_path }}/tasks/cleanup_logging.yaml" - when: openshift_hosted_logging_cleanup | default(false) | bool - -- name: Deploy logging - include: "{{ role_path }}/tasks/deploy_logging.yaml" - when: not openshift_hosted_logging_cleanup | default(false) | bool diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml deleted file mode 100644 index 1122e059c..000000000 --- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Adding Kibana route information to loggingPublicURL - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: assetConfig.loggingPublicURL - yaml_value: "https://{{ logging_hostname }}" - notify: restart master diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml deleted file mode 100644 index 4b350b244..000000000 --- a/roles/openshift_hosted_logging/vars/main.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}" -ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}" -iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}" -oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}" -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ 
openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}" -kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}" -pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}" -es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}" -es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}" -es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}" -es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}" -es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}" -es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}" -es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}" -es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}" -es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}" -es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}" -es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}" -es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}" -es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}" -fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if 
openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}" -kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}" -kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}" -cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}" -cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}" -ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}" -journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}" -journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}" -ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}" -deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}" diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 716f0e002..6699e2062 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -1,15 +1,17 @@ --- -openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}" +openshift_logging_use_ops: False openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" -openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}" openshift_logging_namespace: logging openshift_logging_nodeselector: null openshift_logging_labels: {} openshift_logging_label_key: "" openshift_logging_label_value: "" -openshift_logging_install_logging: True 
+openshift_logging_install_logging: False +openshift_logging_uninstall_logging: False + openshift_logging_purge_logging: False -openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_image_pull_secret: "" openshift_logging_curator_default_days: 30 openshift_logging_curator_run_hour: 0 @@ -19,13 +21,13 @@ openshift_logging_curator_script_log_level: INFO openshift_logging_curator_log_level: ERROR openshift_logging_curator_cpu_limit: 100m openshift_logging_curator_memory_limit: null -openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_nodeselector: {} openshift_logging_curator_ops_cpu_limit: 100m openshift_logging_curator_ops_memory_limit: null -openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_ops_nodeselector: {} -openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_proxy_debug: false @@ -34,8 +36,8 @@ openshift_logging_kibana_proxy_memory_limit: 96Mi openshift_logging_kibana_replica_count: 1 openshift_logging_kibana_edge_term_policy: Redirect -openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}" -openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_kibana_nodeselector: {} +openshift_logging_kibana_ops_nodeselector: {} #The absolute path on the control node to the cert file to use #for the public facing kibana certs @@ -49,7 +51,7 @@ openshift_logging_kibana_key: "" #for the public facing kibana certs openshift_logging_kibana_ca: "" -openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi openshift_logging_kibana_ops_proxy_debug: false @@ -69,12 +71,12 @@ openshift_logging_kibana_ops_key: "" #for the public facing ops kibana certs openshift_logging_kibana_ops_ca: "" -openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" +openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" -openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" +openshift_logging_fluentd_journal_source: "" +openshift_logging_fluentd_journal_read_from_head: "" openshift_logging_fluentd_hosts: ['--all'] openshift_logging_fluentd_buffer_queue_limit: 1024 openshift_logging_fluentd_buffer_size_limit: 1m @@ -84,18 +86,18 @@ openshift_logging_es_port: 9200 openshift_logging_es_ca: /etc/fluent/keys/ca openshift_logging_es_client_cert: /etc/fluent/keys/cert openshift_logging_es_client_key: /etc/fluent/keys/key -openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" +openshift_logging_es_cluster_size: 1 openshift_logging_es_cpu_limit: 1000m # the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console' openshift_logging_es_log_appenders: ['file'] -openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" -openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}" -openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}" -openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}" -openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" +openshift_logging_es_memory_limit: "8Gi" +openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}" +openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}" +openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" +openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}" openshift_logging_es_recover_after_time: 5m -openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_nodeselector: {} # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml openshift_logging_es_config: {} openshift_logging_es_number_of_shards: 1 @@ -125,16 +127,16 @@ openshift_logging_es_ops_port: 9200 openshift_logging_es_ops_ca: /etc/fluent/keys/ca openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert openshift_logging_es_ops_client_key: /etc/fluent/keys/key 
-openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" +openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}" openshift_logging_es_ops_cpu_limit: 1000m -openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}" -openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}" -openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" -openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}" -openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" +openshift_logging_es_ops_memory_limit: "8Gi" +openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}" +openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" +openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}" +openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" openshift_logging_es_ops_recover_after_time: 5m -openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients openshift_logging_es_ops_allow_external: False @@ -153,7 +155,7 @@ openshift_logging_es_ops_key: "" openshift_logging_es_ops_ca_ext: "" # storage related defaults -openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_logging_storage_access_modes: ['ReadWriteOnce'] # mux - secure_forward listener service openshift_logging_mux_allow_external: False diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index f475024dd..0da9771c7 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -30,12 +30,13 @@ check_mode: no become: no -- include: "{{ role_path }}/tasks/install_logging.yaml" - when: openshift_logging_install_logging | default(false) | bool +- include: install_logging.yaml + when: + - openshift_logging_install_logging | default(false) | bool -- include: "{{ role_path }}/tasks/delete_logging.yaml" +- include: delete_logging.yaml when: - - not openshift_logging_install_logging | default(false) | bool + - openshift_logging_uninstall_logging | default(false) | bool - name: Cleaning up local temp dir local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 4c8d6fdad..73e935d3f 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -20,8 +20,8 @@ r_openshift_master_os_firewall_allow: port: 4001/tcp cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' 
in oreg_url.split('/')[0] else '' }}" +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 94b7df1fc..82b4b420c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -177,8 +177,6 @@ local_facts: no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}" -- include: registry_auth.yml - - name: Install the systemd units include: systemd_units.yml diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index 96b6c614e..2644f235e 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -1,27 +1,35 @@ --- +# We need to setup some variables as this play might be called directly +# from outside of the role. +- set_fact: + oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" + when: oreg_auth_credentials_path is not defined + +- set_fact: + oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" + when: oreg_host is not defined + - name: Check for credentials file for registry auth stat: path: "{{ oreg_auth_credentials_path }}" when: oreg_auth_user is defined register: master_oreg_auth_credentials_stat -# Container images may need the registry credentials -- name: Setup ro mount of /root/.docker for containerized hosts - set_fact: - l_bind_docker_reg_auth: True +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" when: - - openshift.common.is_containerized | bool - oreg_auth_user is defined - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + register: master_oreg_auth_credentials_create notify: - restart master api - restart master controllers -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True when: + - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart master api - - restart master controllers + - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 7a918c57e..8de62c59a 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -17,6 +17,8 @@ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}" when: r_openshift_master_data_dir is not defined +- include: registry_auth.yml + - name: Remove the legacy master service if it exists include: clean_systemd_units.yml diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 
b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index a184a59f6..5d4a99c97 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ -v /etc/pki:/etc/pki:ro \ - {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 2ded05f53..f93f3b565 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -19,7 +19,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ -v /etc/pki:/etc/pki:ro \ - {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index f45100be3..ed0182ba8 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,6 +1,7 @@ --- openshift_metrics_start_cluster: True -openshift_metrics_install_metrics: True +openshift_metrics_install_metrics: False +openshift_metrics_uninstall_metrics: False openshift_metrics_startup_timeout: 500 openshift_metrics_hawkular_replicas: 1 @@ -15,9 +16,9 @@ openshift_metrics_hawkular_nodeselector: "" openshift_metrics_hawkular_route_annotations: {} openshift_metrics_cassandra_replicas: 1 -openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}" -openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}" -openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}" +openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}" +openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}" +openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}" openshift_metrics_cassandra_limits_memory: 2G openshift_metrics_cassandra_limits_cpu: null 
openshift_metrics_cassandra_requests_memory: 1G @@ -54,8 +55,8 @@ openshift_metrics_master_url: https://kubernetes.default.svc openshift_metrics_node_id: nodename openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}" -openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index eaabdd20f..0461039fc 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -43,7 +43,13 @@ check_mode: no tags: metrics_init -- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}" +- include: install_metrics.yaml + when: + - openshift_metrics_install_metrics | default(false) | bool + +- include: uninstall_metrics.yaml + when: + - openshift_metrics_uninstall_metrics | default(false) | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml index 678c4104c..8704ddfa0 100644 --- a/roles/openshift_metrics/vars/default_images.yml +++ b/roles/openshift_metrics/vars/default_images.yml @@ -1,3 +1,3 @@ --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}" +__openshift_metrics_image_prefix: "docker.io/openshift/origin-" +__openshift_metrics_image_version: "latest" diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f0bdac7d2..68cdf06fe 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}" +__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/" +__openshift_metrics_image_version: "v3.6" diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 433e92201..ed3516d04 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -82,8 +82,8 @@ default_r_openshift_node_os_firewall_allow: # Allow multiple port ranges to be added to the role r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}" -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index f370bb260..3d2831742 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -5,21 +5,20 @@ when: oreg_auth_user is defined register: node_oreg_auth_credentials_stat -# Container images may need the registry credentials -- name: Setup ro mount of /root/.docker for containerized hosts - set_fact: - l_bind_docker_reg_auth: True +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" when: - - openshift.common.is_containerized | bool - oreg_auth_user is defined - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + register: node_oreg_auth_credentials_create notify: - restart node -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True when: + - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart node + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or oreg_auth_credentials_replace.changed) | bool diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 61d2a5b51..df02bcf0e 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -114,6 +114,8 @@ EOF echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF} if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF} + elif ! 
grep -qw search ${NEW_RESOLV_CONF}; then + echo 'search cluster.local' >> ${NEW_RESOLV_CONF} fi cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf fi diff --git a/filter_plugins/openshift_node.py b/roles/openshift_node_facts/filter_plugins/filters.py index 50c360e97..69069f2dc 100644 --- a/filter_plugins/openshift_node.py +++ b/roles/openshift_node_facts/filter_plugins/filters.py @@ -7,10 +7,10 @@ from ansible import errors class FilterModule(object): - ''' Custom ansible filters for use by openshift_node role''' + ''' Custom ansible filters for use by openshift_node_facts role''' @staticmethod - def get_dns_ip(openshift_dns_ip, hostvars): + def node_get_dns_ip(openshift_dns_ip, hostvars): ''' Navigates the complicated logic of when to set dnsIP In all situations if they've set openshift_dns_ip use that @@ -29,4 +29,4 @@ class FilterModule(object): def filters(self): ''' returns a mapping of filters to methods ''' - return {'get_dns_ip': self.get_dns_ip} + return {'node_get_dns_ip': self.node_get_dns_ip} diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index c268c945e..fd4c49504 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -30,5 +30,5 @@ ovs_image: "{{ osn_ovs_image | default(None) }}" proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" - dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" + dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}" env_vars: "{{ openshift_node_env_vars | default(None) }}" diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml index 3d8704308..6507b015d 100644 --- a/roles/openshift_node_upgrade/defaults/main.yml +++ b/roles/openshift_node_upgrade/defaults/main.yml @@ -4,3 +4,9 @@ os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" + +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" +oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" +oreg_auth_credentials_replace: False +l_bind_docker_reg_auth: False diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index e34319186..6bcf3072d 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -10,6 +10,8 @@ # tasks file for openshift_node_upgrade +- include: registry_auth.yml + - name: Stop node and openvswitch services service: name: "{{ item }}" diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml new file mode 100644 index 000000000..3d2831742 --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml @@ -0,0 +1,24 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: node_oreg_auth_credentials_stat + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + register: node_oreg_auth_credentials_create + notify: + - restart node + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service index 451412ab0..864e4b5d6 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service @@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v 
/etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ + --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ + -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ + -e HOST=/rootfs -e HOST_ETC=/host-etc \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ + -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \ + -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \ + -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \ + -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \ + -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 8d3d010e4..19e9a56b7 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -9,5 +9,4 @@ galaxy_info: - name: EL versions: - 7 -dependencies: -- role: openshift_hosted_facts +dependencies: {} diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py new file mode 100644 index 000000000..d42c9bdb9 --- /dev/null +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py @@ -0,0 +1,25 @@ +''' + Openshift Logging class that provides useful filters used in Logging. 
+ + This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml +''' + + +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + ''' OpenShift Logging Filters ''' + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + ''' Returns the names of the filters provided by this class ''' + return { + 'map_from_pairs': map_from_pairs + } diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py new file mode 100644 index 000000000..f61801714 --- /dev/null +++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py @@ -0,0 +1,68 @@ +#!/usr/bin/python + +""" Ansible module to help with setting facts conditionally based on other facts """ + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: conditional_set_fact + +short_description: This will set a fact if the value is defined + +description: + - "To avoid constant set_fact & when conditions for each var we can use this" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + fact2: defined_variable + +''' + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined. If it is then we use that value as for the original key """ + + module = AnsibleModule( + argument_spec=dict( + facts=dict(type='dict', required=True), + vars=dict(required=False, type='dict', default=[]) + ), + supports_check_mode=True + ) + + local_facts = dict() + is_changed = False + + for param in module.params['vars']: + other_var = module.params['vars'][param] + + if other_var in module.params['facts']: + local_facts[param] = module.params['facts'][other_var] + if not is_changed: + is_changed = True + + return module.exit_json(changed=is_changed, # noqa: F405 + ansible_facts=local_facts) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml new file mode 100644 index 000000000..e52ab5f6d --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml @@ -0,0 +1,48 @@ +--- +# this is used to set the logging variables from deprecated values to the current variables names +# this file should be deleted once variables are no longer honored + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + logging_hostname: openshift_hosted_logging_hostname + logging_ops_hostname: openshift_hosted_logging_ops_hostname + logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size + logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size + openshift_logging_storage_kind: openshift_hosted_logging_storage_kind + openshift_logging_storage_host: openshift_hosted_logging_storage_host + openshift_logging_storage_labels: openshift_hosted_logging_storage_labels + openshift_logging_storage_volume_size: 
openshift_hosted_logging_storage_volume_size + openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind + openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host + openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels + openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size + openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster + openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret + openshift_logging_kibana_hostname: openshift_hosted_logging_hostname + openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname + openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source + openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head + openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram + openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector + openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram + openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes + openshift_logging_master_public_url: openshift_hosted_logging_master_public_url + openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix + openshift_logging_image_version: openshift_hosted_logging_deployer_version + openshift_logging_install_logging: openshift_hosted_logging_deploy + + +- set_fact: + openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" + openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" + openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}" + openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}" + openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" + openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml new file mode 100644 index 000000000..279646981 --- /dev/null +++ 
b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml @@ -0,0 +1,17 @@ +--- +# this is used to set the metrics variables from deprecated values to the current variables names +# this file should be deleted once variables are no longer honored + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes + openshift_metrics_storage_host: openshift_hosted_metrics_storage_host + openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory + openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name + openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size + openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels + openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix + openshift_metrics_image_version: openshift_hosted_metrics_deployer_version + openshift_metrics_install_metrics: openshift_hosted_metrics_deploy + openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml new file mode 100644 index 000000000..94d3acffc --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -0,0 +1,21 @@ +--- + +- name: Check for usage of deprecated variables + set_fact: + __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will no longer be used in the next minor release. Please update your inventory accordingly.']" + when: + - hostvars[inventory_hostname][item] is defined + with_items: "{{ __warn_deprecated_vars }}" + +- block: + - debug: msg="{{__deprecation_message}}" + - pause: + seconds: "{{ 10 }}" + when: + - __deprecation_message | default ('') | length > 0 + +# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory +- name: Assign deprecated variables to correct counterparts + include: "{{ item }}" + with_fileglob: + - "../tasks/__deprecations_*.yml" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 47d7be05a..e327ee9f5 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -1,4 +1,8 @@ --- +# We should print out deprecations prior to any failures so that if a play does fail for other reasons +# the user would also be aware of any deprecated variables they should note to adjust +- include: deprecations.yml + - name: Abort when conflicting deployment type variables are set when: - deployment_type is defined @@ -12,27 +16,6 @@ deployment_type is deprecated in favor of openshift_deployment_type. Please specify only openshift_deployment_type, or make both the same. -# osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are -# now required to avoid changes that may occur between releases -# -# Note: We will skip these checks when some tests run which don't -# actually do any insalling/upgrading/scaling/etc.. 
-# Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023 -- when: - - not testing_skip_some_requirements|default(False)|bool - assert: - that: - - "osm_cluster_network_cidr is defined" - - "osm_host_subnet_length is defined" - - "openshift_portal_net is defined" - msg: > - osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory - variables. If you are upgrading or scaling up these variables should match what is currently used - in the cluster. If you don't remember what these values are you can find them in - /etc/origin/master/master-config.yaml on a master with the names clusterNetworkCIDR - (osm_cluster_network_cidr), hostSubnetLength (osm_host_subnet_length), - and serviceNetworkCIDR (openshift_portal_net). - - name: Standardize on latest variable names set_fact: # goal is to deprecate deployment_type in favor of openshift_deployment_type. diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index 37e88758d..0fc2372d2 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -2,3 +2,77 @@ # origin uses community packages named 'origin' # openshift-enterprise uses Red Hat packages named 'atomic-openshift' known_openshift_deployment_types: ['origin', 'openshift-enterprise'] + +__deprecation_header: "[DEPRECATION WARNING]:" + +# this is a list of variables that we will be deprecating within the next minor release, this list should be expected to change from release to release +__warn_deprecated_vars: + # logging + - 'openshift_hosted_logging_deploy' + - 'openshift_hosted_logging_hostname' + - 'openshift_hosted_logging_ops_hostname' + - 'openshift_hosted_logging_master_public_url' + - 'openshift_hosted_logging_elasticsearch_cluster_size' + - 'openshift_hosted_logging_elasticsearch_ops_cluster_size' + - 'openshift_hosted_logging_image_pull_secret' + - 'openshift_hosted_logging_enable_ops_cluster' + - 'openshift_hosted_logging_curator_nodeselector' + - 'openshift_hosted_logging_curator_ops_nodeselector' + - 'openshift_hosted_logging_kibana_nodeselector' + - 'openshift_hosted_logging_kibana_ops_nodeselector' + - 'openshift_hosted_logging_fluentd_nodeselector_label' + - 'openshift_hosted_logging_journal_source' + - 'openshift_hosted_logging_journal_read_from_head' + - 'openshift_hosted_logging_elasticsearch_instance_ram' + - 'openshift_hosted_logging_storage_labels' + - 'openshift_hosted_logging_elasticsearch_pvc_dynamic' + - 'openshift_hosted_logging_elasticsearch_pvc_size' + - 'openshift_hosted_logging_elasticsearch_pvc_prefix' + - 'openshift_hosted_logging_elasticsearch_storage_group' + - 'openshift_hosted_logging_elasticsearch_nodeselector' + - 'openshift_hosted_logging_elasticsearch_ops_instance_ram' + - 'openshift_hosted_loggingops_storage_labels' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_size' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix' + - 'openshift_hosted_logging_elasticsearch_storage_group' + - 'openshift_hosted_logging_elasticsearch_ops_nodeselector' + - 'openshift_hosted_logging_storage_access_modes' + - 'openshift_hosted_logging_storage_kind' + - 'openshift_hosted_loggingops_storage_kind' + - 'openshift_hosted_logging_storage_host' + - 'openshift_hosted_loggingops_storage_host' + - 'openshift_hosted_logging_storage_nfs_directory' + - 'openshift_hosted_loggingops_storage_nfs_directory' + - 
'openshift_hosted_logging_storage_volume_name' + - 'openshift_hosted_loggingops_storage_volume_name' + - 'openshift_hosted_logging_storage_volume_size' + - 'openshift_hosted_loggingops_storage_volume_size' + - 'openshift_hosted_logging_enable_ops_cluster' + - 'openshift_hosted_logging_image_pull_secret' + - 'openshift_hosted_logging_curator_nodeselector' + - 'openshift_hosted_logging_curator_ops_nodeselector' + - 'openshift_hosted_logging_kibana_nodeselector' + - 'openshift_hosted_logging_kibana_ops_nodeselector' + - 'openshift_hosted_logging_ops_hostname' + - 'openshift_hosted_logging_fluentd_nodeselector_label' + - 'openshift_hosted_logging_journal_source' + - 'openshift_hosted_logging_journal_read_from_head' + - 'openshift_hosted_logging_elasticsearch_instance_ram' + - 'openshift_hosted_logging_elasticsearch_nodeselector' + - 'openshift_hosted_logging_elasticsearch_ops_instance_ram' + - 'openshift_hosted_logging_elasticsearch_ops_nodeselector' + - 'openshift_hosted_logging_storage_access_modes' + - 'openshift_hosted_logging_deployer_prefix' + - 'openshift_hosted_logging_deployer_version' + # metrics + - 'openshift_hosted_metrics_deploy' + - 'openshift_hosted_metrics_storage_kind' + - 'openshift_hosted_metrics_storage_access_modes' + - 'openshift_hosted_metrics_storage_host' + - 'openshift_hosted_metrics_storage_nfs_directory' + - 'openshift_hosted_metrics_storage_volume_name' + - 'openshift_hosted_metrics_storage_volume_size' + - 'openshift_hosted_metrics_storage_labels' + - 'openshift_hosted_metrics_deployer_prefix' + - 'openshift_hosted_metrics_deployer_version' diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js index 16a307c06..d0a9f11dc 100644 --- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js +++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js @@ -1 +1,2 @@ -window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true; +// empty file so that the master-config can still point to a file that exists +// this file will be replaced by the template service broker role if enabled diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index d134867bd..faf1aea97 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -110,15 +110,6 @@ when: - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) -- shell: > - oc get policybindings/kube-system:default -n kube-system || echo "not found" - register: get_kube_system - changed_when: no - -- command: > - oc create policybinding kube-system -n kube-system - when: "'not found' in get_kube_system.stdout" - - oc_adm_policy_user: namespace: kube-service-catalog resource_kind: scc diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 51f8f4e0e..3047fbaf9 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -31,9 +31,9 @@ group: nfsnobody with_items: - "{{ openshift.hosted.registry }}" - - "{{ openshift.hosted.metrics }}" - - "{{ openshift.hosted.logging }}" - - "{{ 
openshift.hosted.loggingops }}" + - "{{ openshift.metrics }}" + - "{{ openshift.logging }}" + - "{{ openshift.loggingops }}" - "{{ openshift.hosted.etcd }}" - name: Configure exports diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 7e8f70b23..0141e0d25 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1,5 +1,5 @@ {{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }} -{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }} -{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }} -{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }} +{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }} +{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }} +{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }} {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 1ff99adf8..f4e9ff43a 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -166,7 +166,9 @@ - set_fact: openshift_pkg_version: -{{ openshift_version }} - when: openshift_pkg_version is not defined + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined - fail: msg: openshift_version role was unable to set openshift_version @@ -181,7 +183,10 @@ - fail: msg: openshift_version role was unable to set openshift_pkg_version name: Abort if openshift_pkg_version was not set - when: openshift_pkg_version is not defined + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined + - fail: msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index c49512146..c43e5513d 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -62,7 +62,7 @@ when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == '' - name: Attach to OpenShift Pool - command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }} + command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }} register: subscribe_pool until: subscribe_pool | succeeded when: openshift_pool_id.stdout != '' diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml new file mode 100644 index 000000000..fb407c4a2 --- /dev/null +++ b/roles/template_service_broker/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# placeholder file? 
+template_service_broker_remove: False +template_service_broker_install: False diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..b3a3d3428 --- /dev/null +++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true; diff --git a/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js new file mode 100644 index 000000000..d0a9f11dc --- /dev/null +++ b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js @@ -0,0 +1,2 @@ +// empty file so that the master-config can still point to a file that exists +// this file will be replaced by the template service broker role if enabled diff --git a/roles/etcd_ca/meta/main.yml b/roles/template_service_broker/meta/main.yml index e3e2f7781..ab5a0cf08 100644 --- a/roles/etcd_ca/meta/main.yml +++ b/roles/template_service_broker/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: - author: Jason DeTiberus - description: Etcd CA + author: OpenShift Red Hat + description: OpenShift Template Service Broker company: Red Hat, Inc. license: Apache License, Version 2.0 min_ansible_version: 2.1 @@ -11,6 +11,3 @@ galaxy_info: - 7 categories: - cloud - - system -dependencies: -- role: etcd_common diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml new file mode 100644 index 000000000..199df83c2 --- /dev/null +++ b/roles/template_service_broker/tasks/install.yml @@ -0,0 +1,47 @@ +--- +# Fact setting +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: set ansible_service_broker facts + set_fact: + template_service_broker_image: "{{ template_service_broker_image | default(__template_service_broker_image) }}" + +- oc_project: + name: openshift-template-service-broker + state: present + +- command: mktemp -d /tmp/tsb-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- copy: + src: "{{ __tsb_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __tsb_template_file }}" + - "{{ __tsb_rbac_file }}" + +- name: Apply template file + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" | kubectl apply -f - + +# reconcile with rbac +- name: Reconcile with RBAC file + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | oc auth reconcile -f - + +- name: copy tech preview extension file for service console UI + copy: + src: openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml new file mode 100644 index 000000000..d7ca970c7 --- /dev/null +++ b/roles/template_service_broker/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include: install.yml + when: template_service_broker_install | default(false) | bool + +- include: remove.yml + 
when: template_service_broker_remove | default(false) | bool diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml new file mode 100644 index 000000000..207dd9bdb --- /dev/null +++ b/roles/template_service_broker/tasks/remove.yml @@ -0,0 +1,28 @@ +--- +- command: mktemp -d /tmp/tsb-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- copy: + src: "{{ __tsb_files_location }}/{{ __tsb_template_file }}" + dest: "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + +- name: Delete TSB objects + shell: > + oc process -f "{{ __tsb_files_location }}/{{ __tsb_template_file }}" | kubectl delete -f - + +- name: empty out tech preview extension file for service console UI + copy: + src: remove-openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- oc_project: + name: openshift-template-service-broker + state: absent + +- file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml new file mode 100644 index 000000000..807f2822c --- /dev/null +++ b/roles/template_service_broker/vars/default_images.yml @@ -0,0 +1,2 @@ +--- +__template_service_broker_image: "" diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml new file mode 100644 index 000000000..372ab8f6f --- /dev/null +++ b/roles/template_service_broker/vars/main.yml @@ -0,0 +1,6 @@ +--- +__tsb_files_location: "../../../files/origin-components/" + +__tsb_template_file: "apiserver-template.yaml" +__tsb_config_file: "apiserver-config.yaml" +__tsb_rbac_file: "rbac-template.yaml" diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml new file mode 100644 index 000000000..807f2822c --- /dev/null +++ b/roles/template_service_broker/vars/openshift-enterprise.yml @@ -0,0 +1,2 @@ +--- +__template_service_broker_image: ""
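
Taken together, the hunks above replace the deprecated openshift_hosted_* switches with role-specific variables, split metrics install and uninstall into two explicit toggles, add install/remove toggles for the new template_service_broker role, and move registry authentication (oreg_url and the oreg_auth_* variables) into the node and node-upgrade roles. As a rough illustration only, the following sketch groups the new switches into a single inventory vars file; the variable names are taken from the roles in this patch, but the file name, registry host, and credentials are made-up placeholders, not something the patch itself ships:

    ---
    # group_vars/OSEv3.yml (hypothetical layout; adjust to your own inventory)

    # metrics: install and uninstall are now two independent toggles
    openshift_metrics_install_metrics: true
    openshift_metrics_uninstall_metrics: false

    # template service broker: enabled or removed through its own role switches
    template_service_broker_install: true
    template_service_broker_remove: false

    # registry auth: oreg_url is user input; the roles derive oreg_host from the
    # first path segment and run "docker login" against it when credentials are set
    oreg_url: "registry.example.com/openshift3/ose-${component}:${version}"
    oreg_auth_user: example_user
    oreg_auth_password: example_password
    oreg_auth_credentials_replace: false

With values along these lines, the node and node-upgrade roles log in to the registry host parsed out of oreg_url, and on containerized hosts they bind-mount the resulting credentials read-only into the node container via the /root/.docker volume added to openshift.docker.node.service above.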