258 files changed, 4579 insertions, 1534 deletions
diff --git a/.papr.inventory b/.papr.inventory index 878d434e2..aa4324c21 100644 --- a/.papr.inventory +++ b/.papr.inventory @@ -11,6 +11,9 @@ openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}" openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io" openshift_check_min_host_disk_gb=1.5 openshift_check_min_host_memory_gb=1.9 +osm_cluster_network_cidr=10.128.0.0/14 +openshift_portal_net=172.30.0.0/16 +osm_host_subnet_length=9 [masters] ocp-master diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 810510bdf..b2155c30f 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.125.1 ./ +3.7.0-0.127.0 ./ diff --git a/docs/proposals/README.md b/docs/proposals/README.md new file mode 100644 index 000000000..89bbe5163 --- /dev/null +++ b/docs/proposals/README.md @@ -0,0 +1,27 @@ +# OpenShift-Ansible Proposal Process + +## Proposal Decision Tree +TODO: Add details about when a proposal is or is not required. + +## Proposal Process +The following process should be followed when a proposal is needed: + +1. Create a pull request with the initial proposal + * Use the [proposal template][template] + * Name the proposal using two or three topic words with underscores as a separator (i.e. proposal_template.md) + * Place the proposal in the docs/proposals directory +2. Notify the development team of the proposal and request feedback +3. Review the proposal on the OpenShift-Ansible Architecture Meeting +4. Update the proposal as needed and ask for feedback +5. Approved/Closed Phase + * If 75% or more of the active development team give the proposal a :+1: it is Approved + * If 50% or more of the active development team disagrees with the proposal it is Closed + * If the person proposing the proposal no longer wishes to continue they can request it to be Closed + * If there is no activity on a proposal, the active development team may Close the proposal at their discretion + * If none of the above is met the cycle can continue to Step 4. +6. For approved proposals, the current development lead(s) will: + * Update the Pull Request with the result and merge the proposal + * Create a card on the Cluster Lifecycle [Trello board][trello] so it may be scheduled for implementation. + +[template]: proposal_template.md +[trello]: https://trello.com/b/wJYDst6C diff --git a/docs/proposals/playbook_consolidation.md b/docs/proposals/playbook_consolidation.md new file mode 100644 index 000000000..98aedb021 --- /dev/null +++ b/docs/proposals/playbook_consolidation.md @@ -0,0 +1,178 @@ +# OpenShift-Ansible Playbook Consolidation + +## Description +The designation of `byo` is no longer applicable due to being able to deploy on +physical hardware or cloud resources using the playbooks in the `byo` directory. +Consolidation of these directories will make maintaining the code base easier +and provide a more straightforward project for users and developers. + +The main points of this proposal are: +* Consolidate initialization playbooks into one set of playbooks in + `playbooks/init`. +* Collapse the `playbooks/byo` and `playbooks/common` into one set of + directories at `playbooks/openshift-*`. + +This consolidation effort may be more appropriate when the project moves to +using a container as the default installation method. 
+ +## Design + +### Initialization Playbook Consolidation +Currently there are two separate sets of initialization playbooks: +* `playbooks/byo/openshift-cluster/initialize_groups.yml` +* `playbooks/common/openshift-cluster/std_include.yml` + +Although these playbooks are located in the `openshift-cluster` directory they +are shared by all of the `openshift-*` areas. These playbooks would be better +organized in a `playbooks/init` directory collocated with all their related +playbooks. + +In the example below, the following changes have been made: +* `playbooks/byo/openshift-cluster/initialize_groups.yml` renamed to + `playbooks/init/initialize_host_groups.yml` +* `playbooks/common/openshift-cluster/std_include.yml` renamed to + `playbooks/init/main.yml` +* `- include: playbooks/init/initialize_host_groups.yml` has been added to the + top of `playbooks/init/main.yml` +* All other related files for initialization have been moved to `playbooks/init` + +The `initialize_host_groups.yml` playbook is only one play with one task for +importing variables for inventory group conversions. This task could be further +consolidated with the play in `evaluate_groups.yml`. + +The new standard initialization playbook would be +`playbooks/init/main.yml`. + + +``` + +> $ tree openshift-ansible/playbooks/init +. +├── evaluate_groups.yml +├── initialize_facts.yml +├── initialize_host_groups.yml +├── initialize_openshift_repos.yml +├── initialize_openshift_version.yml +├── main.yml +├── roles -> ../../roles +├── validate_hostnames.yml +└── vars + └── cluster_hosts.yml +``` + +```yaml +# openshift-ansible/playbooks/init/main.yml +--- +- include: initialize_host_groups.yml + +- include: evaluate_groups.yml + +- include: initialize_facts.yml + +- include: validate_hostnames.yml + +- include: initialize_openshift_repos.yml + +- include: initialize_openshift_version.yml +``` + +### `byo` and `common` Playbook Consolidation +Historically, the `byo` directory coexisted with other platform directories +which contained playbooks that then called into `common` playbooks to perform +common installation steps for all platforms. Since the other platform +directories have been removed this separation is no longer necessary. + +In the example below, the following changes have been made: +* `playbooks/byo/openshift-master` renamed to + `playbooks/openshift-master` +* `playbooks/common/openshift-master` renamed to + `playbooks/openshift-master/private` +* Original `byo` entry point playbooks have been updated to include their + respective playbooks from `private/`. +* Symbolic links have been updated as necessary + +All user consumable playbooks are in the root of `openshift-master` and no entry +point playbooks exist in the `private` directory. Maintaining the separation +between entry point playbooks and the private playbooks allows individual pieces +of the deployments to be used as needed by other components. + +``` +openshift-ansible/playbooks/openshift-master +> $ tree +. 
+├── config.yml +├── private +│  ├── additional_config.yml +│  ├── config.yml +│  ├── filter_plugins -> ../../../filter_plugins +│  ├── library -> ../../../library +│  ├── lookup_plugins -> ../../../lookup_plugins +│  ├── restart_hosts.yml +│  ├── restart_services.yml +│  ├── restart.yml +│  ├── roles -> ../../../roles +│  ├── scaleup.yml +│  └── validate_restart.yml +├── restart.yml +└── scaleup.yml +``` + +```yaml +# openshift-ansible/playbooks/openshift-master/config.yml +--- +- include: ../init/main.yml + +- include: private/config.yml +``` + +With the consolidation of the directory structure and component installs being +removed from `openshift-cluster`, that directory is no longer necessary. To +deploy an entire OpenShift cluster, a playbook would be created to tie together +all of the different components. The following example shows how multiple +components would be combined to perform a complete install. + +```yaml +# openshift-ansible/playbooks/deploy_cluster.yml +--- +- include: init/main.yml + +- include: openshift-etcd/private/config.yml + +- include: openshift-nfs/private/config.yml + +- include: openshift-loadbalancer/private/config.yml + +- include: openshift-master/private/config.yml + +- include: openshift-node/private/config.yml + +- include: openshift-glusterfs/private/config.yml + +- include: openshift-hosted/private/config.yml + +- include: openshift-service-catalog/private/config.yml +``` + +## User Story +As a developer of OpenShift-Ansible, +I want simplify the playbook directory structure +so that users can easily find deployment playbooks and developers know where new +features should be developed. + +## Implementation +Given the size of this refactoring effort, it should be broken into smaller +steps which can be completed independently while still maintaining a functional +project. + +Steps: +1. Update and merge consolidation of the initialization playbooks. +2. Update each merge consolidation of each `openshift-*` component area +3. Update and merge consolidation of `openshift-cluster` + +## Acceptance Criteria +* Verify that all entry points playbooks install or configure as expected. +* Verify that CI is updated for testing new playbook locations. +* Verify that repo documentation is updated +* Verify that user documentation is updated + +## References diff --git a/docs/proposals/proposal_template.md b/docs/proposals/proposal_template.md new file mode 100644 index 000000000..ece288037 --- /dev/null +++ b/docs/proposals/proposal_template.md @@ -0,0 +1,30 @@ +# Proposal Title + +## Description +<Short introduction> + +## Rationale +<Summary of main points of Design> + +## Design +<Main content goes here> + +## Checklist +* Item 1 +* Item 2 +* Item 3 + +## User Story +As a developer on OpenShift-Ansible, +I want ... +so that ... + +## Acceptance Criteria +* Verify that ... +* Verify that ... +* Verify that ... 
+ +## References +* Link +* Link +* Link diff --git a/files/origin-components/apiserver-config.yaml b/files/origin-components/apiserver-config.yaml new file mode 100644 index 000000000..e4048d1da --- /dev/null +++ b/files/origin-components/apiserver-config.yaml @@ -0,0 +1,4 @@ +kind: TemplateServiceBrokerConfig +apiVersion: config.templateservicebroker.openshift.io/v1 +templateNamespaces: +- openshift diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml new file mode 100644 index 000000000..1b42597af --- /dev/null +++ b/files/origin-components/apiserver-template.yaml @@ -0,0 +1,122 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-apiserver +parameters: +- name: IMAGE + value: openshift/origin:latest +- name: NAMESPACE + value: openshift-template-service-broker +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG + value: | + kind: TemplateServiceBrokerConfig + apiVersion: config.templateservicebroker.openshift.io/v1 + templateNamespaces: + - openshift +objects: + +# to create the tsb server +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + namespace: ${NAMESPACE} + name: apiserver + labels: + apiserver: "true" + spec: + template: + metadata: + name: apiserver + labels: + apiserver: "true" + spec: + serviceAccountName: apiserver + containers: + - name: c + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/openshift" + - "start" + - "template-service-broker" + - "--secure-port=8443" + - "--audit-log-path=-" + - "--tls-cert-file=/var/serving-cert/tls.crt" + - "--tls-private-key-file=/var/serving-cert/tls.key" + - "--loglevel=${LOGLEVEL}" + - "--config=/var/apiserver-config/apiserver-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/apiserver-config + name: apiserver-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + volumes: + - name: serving-cert + secret: + defaultMode: 420 + secretName: apiserver-serving-cert + - name: apiserver-config + configMap: + defaultMode: 420 + name: apiserver-config + +# to create the config for the TSB +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: apiserver-config + data: + apiserver-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: apiserver + +# to be able to expose TSB inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: apiserver + annotations: + service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert + spec: + selector: + apiserver: "true" + ports: + - port: 443 + targetPort: 8443 + +# This service account will be granted permission to call the TSB. +# The token for this SA will be provided to the service catalog for +# use when calling the TSB. +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# This secret will be populated with a copy of the templateservicebroker-client SA's +# auth token. Since this secret has a static name, it can be referenced more +# easily than the auto-generated secret for the service account. 
+- apiVersion: v1 + kind: Secret + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + annotations: + kubernetes.io/service-account.name: templateservicebroker-client + type: kubernetes.io/service-account-token diff --git a/files/origin-components/rbac-template.yaml b/files/origin-components/rbac-template.yaml new file mode 100644 index 000000000..0937a9065 --- /dev/null +++ b/files/origin-components/rbac-template.yaml @@ -0,0 +1,92 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-rbac +parameters: +- name: NAMESPACE + value: openshift-template-service-broker +- name: KUBE_SYSTEM + value: kube-system +objects: + +# Grant the service account permission to call the TSB +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: templateservicebroker-client + roleRef: + kind: ClusterRole + name: system:openshift:templateservicebroker-client + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# to delegate authentication and authorization +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: auth-delegator-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to have the template service broker powers +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: tsb-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:openshift:controller:template-service-broker + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to read the config for terminating authentication +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${KUBE_SYSTEM} + name: extension-apiserver-authentication-reader-${NAMESPACE} + roleRef: + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# allow the kube service catalog's SA to read the static secret defined +# above, which will contain the token for the SA that can call the TSB. +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + name: templateservicebroker-auth-reader + namespace: ${NAMESPACE} + rules: + - apiGroups: + - "" + resourceNames: + - templateservicebroker-client + resources: + - secrets + verbs: + - get +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-auth-reader + roleRef: + kind: Role + name: templateservicebroker-auth-reader + subjects: + - kind: ServiceAccount + namespace: kube-service-catalog + name: service-catalog-controller diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 277695f78..f0f250480 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -716,6 +716,100 @@ def oo_openshift_env(hostvars): # pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements +def oo_component_persistent_volumes(hostvars, groups, component): + """ Generate list of persistent volumes based on oo_openshift_env + storage options set in host variables for a specific component. 
+ """ + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + if not issubclass(type(groups), dict): + raise errors.AnsibleFilterError("|failed expects groups is a dict") + + persistent_volume = None + + if component in hostvars['openshift']: + if 'storage' in hostvars['openshift'][component]: + params = hostvars['openshift'][component]['storage'] + kind = params['kind'] + create_pv = params['create_pv'] + if kind is not None and create_pv: + if kind == 'nfs': + host = params['host'] + if host is None: + if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = params['nfs']['directory'] + volume = params['volume']['name'] + path = directory + '/' + volume + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + + elif kind == 'openstack': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + filesystem = params['openstack']['filesystem'] + volume_id = params['openstack']['volumeID'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + + elif kind == 'glusterfs': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + endpoints = params['glusterfs']['endpoints'] + path = params['glusterfs']['path'] + read_only = params['glusterfs']['readOnly'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + raise errors.AnsibleFilterError(msg) + return persistent_volume + + +# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): """ Generate list of persistent volumes based on oo_openshift_env storage options set in host variables. 
@@ -734,84 +828,122 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): if 'storage' in hostvars['openshift']['hosted'][component]: params = hostvars['openshift']['hosted'][component]['storage'] kind = params['kind'] - create_pv = params['create_pv'] - if kind is not None and create_pv: - if kind == 'nfs': - host = params['host'] - if host is None: - if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] + if 'create_pv' in params: + create_pv = params['create_pv'] + if kind is not None and create_pv: + if kind == 'nfs': + host = params['host'] + if host is None: + if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = params['nfs']['directory'] + volume = params['volume']['name'] + path = directory + '/' + volume + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = params['nfs']['directory'] - volume = params['volume']['name'] - path = directory + '/' + volume - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - nfs=dict( - server=host, - path=path))) - persistent_volumes.append(persistent_volume) - elif kind == 'openstack': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - filesystem = params['openstack']['filesystem'] - volume_id = params['openstack']['volumeID'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - cinder=dict( - fsType=filesystem, - volumeID=volume_id))) - persistent_volumes.append(persistent_volume) - elif kind == 'glusterfs': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - endpoints = params['glusterfs']['endpoints'] - path = params['glusterfs']['path'] - read_only = params['glusterfs']['readOnly'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - glusterfs=dict( - endpoints=endpoints, - path=path, - readOnly=read_only))) - persistent_volumes.append(persistent_volume) - elif not (kind == 'object' or kind == 'dynamic'): - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) + labels = dict() + access_modes = params['access']['modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + persistent_volumes.append(persistent_volume) + elif kind == 'openstack': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + filesystem = params['openstack']['filesystem'] + volume_id 
= params['openstack']['volumeID'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + persistent_volumes.append(persistent_volume) + elif kind == 'glusterfs': + volume = params['volume']['name'] + size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() + access_modes = params['access']['modes'] + endpoints = params['glusterfs']['endpoints'] + path = params['glusterfs']['path'] + read_only = params['glusterfs']['readOnly'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + persistent_volumes.append(persistent_volume) + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + raise errors.AnsibleFilterError(msg) + if 'logging' in hostvars['openshift']: + persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging') + if persistent_volume is not None: + persistent_volumes.append(persistent_volume) + if 'loggingops' in hostvars['openshift']: + persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops') + if persistent_volume is not None: + persistent_volumes.append(persistent_volume) + if 'metrics' in hostvars['openshift']: + persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics') + if persistent_volume is not None: + persistent_volumes.append(persistent_volume) return persistent_volumes +def oo_component_pv_claims(hostvars, component): + """ Generate list of persistent volume claims based on oo_openshift_env + storage options set in host variables for a speicific component. + """ + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + + if component in hostvars['openshift']: + if 'storage' in hostvars['openshift'][component]: + params = hostvars['openshift'][component]['storage'] + kind = params['kind'] + create_pv = params['create_pv'] + create_pvc = params['create_pvc'] + if kind not in [None, 'object'] and create_pv and create_pvc: + volume = params['volume']['name'] + size = params['volume']['size'] + access_modes = params['access']['modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + return persistent_volume_claim + return None + + def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): """ Generate list of persistent volume claims based on oo_openshift_env storage options set in host variables. 
@@ -828,17 +960,31 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): if 'storage' in hostvars['openshift']['hosted'][component]: params = hostvars['openshift']['hosted'][component]['storage'] kind = params['kind'] - create_pv = params['create_pv'] - create_pvc = params['create_pvc'] - if kind not in [None, 'object'] and create_pv and create_pvc: - volume = params['volume']['name'] - size = params['volume']['size'] - access_modes = params['access']['modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - persistent_volume_claims.append(persistent_volume_claim) + if 'create_pv' in params: + if 'create_pvc' in params: + create_pv = params['create_pv'] + create_pvc = params['create_pvc'] + if kind not in [None, 'object'] and create_pv and create_pvc: + volume = params['volume']['name'] + size = params['volume']['size'] + access_modes = params['access']['modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + persistent_volume_claims.append(persistent_volume_claim) + if 'logging' in hostvars['openshift']: + persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging') + if persistent_volume_claim is not None: + persistent_volume_claims.append(persistent_volume_claim) + if 'loggingops' in hostvars['openshift']: + persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops') + if persistent_volume_claim is not None: + persistent_volume_claims.append(persistent_volume_claim) + if 'metrics' in hostvars['openshift']: + persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics') + if persistent_volume_claim is not None: + persistent_volume_claims.append(persistent_volume_claim) return persistent_volume_claims @@ -877,10 +1023,8 @@ def oo_pods_match_component(pods, deployment_type, component): raise errors.AnsibleFilterError("failed expects component to be a string") image_prefix = 'openshift/origin-' - if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': image_prefix = 'openshift3/ose-' - elif deployment_type == 'atomic-enterprise': - image_prefix = 'aep3_beta/aep-' matching_pods = [] image_regex = image_prefix + component + r'.*' diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py index 809e82488..c515f1a71 100644 --- a/filter_plugins/openshift_version.py +++ b/filter_plugins/openshift_version.py @@ -33,10 +33,10 @@ def legacy_gte_function_builder(name, versions): returns True/False """ version_gte = False - if 'enterprise' in deployment_type: + if deployment_type == 'openshift-enterprise': if str(version) >= LooseVersion(enterprise_version): version_gte = True - elif 'origin' in deployment_type: + else: if str(version) >= LooseVersion(origin_version): version_gte = True return version_gte diff --git a/images/installer/README_INVENTORY_GENERATOR.md b/images/installer/README_INVENTORY_GENERATOR.md new file mode 100644 index 000000000..9c10e4b71 --- /dev/null +++ b/images/installer/README_INVENTORY_GENERATOR.md @@ -0,0 +1,85 @@ +Dynamic Inventory Generation +============================ + +Script within the openshift-ansible image that can dynamically +generate an Ansible inventory file from an existing cluster. + +## Configure + +User configuration helps to provide additional details when creating an inventory file. +The default location of this file is in `/etc/inventory-generator-config.yaml`. 
The +following configuration values are either expected or default to the given values when omitted: + +- `master_config_path`: + - specifies where to look for the bind-mounted `master-config.yaml` file in the container + - if omitted or a `null` value is given, its value is defaulted to `/opt/app-root/src/master-config.yaml` + +- `admin_kubeconfig_path`: + - specifies where to look for the bind-mounted `admin.kubeconfig` file in the container + - if omitted or a `null` value is given, its value is defaulted to `/opt/app-root/src/.kube/config` + +- `ansible_ssh_user`: + - specifies the ssh user to be used by Ansible when running the specified `PLAYBOOK_FILE` (see `README_CONTAINER_IMAGE.md` for additional information on this environment variable). + - if omitted, its value is defaulted to `root` + +- `ansible_become_user`: + - specifies a user to "become" on the remote host. Used for privilege escalation. + - If a non-null value is specified, `ansible_become` is implicitly set to `yes` in the resulting inventory file. + +See the supplied sample user configuration file in [`root/etc/inventory-generator-config.yaml`](./root/etc/inventory-generator-config.yaml) for additional optional inventory variables that may be specified. + +## Build + +See `README_CONTAINER_IMAGE.md` for information on building this image. + +## Run + +Given a master node's `master-config.yaml` file, a user configuration file (see "Configure" section), and an `admin.kubeconfig` file, the command below will: + +1. Use `oc` to query the host about additional node information (using the supplied `kubeconfig` file) +2. Generate an inventory file based on information retrieved from `oc get nodes` and the given `master-config.yaml` file. +3. run the specified [openshift-ansible](https://github.com/openshift/openshift-ansible) `health.yml` playbook using the generated inventory file from the previous step + +``` +docker run -u `id -u` \ + -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \ + -v /tmp/origin/master/admin.kubeconfig:/opt/app-root/src/.kube/config:Z \ + -v /tmp/origin/master/master-config.yaml:/opt/app-root/src/master-config.yaml:Z \ + -e OPTS="-v --become-user root" \ + -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/health.yml \ + -e GENERATE_INVENTORY=true \ + -e USER=`whoami` \ + openshift/origin-ansible + +``` + +**Note** In the command above, specifying the `GENERATE_INVENTORY` environment variable will automatically generate the inventory file in an expected location. +An `INVENTORY_FILE` variable (or any other inventory location) does not need to be supplied when generating an inventory. + +## Debug + +To debug the `generate` script, run the above script interactively +and manually execute `/usr/local/bin/generate`: + +``` +... +docker run -u `id -u` \ + -v ... + ... + -it openshift/origin-ansible /bin/bash + +--- + +bash-4.2$ cd $HOME +bash-4.2$ ls +master-config.yaml +bash-4.2$ /usr/local/bin/generate $HOME/generated_hosts +bash-4.2$ ls +generated_hosts master-config.yaml +bash-4.2$ less generated_hosts +... +``` + +## Notes + +See `README_CONTAINER_IMAGE.md` for additional information about this image. 
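For orientation, a generated inventory for a small cluster might look roughly like the sketch below. The hostnames, addresses, and version values are placeholders only; the exact variables emitted depend on the user configuration file and on what `oc get nodes` and `master-config.yaml` report for the cluster.

```
[OSEv3:children]
masters
nodes
etcd

[OSEv3:vars]
ansible_ssh_user=ec2-user
ansible_become_user=root
ansible_become=yes
openshift_deployment_type=origin
openshift_release=3.6
openshift_image_tag=v3.6.0

openshift_hosted_logging_deploy=False

[masters]
192.0.2.10 openshift_ip=192.0.2.10 openshift_public_hostname=master.example.com

[nodes]
node1.example.com openshift_ip=192.0.2.11 openshift_hostname=node1.example.com

[etcd]
master.example.com openshift_hostname=master.example.com
```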
diff --git a/images/installer/root/etc/inventory-generator-config.yaml b/images/installer/root/etc/inventory-generator-config.yaml new file mode 100644 index 000000000..d56e3f4d2 --- /dev/null +++ b/images/installer/root/etc/inventory-generator-config.yaml @@ -0,0 +1,20 @@ +--- +# meta config +master_config_path: "/opt/app-root/src/master-config.yaml" +admin_kubeconfig_path: "/opt/app-root/src/.kube/config" + +# default user configuration +ansible_ssh_user: ec2-user +ansible_become: "yes" +ansible_become_user: "root" + +# openshift-ansible inventory vars +openshift_uninstall_images: false +openshift_install_examples: true +openshift_deployment_type: origin + +openshift_release: 3.6 +openshift_image_tag: v3.6.0 +openshift_hosted_logging_deploy: null # defaults to "true" if loggingPublicURL is set in master-config.yaml +openshift_logging_image_version: v3.6.0 +openshift_disable_check: "" diff --git a/images/installer/root/exports/config.json.template b/images/installer/root/exports/config.json.template index 739c0080f..1a009fa7b 100644 --- a/images/installer/root/exports/config.json.template +++ b/images/installer/root/exports/config.json.template @@ -24,7 +24,7 @@ "PLAYBOOK_FILE=$PLAYBOOK_FILE", "ANSIBLE_CONFIG=$ANSIBLE_CONFIG" ], - "cwd": "/opt/app-root/src/", + "cwd": "/usr/share/ansible/openshift-ansible", "rlimits": [ { "type": "RLIMIT_NOFILE", diff --git a/images/installer/root/usr/local/bin/generate b/images/installer/root/usr/local/bin/generate new file mode 100755 index 000000000..3db7a3ee8 --- /dev/null +++ b/images/installer/root/usr/local/bin/generate @@ -0,0 +1,397 @@ +#!/bin/env python + +""" +Attempts to read 'master-config.yaml' and extract remote +host information to dynamically create an inventory file +in order to run Ansible playbooks against that host. 
+""" + +import os +import re +import shlex +import shutil +import subprocess +import sys +import yaml + +try: + HOME = os.environ['HOME'] +except KeyError: + print 'A required environment variable "$HOME" has not been set' + exit(1) + +DEFAULT_USER_CONFIG_PATH = '/etc/inventory-generator-config.yaml' +DEFAULT_MASTER_CONFIG_PATH = HOME + '/master-config.yaml' +DEFAULT_ADMIN_KUBECONFIG_PATH = HOME + '/.kube/config' + +INVENTORY_FULL_PATH = HOME + '/generated_hosts' +USE_STDOUT = True + +if len(sys.argv) > 1: + INVENTORY_FULL_PATH = sys.argv[1] + USE_STDOUT = False + + +class OpenShiftClientError(Exception): + """Base exception class for OpenShift CLI wrapper""" + pass + + +class InvalidHost(Exception): + """Base exception class for host creation problems.""" + pass + + +class InvalidHostGroup(Exception): + """Base exception class for host-group creation problems.""" + pass + + +class OpenShiftClient: + oc = None + kubeconfig = None + + def __init__(self, kubeconfig=DEFAULT_ADMIN_KUBECONFIG_PATH): + """Find and store path to oc binary""" + # https://github.com/openshift/openshift-ansible/issues/3410 + # oc can be in /usr/local/bin in some cases, but that may not + # be in $PATH due to ansible/sudo + paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ['/usr/local/bin', os.path.expanduser('~/bin')] + + oc_binary_name = 'oc' + oc_binary = None + + # Use shutil.which if it is available, otherwise fallback to a naive path search + try: + which_result = shutil.which(oc_binary_name, path=os.pathsep.join(paths)) + if which_result is not None: + oc_binary = which_result + except AttributeError: + for path in paths: + if os.path.exists(os.path.join(path, oc_binary_name)): + oc_binary = os.path.join(path, oc_binary_name) + break + + if oc_binary is None: + raise OpenShiftClientError('Unable to locate `oc` binary. 
Not present in PATH.') + + self.oc = oc_binary + self.kubeconfig = kubeconfig + + def call(self, cmd_str): + """Execute a remote call using `oc`""" + cmd = [ + self.oc, + '--config', + self.kubeconfig + ] + shlex.split(cmd_str) + try: + out = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + raise OpenShiftClientError('[rc {}] {}\n{}'.format(err.returncode, ' '.join(err.cmd), err.output)) + return out + + def whoami(self): + """Retrieve information about the current user in the given kubeconfig""" + return self.call('whoami') + + def get_nodes(self): + """Retrieve remote node information as a yaml object""" + return self.call('get nodes -o yaml') + + +class HostGroup: + groupname = "" + hosts = list() + + def __init__(self, hosts): + if not hosts: + return + first = hosts[0].get_group_name() + for h in hosts: + if h.get_group_name() != first: + raise InvalidHostGroup("Attempt to create HostGroup with hosts of varying groups.") + + self.hosts = hosts + self.groupname = first + + def add_host(self, host): + """Add a new host to this group.""" + self.hosts.append(host) + + def get_group_name(self): + """Return the groupname associated with each aggregated host.""" + return self.groupname + + def get_hosts(self): + """Return aggregated hosts""" + return self.hosts + + def string(self): + """Call the print method for each aggregated host; separated by newlines.""" + infos = "" + for host in self.hosts: + infos += host.string() + "\n" + return infos + + +class Host: + group = "masters" + alias = "" + hostname = "" + public_hostname = "" + ip_addr = "" + public_ip_addr = "" + + def __init__(self, groupname): + if not groupname: + raise InvalidHost("Attempt to create Host with no group name provided.") + self.group = groupname + + def get_group_name(self): + return self.group + + def get_openshift_hostname(self): + return self.hostname + + def host_alias(self, hostalias): + """Set an alias for this host.""" + self.alias = hostalias + + def address(self, ip): + """Set the ip address for this host.""" + self.ip_addr = ip + + def public_address(self, ip): + """Set the external ip address for this host.""" + self.public_ip_addr = ip + + def host_name(self, hname): + self.hostname = parse_hostname(hname) + + def public_host_name(self, phname): + self.public_hostname = parse_hostname(phname) + + def string(self): + """Print an inventory-file compatible string with host information""" + info = "" + if self.alias: + info += self.alias + " " + elif self.hostname: + info += self.hostname + " " + elif self.ip_addr: + info += self.ip_addr + " " + if self.ip_addr: + info += "openshift_ip=" + self.ip_addr + " " + if self.public_ip_addr: + info += "openshift_public_ip=" + self.public_ip_addr + " " + if self.hostname: + info += "openshift_hostname=" + self.hostname + " " + if self.public_hostname: + info += "openshift_public_hostname=" + self.public_hostname + + return info + + +def parse_hostname(host): + """Remove protocol and port from given hostname. 
+ Return parsed string""" + no_proto = re.split('^http(s)?\:\/\/', host) + if no_proto: + host = no_proto[-1] + + no_port = re.split('\:[0-9]+(/)?$', host) + if no_port: + host = no_port[0] + + return host + + +def main(): + """Parse master-config file and populate inventory file.""" + # set default values + USER_CONFIG = os.environ.get('CONFIG') + if not USER_CONFIG: + USER_CONFIG = DEFAULT_USER_CONFIG_PATH + + # read user configuration + try: + config_file_obj = open(USER_CONFIG, 'r') + raw_config_file = config_file_obj.read() + user_config = yaml.load(raw_config_file) + if not user_config: + user_config = dict() + except IOError as err: + print "Unable to find or read user configuration file '{}': {}".format(USER_CONFIG, err) + exit(1) + + master_config_path = user_config.get('master_config_path', DEFAULT_MASTER_CONFIG_PATH) + if not master_config_path: + master_config_path = DEFAULT_MASTER_CONFIG_PATH + + admin_kubeconfig_path = user_config.get('admin_kubeconfig_path', DEFAULT_ADMIN_KUBECONFIG_PATH) + if not admin_kubeconfig_path: + admin_kubeconfig_path = DEFAULT_ADMIN_KUBECONFIG_PATH + + try: + file_obj = open(master_config_path, 'r') + except IOError as err: + print "Unable to find or read host master configuration file '{}': {}".format(master_config_path, err) + exit(1) + + raw_text = file_obj.read() + + y = yaml.load(raw_text) + if y.get("kind", "") != "MasterConfig": + print "Bind-mounted host master configuration file is not of 'kind' MasterConfig. Aborting..." + exit(1) + + # finish reading config file and begin gathering + # cluster information for inventory file + file_obj.close() + + # set inventory values based on user configuration + ansible_ssh_user = user_config.get('ansible_ssh_user', 'root') + ansible_become_user = user_config.get('ansible_become_user') + + openshift_uninstall_images = user_config.get('openshift_uninstall_images', False) + openshift_install_examples = user_config.get('openshift_install_examples', True) + openshift_deployment_type = user_config.get('openshift_deployment_type', 'origin') + + openshift_release = user_config.get('openshift_release') + openshift_image_tag = user_config.get('openshift_image_tag') + openshift_logging_image_version = user_config.get('openshift_logging_image_version') + openshift_disable_check = user_config.get('openshift_disable_check') + + # extract host config info from parsed yaml file + asset_config = y.get("assetConfig") + master_config = y.get("kubernetesMasterConfig") + etcd_config = y.get("etcdClientInfo") + + # if master_config is missing, error out; we expect to be running on a master to be able to + # gather enough information to generate the rest of the inventory file. + if not master_config: + msg = "'kubernetesMasterConfig' missing from '{}'; unable to gather all necessary host information..." + print msg.format(master_config_path) + exit(1) + + master_public_url = y.get("masterPublicURL") + if not master_public_url: + msg = "'kubernetesMasterConfig.masterPublicURL' missing from '{}'; Unable to connect to master host..." + print msg.format(master_config_path) + exit(1) + + oc = OpenShiftClient(admin_kubeconfig_path) + + # ensure kubeconfig is logged in with provided user, or fail with a friendly message otherwise + try: + oc.whoami() + except OpenShiftClientError as err: + msg = ("Unable to obtain user information using the provided kubeconfig file. " + "Current context does not appear to be able to authenticate to the server. 
" + "Error returned from server:\n\n{}") + print msg.format(str(err)) + exit(1) + + # connect to remote host using the provided config and extract all possible node information + nodes_config = yaml.load(oc.get_nodes()) + + # contains host types (e.g. masters, nodes, etcd) + host_groups = dict() + openshift_hosted_logging_deploy = False + is_etcd_deployed = master_config.get("storage-backend", "") in ["etcd3", "etcd2", "etcd"] + + if asset_config and asset_config.get('loggingPublicURL'): + openshift_hosted_logging_deploy = True + + openshift_hosted_logging_deploy = user_config.get("openshift_hosted_logging_deploy", openshift_hosted_logging_deploy) + + m = Host("masters") + m.address(master_config["masterIP"]) + m.public_host_name(master_public_url) + host_groups["masters"] = HostGroup([m]) + + if nodes_config: + node_hosts = list() + for node in nodes_config.get("items", []): + if node["kind"] != "Node": + continue + + n = Host("nodes") + + address = "" + internal_hostname = "" + for item in node["status"].get("addresses", []): + if not address and item['type'] in ['InternalIP', 'LegacyHostIP']: + address = item['address'] + + if item['type'] == 'Hostname': + internal_hostname = item['address'] + + n.address(address) + n.host_name(internal_hostname) + node_hosts.append(n) + + host_groups["nodes"] = HostGroup(node_hosts) + + if etcd_config: + etcd_hosts = list() + for url in etcd_config.get("urls", []): + e = Host("etcd") + e.host_name(url) + etcd_hosts.append(e) + + host_groups["etcd"] = HostGroup(etcd_hosts) + + # open new inventory file for writing + if USE_STDOUT: + inv_file_obj = sys.stdout + else: + try: + inv_file_obj = open(INVENTORY_FULL_PATH, 'w+') + except IOError as err: + print "Unable to create or open generated inventory file: {}".format(err) + exit(1) + + inv_file_obj.write("[OSEv3:children]\n") + for group in host_groups: + inv_file_obj.write("{}\n".format(group)) + inv_file_obj.write("\n") + + inv_file_obj.write("[OSEv3:vars]\n") + if ansible_ssh_user: + inv_file_obj.write("ansible_ssh_user={}\n".format(ansible_ssh_user)) + if ansible_become_user: + inv_file_obj.write("ansible_become_user={}\n".format(ansible_become_user)) + inv_file_obj.write("ansible_become=yes\n") + + if openshift_uninstall_images: + inv_file_obj.write("openshift_uninstall_images={}\n".format(str(openshift_uninstall_images))) + if openshift_deployment_type: + inv_file_obj.write("openshift_deployment_type={}\n".format(openshift_deployment_type)) + if openshift_install_examples: + inv_file_obj.write("openshift_install_examples={}\n".format(str(openshift_install_examples))) + + if openshift_release: + inv_file_obj.write("openshift_release={}\n".format(str(openshift_release))) + if openshift_image_tag: + inv_file_obj.write("openshift_image_tag={}\n".format(str(openshift_image_tag))) + if openshift_logging_image_version: + inv_file_obj.write("openshift_logging_image_version={}\n".format(str(openshift_logging_image_version))) + if openshift_disable_check: + inv_file_obj.write("openshift_disable_check={}\n".format(str(openshift_disable_check))) + inv_file_obj.write("\n") + + inv_file_obj.write("openshift_hosted_logging_deploy={}\n".format(str(openshift_hosted_logging_deploy))) + inv_file_obj.write("\n") + + for group in host_groups: + inv_file_obj.write("[{}]\n".format(host_groups[group].get_group_name())) + inv_file_obj.write(host_groups[group].string()) + inv_file_obj.write("\n") + + inv_file_obj.close() + + +if __name__ == '__main__': + main() diff --git a/images/installer/root/usr/local/bin/run 
b/images/installer/root/usr/local/bin/run index 9401ea118..70aa0bac3 100755 --- a/images/installer/root/usr/local/bin/run +++ b/images/installer/root/usr/local/bin/run @@ -24,9 +24,12 @@ elif [[ -v INVENTORY_URL ]]; then elif [[ -v DYNAMIC_SCRIPT_URL ]]; then curl -o ${INVENTORY} ${DYNAMIC_SCRIPT_URL} chmod 755 ${INVENTORY} +elif [[ -v GENERATE_INVENTORY ]]; then + # dynamically generate inventory file using bind-mounted info + /usr/local/bin/generate ${INVENTORY} else echo - echo "One of INVENTORY_FILE, INVENTORY_URL or DYNAMIC_SCRIPT_URL must be provided." + echo "One of INVENTORY_FILE, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided." exec /usr/local/bin/usage fi INVENTORY_ARG="-i ${INVENTORY}" @@ -36,7 +39,7 @@ if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then fi if [[ -v VAULT_PASS ]]; then - VAULT_PASS_FILE=.vaultpass + VAULT_PASS_FILE="$(mktemp)" echo ${VAULT_PASS} > ${VAULT_PASS_FILE} VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}" fi diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index c32c47fe1..9d811fcab 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -34,17 +34,17 @@ openshift_deployment_type=origin # use this to lookup the latest exact version of the container images, which is the tag actually used to configure # the cluster. For RPM installations we just verify the version detected in your configured repos matches this # release. -openshift_release=v3.6 +openshift_release=v3.7 # Specify an exact container image tag to install or configure. # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.6.0 +#openshift_image_tag=v3.7.0 # Specify an exact rpm version to install or configure. # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_pkg_version=-3.6.0 +#openshift_pkg_version=-3.7.0 # This enables all the system containers except for docker: #openshift_use_system_containers=False @@ -118,8 +118,8 @@ openshift_release=v3.6 # Force the registry to use for the docker/crio system container. By default the registry # will be built off of the deployment type and ansible_distribution. Only # use this option if you are sure you know what you are doing! 
-#openshift_docker_systemcontainer_image_registry_override="registry.example.com" -#openshift_crio_systemcontainer_image_registry_override="registry.example.com" +#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" +#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -491,10 +491,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html # # By default metrics are not automatically deployed, set this to enable them -# openshift_hosted_metrics_deploy=true +#openshift_metrics_install_metrics=true # # Storage Options -# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored +# If openshift_metrics_storage_kind is unset then metrics will be stored # in an EmptyDir volume and will be deleted when the cassandra pod terminates. # Storage options A & B currently support only one cassandra pod which is # generally enough for up to 1000 pods. Additional volumes can be created @@ -504,29 +504,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. For example, the volume # path using these options would be "/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. For example, the remote volume path using these # options would be "nfs.example.com:/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_host=nfs.example.com -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. 
-#openshift_hosted_metrics_storage_kind=dynamic +#openshift_metrics_storage_kind=dynamic # # Other Metrics Options -- Common items you may wish to reconfigure, for the complete # list of options please see roles/openshift_metrics/README.md @@ -535,10 +535,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. -#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics # Configure the prefix and version for the component images -#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_metrics_deployer_version=v3.6.0 +#openshift_metrics_image_prefix=docker.io/openshift/origin- +#openshift_metrics_image_version=v3.7.0 # # StorageClass # openshift_storageclass_name=gp2 @@ -548,36 +548,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Logging deployment # # Currently logging deployment is disabled by default, enable it by setting this -#openshift_hosted_logging_deploy=true +#openshift_logging_install_logging=true # # Logging storage config # Option A - NFS Host Group # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. For example, the volume # path using these options would be "/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. For example, the remote volume path using these # options would be "nfs.example.com:/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_host=nfs.example.com -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. 
-#openshift_hosted_logging_storage_kind=dynamic +#openshift_logging_storage_kind=dynamic # # Option D - none -- Logging will use emptydir volumes which are destroyed when # pods are deleted @@ -587,13 +587,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # # Configure loggingPublicURL in the master config for aggregate logging, defaults # to kibana.{{ openshift_master_default_subdomain }} -#openshift_hosted_logging_hostname=logging.apps.example.com +#openshift_logging_kibana_hostname=logging.apps.example.com # Configure the number of elastic search nodes, unless you're using dynamic provisioning # this value must be 1 -#openshift_hosted_logging_elasticsearch_cluster_size=1 +#openshift_logging_es_cluster_size=1 # Configure the prefix and version for the component images -#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_logging_deployer_version=v3.6.0 +#openshift_logging_image_prefix=docker.io/openshift/origin- +#openshift_logging_image_version=v3.7.0 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' @@ -613,7 +613,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting # docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS # environment variable located in /etc/sysconfig/docker-network. -# When upgrading these must be specificed! +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_cluster_network_cidr: clusterNetworkCIDR +# openshift_portal_net: serviceNetworkCIDR +# When installing osm_cluster_network_cidr and openshift_portal_net must be set. +# Sane examples are provided below. #osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 @@ -635,7 +640,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure number of bits to allocate to each host’s subnet e.g. 9 # would mean a /23 network on the host. -# When upgrading this must be specificed! +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_host_subnet_length: hostSubnetLength +# When installing osm_host_subnet_length must be set. A sane example is provided below. #osm_host_subnet_length=9 # Configure master API and console ports. diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index e867ffbae..e6deda4ac 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -34,17 +34,17 @@ openshift_deployment_type=openshift-enterprise # use this to lookup the latest exact version of the container images, which is the tag actually used to configure # the cluster. For RPM installations we just verify the version detected in your configured repos matches this # release. -openshift_release=v3.6 +openshift_release=v3.7 # Specify an exact container image tag to install or configure. # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.6.0 +#openshift_image_tag=v3.7.0 # Specify an exact rpm version to install or configure. 
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_pkg_version=-3.6.0 +#openshift_pkg_version=-3.7.0 # This enables all the system containers except for docker: #openshift_use_system_containers=False @@ -118,8 +118,8 @@ openshift_release=v3.6 # Force the registry to use for the container-engine/crio system container. By default the registry # will be built off of the deployment type and ansible_distribution. Only # use this option if you are sure you know what you are doing! -#openshift_docker_systemcontainer_image_registry_override="registry.example.com" -#openshift_crio_systemcontainer_image_registry_override="registry.example.com" +#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" +#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -499,10 +499,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html # # By default metrics are not automatically deployed, set this to enable them -# openshift_hosted_metrics_deploy=true +#openshift_metrics_install_metrics=true # # Storage Options -# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored +# If openshift_metrics_storage_kind is unset then metrics will be stored # in an EmptyDir volume and will be deleted when the cassandra pod terminates. # Storage options A & B currently support only one cassandra pod which is # generally enough for up to 1000 pods. Additional volumes can be created @@ -512,29 +512,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. For example, the volume # path using these options would be "/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. 
For example, the remote volume path using these # options would be "nfs.example.com:/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_host=nfs.example.com -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. -#openshift_hosted_metrics_storage_kind=dynamic +#openshift_metrics_storage_kind=dynamic # # Other Metrics Options -- Common items you may wish to reconfigure, for the complete # list of options please see roles/openshift_metrics/README.md @@ -543,10 +543,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. -#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics # Configure the prefix and version for the component images -#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_metrics_deployer_version=3.6.0 +#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/ +#openshift_metrics_image_version=3.7.0 # # StorageClass # openshift_storageclass_name=gp2 @@ -556,36 +556,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Logging deployment # # Currently logging deployment is disabled by default, enable it by setting this -#openshift_hosted_logging_deploy=true +#openshift_logging_install_logging=true # # Logging storage config # Option A - NFS Host Group # An NFS volume will be created with path "nfs_directory/volume_name" # on the host within the [nfs] host group. For example, the volume # path using these options would be "/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on # the storage_host. 
For example, the remote volume path using these # options would be "nfs.example.com:/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_host=nfs.example.com -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. -#openshift_hosted_logging_storage_kind=dynamic +#openshift_logging_storage_kind=dynamic # # Option D - none -- Logging will use emptydir volumes which are destroyed when # pods are deleted @@ -595,13 +595,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # # Configure loggingPublicURL in the master config for aggregate logging, defaults # to kibana.{{ openshift_master_default_subdomain }} -#openshift_hosted_logging_hostname=logging.apps.example.com +#openshift_logging_kibana_hostname=logging.apps.example.com # Configure the number of elastic search nodes, unless you're using dynamic provisioning # this value must be 1 -#openshift_hosted_logging_elasticsearch_cluster_size=1 +#openshift_logging_es_cluster_size=1 # Configure the prefix and version for the component images -#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_logging_deployer_version=3.6.0 +#openshift_logging_image_prefix=registry.example.com:8888/openshift3/ +#openshift_logging_image_version=3.7.0 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' @@ -621,7 +621,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting # docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS # environment variable located in /etc/sysconfig/docker-network. -# When upgrading these must be specificed! +# When upgrading or scaling up, the following must match what's in your master config! +# Inventory: master yaml field +# osm_cluster_network_cidr: clusterNetworkCIDR +# openshift_portal_net: serviceNetworkCIDR +# When installing, osm_cluster_network_cidr and openshift_portal_net must be set. +# Sane examples are provided below. #osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 @@ -643,7 +648,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure number of bits to allocate to each host’s subnet e.g. 9 # would mean a /23 network on the host. -# When upgrading this must be specificed! +# When upgrading or scaling up, the following must match what's in your master config! +# Inventory: master yaml field +# osm_host_subnet_length: hostSubnetLength +# When installing, osm_host_subnet_length must be set. A sane example is provided below. #osm_host_subnet_length=9 # Configure master API and console ports.
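For readers mapping the new inventory comments above onto the master configuration, the referenced fields live under `networkConfig` in `/etc/origin/master/master-config.yaml`. Below is a minimal, illustrative sketch using the sample values from the inventory; the field-name mapping comes from the comments above and from `set_network_facts.yml` later in this diff, while any surrounding keys of a real master config are omitted:

```yaml
# /etc/origin/master/master-config.yaml (excerpt, illustrative only)
networkConfig:
  clusterNetworkCIDR: 10.128.0.0/14   # inventory: osm_cluster_network_cidr
  hostSubnetLength: 9                 # inventory: osm_host_subnet_length
  serviceNetworkCIDR: 172.30.0.0/16   # inventory: openshift_portal_net
```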
diff --git a/openshift-ansible.spec b/openshift-ansible.spec index fa63bad19..b5673cda1 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.125.1%{?dist} +Release: 0.127.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -280,6 +280,107 @@ Atomic OpenShift Utilities includes %changelog +* Thu Sep 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.127.0 +- Updating to always configure api aggregation with installation + (ewolinet@redhat.com) +- Do not reconcile in >= 3.7 (simo@redhat.com) +- Cleanup old deployment types (mgugino@redhat.com) +- crio: ensure no default CNI configuration files are left + (gscrivan@redhat.com) +- node: specify the DNS domain (gscrivan@redhat.com) +- more retries on repoquery_cmd (lmeyer@redhat.com) +- fix etcd back message error (jchaloup@redhat.com) +- openshift_checks: enable providing file outputs (lmeyer@redhat.com) +- Fix registry auth task ordering (mgugino@redhat.com) +- Prometheus role fixes (zgalor@redhat.com) +- papr: Update inventory to include required vars (smilner@redhat.com) +- testing: Skip net vars on integration tests (smilner@redhat.com) +- inventory: Update network variable doc (smilner@redhat.com) +- installer image: use tmp file for vaultpass (lmeyer@redhat.com) +- system container: use ansible root as cwd (lmeyer@redhat.com) +- openshift_sanitize_inventory: Check for required vars (smilner@redhat.com) +- No conversion to boolean and no quoting for include_granted_scopes. + (jpazdziora@redhat.com) +- Correct firewall install for openshift-nfs (rteague@redhat.com) +- inventory: Update versions to 3.7 (smilner@redhat.com) +- Port origin-gce roles for cluster setup to copy AWS provisioning + (ccoleman@redhat.com) +- Bug 1491636 - honor openshift_logging_es_ops_nodeselector + (jwozniak@redhat.com) +- Setup tuned after the node has been restarted. (jmencak@redhat.com) +- Only attempt to start iptables on hosts in the current batch + (sdodson@redhat.com) +- Removing setting of pod presets (ewolinet@redhat.com) +- cri-o: Fix Fedora image name (smilner@redhat.com) +- add retry on repoquery_cmd (lmeyer@redhat.com) +- add retries to repoquery module (lmeyer@redhat.com) +- Rework openshift-cluster into deploy_cluster.yml (rteague@redhat.com) +- inventory generate: fix config doc (lmeyer@redhat.com) +- inventory generate: remove refs to openshift_cluster_user (lmeyer@redhat.com) +- inventory generate: always use kubeconfig, no login (lmeyer@redhat.com) +- Scaffold out the entire build defaults hash (tbielawa@redhat.com) +- Use openshift.common.ip rather than ansible_default_ipv4 in etcd migration + playbook. (abutcher@redhat.com) +- Add IMAGE_VERSION to the image stream tag source (sdodson@redhat.com) +- Add loadbalancer config entry point (rteague@redhat.com) +- pull openshift_master deps out into a play (jchaloup@redhat.com) +- Don't assume storage_migration control variables are already boolean + (mchappel@redhat.com) +- upgrade: Updates warning on missing required variables (smilner@redhat.com) +- Update master config with new client urls during etcd scaleup. 
+ (abutcher@redhat.com) +- Increase rate limiting in journald.conf (maszulik@redhat.com) +- Correct logic for openshift_hosted_*_wait (rteague@redhat.com) +- Adding mangagement-admin SC to admin role for management-infra project + (ewolinet@redhat.com) +- Only install base openshift package on masters and nodes (mgugino@redhat.com) +- Workaround Ansible Jinja2 delimiter warning (rteague@redhat.com) +- openshift-checks: add role symlink (lmeyer@redhat.com) +- double the required disk space for etcd backup (jchaloup@redhat.com) +- openshift_health_check: allow disabling all checks (lmeyer@redhat.com) +- docker_image_availability: fix local image search (lmeyer@redhat.com) +- docker_image_availability: probe registry connectivity (lmeyer@redhat.com) +- openshift_checks: add retries in python (lmeyer@redhat.com) +- add inventory-generator under new sub pkg (jvallejo@redhat.com) +- Re-enabling new tuned profile hierarchy (PR5089) (jmencak@redhat.com) +- Add `openshift_node_open_ports` to allow arbitrary firewall exposure + (ccoleman@redhat.com) +- Fix: authenticated registry support for containerized hosts + (mgugino@redhat.com) +- [Proposal] OpenShift-Ansible Proposal Process (rteague@redhat.com) +- Improve searching when conditions for Jinja2 delimiters (rteague@redhat.com) +- Clarify requirement of having etcd group (sdodson@redhat.com) +- add health checks 3_6,3_7 upgrade path (jvallejo@redhat.com) +- container-engine: Allow full image override (smilner@redhat.com) +- Add openshift_public_hostname length check (mgugino@redhat.com) +- Skip failure dedup instead of crashing (rhcarvalho@gmail.com) +- Properly quote "true" and "false" strings for include_granted_scopes. + (jpazdziora@redhat.com) +- Move sysctl.conf customizations to a separate file (jdesousa@redhat.com) +- Fix new_master or new_node fail check (denverjanke@gmail.com) +- [Proposal] OpenShift-Ansible Playbook Consolidation (rteague@redhat.com) +- GlusterFS: Allow option to use or ignore default node selectors + (jarrpa@redhat.com) +- GlusterFS: Clarify heketi URL documentation (jarrpa@redhat.com) +- GlusterFS: Add files/templates for v3.7 (jarrpa@redhat.com) +- Support setting annotations on Hawkular route (hansmi@vshn.ch) +- add additional preflight checks to upgrade path (jvallejo@redhat.com) +- hot fix for env variable resolve (m.judeikis@gmail.com) +- GlusterFS: Correct firewall port names (jarrpa@redhat.com) +- Make RH subscription more resilient to temporary failures + (lhuard@amadeus.com) + +* Mon Sep 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.126.0 +- Fix rpm version logic for hosts (mgugino@redhat.com) +- Revert back to hostnamectl and previous default of not setting hostname + (sdodson@redhat.com) +- Correct include path to not follow symlink (rteague@redhat.com) +- Fix include path for docker upgrade tasks (rteague@redhat.com) +- Fix issue with etcd_common when using pre_upgrade tag (rteague@redhat.com) +- inventory: Denote new required upgrade variables (smilner@redhat.com) +- upgrade: Verify required network items are set (smilner@redhat.com) +- ami build process calls openshift-node/config.yml (kwoodson@redhat.com) + * Fri Sep 08 2017 Scott Dodson <sdodson@redhat.com> 3.7.0-0.125.1 - Consolidating AWS roles and variables underneath openshift_aws role. (kwoodson@redhat.com) diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/byo/openshift-checks/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-checks/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..32c828f97 --- /dev/null +++ b/playbooks/byo/openshift-loadbalancer/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-loadbalancer/config.yml diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 2179d1416..a09edd55a 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,7 +1,7 @@ --- - include: ../openshift-cluster/initialize_groups.yml -- name: Ensure there are new_masters +- name: Ensure there are new_masters or new_nodes hosts: localhost connection: local become: no @@ -13,7 +13,7 @@ add hosts to the new_masters and new_nodes host groups to add masters. when: - - (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0) + - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0) - include: ../../common/openshift-cluster/std_include.yml diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml new file mode 100644 index 000000000..93b24411e --- /dev/null +++ b/playbooks/byo/openshift-nfs/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-nfs/config.yml diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 1b14ff32e..06f914981 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -8,9 +8,9 @@ hosts: OSEv3 roles: - role: rhel_subscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - - openshift_repos - - os_update_latest + when: + - deployment_type == 'openshift-enterprise' + - ansible_distribution == "RedHat" + - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] + - role: openshift_repos + - role: os_update_latest diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index bbd5a0185..804ea8eb8 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -57,8 +57,27 @@ tags: - hosted +- name: Configure API Aggregation on masters + hosts: oo_masters + serial: 1 + tasks: + - block: + - include_role: + name: openshift_service_catalog + tasks_from: wire_aggregator + vars: + first_master: "{{ groups.oo_first_master[0] }}" + - include: service_catalog.yml when: - openshift_enable_service_catalog | default(false) | bool tags: - servicecatalog + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 16a733899..e55b2f964 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ 
b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -43,11 +43,14 @@ - name: Evaluate groups - Fail if no etcd hosts group is defined fail: msg: > - No etcd hosts defined. Running an all-in-one master is deprecated and - will no longer be supported in a future upgrade. + Running etcd as an embedded service is no longer supported. If this is a + new install please define an 'etcd' group with either one or three + hosts. These hosts may be the same hosts as your masters. If this is an + upgrade you may set openshift_master_unsupported_embedded_etcd=true + until a migration playbook becomes available. when: - - g_etcd_hosts | default([]) | length == 0 - - not openshift_master_unsupported_all_in_one | default(False) + - g_etcd_hosts | default([]) | length not in [3,1] + - not openshift_master_unsupported_embedded_etcd | default(False) - not openshift_node_bootstrap | default(False) - name: Evaluate oo_all_hosts diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 0723575c2..517023ba1 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -94,7 +94,7 @@ with_items: - iproute - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}" - - PyYAML + - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - yum-utils - name: Ensure various deps for running system containers are installed diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 7af6b25bc..1b186f181 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,4 +1,12 @@ --- +- name: Set version_install_base_package true on masters and nodes + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + - name: Set version_install_base_package true + set_fact: + version_install_base_package: True + when: version_install_base_package is not defined + # NOTE: requires openshift_facts be run - name: Determine openshift_version to configure on first master hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 75339f6df..0e970f376 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -19,31 +19,15 @@ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - set_fact: - logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" - logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" - logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}" - logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" - logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" + roles: - role: openshift_default_storage_class when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - role: openshift_hosted - role: openshift_metrics - when: openshift_hosted_metrics_deploy | default(false) | bool + when: openshift_metrics_install_metrics | default(false) | bool - role: openshift_logging - when: openshift_hosted_logging_deploy | default(false) | bool - openshift_hosted_logging_hostname: "{{ logging_hostname }}" - openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}" - openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}" - openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}" - openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" - openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}" - openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}" + when: openshift_logging_install_logging | default(false) | bool - role: cockpit-ui when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) @@ -57,8 +41,6 @@ - hosted pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" - - set_fact: openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" tasks: @@ -66,10 +48,10 @@ - include_role: name: openshift_logging tasks_from: update_master_config - when: openshift_hosted_logging_deploy | default(false) | bool + when: openshift_logging_install_logging | default(false) | bool - block: - include_role: name: openshift_metrics tasks_from: update_master_config - when: openshift_hosted_metrics_deploy | default(false) | bool + when: openshift_metrics_install_metrics | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml index 6964e8567..58bbcc658 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -37,10 +37,17 @@ - name: Generate new etcd CA hosts: oo_first_etcd roles: - - role: openshift_etcd_ca - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + - role: openshift_etcd_facts + tasks: + - include_role: + name: etcd + tasks_from: ca + vars: + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + when: + - etcd_ca_setup | default(True) | bool - name: Create temp directory for syncing certs hosts: localhost diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml index 6b5c805e6..16f0edb06 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml @@ -45,19 +45,23 @@ - name: Redeploy etcd certificates hosts: oo_etcd_to_config any_errors_fatal: true - roles: - - role: openshift_etcd_server_certificates - etcd_certificates_redeploy: true - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - include_role: + name: etcd + tasks_from: server_certificates + vars: + etcd_certificates_redeploy: true + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Redeploy etcd client certificates for masters hosts: oo_masters_to_config any_errors_fatal: true roles: + - role: openshift_etcd_facts - role: openshift_etcd_client_certificates etcd_certificates_redeploy: true etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml index 7e28a11e8..26716a92d 100644 --- a/playbooks/common/openshift-cluster/sanity_checks.yml +++ b/playbooks/common/openshift-cluster/sanity_checks.yml @@ -45,3 +45,7 @@ - fail: msg: openshift_hostname must be 63 characters or less when: openshift_hostname is defined and 
openshift_hostname | length > 63 + + - fail: + msg: openshift_public_hostname must be 63 characters or less + when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml index 599350258..529ee99be 100644 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -1,20 +1,9 @@ --- - -- name: Update Master configs - hosts: oo_masters - serial: 1 - tasks: - - block: - - include_role: - name: openshift_service_catalog - tasks_from: wire_aggregator - vars: - first_master: "{{ groups.oo_first_master[0] }}" - - name: Service Catalog hosts: oo_first_master roles: - openshift_service_catalog - ansible_service_broker + - template_service_broker vars: first_master: "{{ groups.oo_first_master[0] }}" diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index b2a2eac9a..52345a9ba 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -18,12 +18,16 @@ - name: Get current version of Docker command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" register: curr_docker_version + retries: 4 + until: curr_docker_version | succeeded changed_when: false - name: Get latest available version of Docker command: > {{ repoquery_cmd }} --qf '%{version}' "docker" register: avail_docker_version + retries: 4 + until: avail_docker_version | succeeded # Don't expect docker rpm to be available on hosts that don't already have it installed: when: pkg_check.rc == 0 failed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 616ba04f8..2cc6c9019 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -2,7 +2,7 @@ - name: Backup etcd hosts: oo_etcd_hosts_to_backup roles: - - role: openshift_facts + - role: openshift_etcd_facts - role: etcd_common r_etcd_common_action: backup r_etcd_common_backup_tag: etcd_backup_tag diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml index 497709d25..ad6325ca0 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -11,3 +11,4 @@ checks: - disk_availability - memory_availability + - docker_image_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml index 78923e04f..3c0017891 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml @@ -5,24 +5,9 @@ tasks: - fail: msg: > - This upgrade is only supported for origin, openshift-enterprise, and online + This upgrade is only supported for origin and openshift-enterprise deployment types - when: deployment_type not in ['origin','openshift-enterprise', 'online'] - - # osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are - # required when upgrading to avoid changes that may 
occur between releases - # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023 - - assert: - that: - - "osm_cluster_network_cidr is defined" - - "osm_host_subnet_length is defined" - - "openshift_portal_net is defined" - msg: > - osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory - variables when upgrading. These variables should match what is currently used in the cluster. If - you don't remember what these values are you can find them in /etc/origin/master/master-config.yaml - on a master with the names clusterNetworkCIDR (osm_cluster_network_cidr), - osm_host_subnet_length (hostSubnetLength), and openshift_portal_net (hostSubnetLength). + when: deployment_type not in ['origin','openshift-enterprise'] # Error out in situations where the user has older versions specified in their # inventory in any of the openshift_release, openshift_image_tag, and diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 9b4a8e413..142ce5f3d 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -27,13 +27,17 @@ - name: Set fact avail_openshift_version set_fact: - avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}" + avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}" + - name: Set openshift_pkg_version when not specified + set_fact: + openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}" + when: openshift_pkg_version | default('') == '' - name: Verify OpenShift RPMs are available for upgrade fail: msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" when: - - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<') + - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<') - name: Fail when openshift version does not meet minium requirement for Origin upgrade fail: diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 164baca81..8cc46ab68 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -8,7 +8,6 @@ # TODO: If the sdn package isn't already installed this will install it, we # should fix that - - name: Upgrade master packages package: name={{ master_pkgs | join(',') }} state=present vars: @@ -16,7 +15,7 @@ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - PyYAML diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 18f10437d..4e73293f0 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -13,11 +13,11 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm register: l_pb_upgrade_control_plane_pre_upgrade_storage - when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection @@ -151,11 +151,11 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm register: l_pb_upgrade_control_plane_post_upgrade_storage - when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool + - openshift_upgrade_post_storage_migration_fatal | default(false) | bool run_once: true delegate_to: "{{ groups.oo_first_master.0 }}" @@ -189,8 +189,6 @@ roles: - { role: openshift_cli } vars: - origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" - ent_reconcile_bindings: true openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe # restart. 
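One detail worth noting in the storage-migration conditionals above: dropping the second argument to Jinja2's `default` filter changes how explicitly falsy values are treated. A small illustrative sketch follows; the variable name is hypothetical and not taken from the playbooks:

```yaml
# Hypothetical tasks showing default(true, true) vs. default(true).
# Assume the user set 'migration_enabled: false' in their inventory as a boolean.
- debug:
    msg: "{{ migration_enabled | default(true, true) }}"  # -> true: falsy values are also replaced by the default
- debug:
    msg: "{{ migration_enabled | default(true) }}"         # -> false: only undefined variables get the default
```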
@@ -201,6 +199,7 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm -o name register: reconcile_cluster_role_result + when: not openshift.common.version_gte_3_7 | bool changed_when: - reconcile_cluster_role_result.stdout != '' - reconcile_cluster_role_result.rc == 0 @@ -215,7 +214,7 @@ --exclude-groups=system:unauthenticated --exclude-users=system:anonymous --additive-only=true --confirm -o name - when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + when: not openshift.common.version_gte_3_7 | bool register: reconcile_bindings_result changed_when: - reconcile_bindings_result.stdout != '' @@ -230,7 +229,7 @@ changed_when: - reconcile_jenkins_role_binding_result.stdout != '' - reconcile_jenkins_role_binding_result.rc == 0 - when: openshift.common.version_gte_3_4_or_1_4 | bool + when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool) - name: Reconcile Security Context Constraints command: > @@ -247,11 +246,11 @@ migrate storage --include=* --confirm run_once: true register: l_pb_upgrade_control_plane_post_upgrade_storage - when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool + - openshift_upgrade_post_storage_migration_fatal | default(false) | bool - set_fact: reconcile_complete: True diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 9fe059ac9..7c72564b6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -75,6 +75,10 @@ # docker is configured and running. skip_docker_role: True +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 1b10d4e37..6c1c7c921 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -68,6 +68,10 @@ # docker is configured and running. skip_docker_role: True +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index f97f34c3b..3549cf6c3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -75,6 +75,10 @@ # docker is configured and running. 
skip_docker_role: True +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index e95b90cd5..e5e04e643 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -68,6 +68,10 @@ # docker is configured and running. skip_docker_role: True +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml index a2af7bb21..e4ab0aa41 100644 --- a/playbooks/common/openshift-etcd/migrate.yml +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -69,7 +69,7 @@ - role: etcd_migrate r_etcd_migrate_action: migrate r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ ansible_default_ipv4.address }}" + etcd_peer: "{{ openshift.common.ip }}" etcd_url_scheme: "https" etcd_peer_url_scheme: "https" @@ -80,7 +80,7 @@ - role: etcd_migrate r_etcd_migrate_action: clean_data r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ ansible_default_ipv4.address }}" + etcd_peer: "{{ openshift.common.ip }}" etcd_url_scheme: "https" etcd_peer_url_scheme: "https" post_tasks: @@ -115,7 +115,7 @@ roles: - role: etcd_migrate r_etcd_migrate_action: add_ttls - etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}" + etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" etcd_url_scheme: "https" etcd_peer_url_scheme: "https" when: etcd_migration_failed | length == 0 diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index 5f8bb1c7a..d3fa48bad 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -23,6 +23,9 @@ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} delegate_to: "{{ etcd_ca_host }}" + failed_when: + - etcd_add_check.rc == 1 + - ("peerURL exists" not in etcd_add_check.stderr) register: etcd_add_check retries: 3 delay: 10 @@ -53,3 +56,19 @@ retries: 3 delay: 30 until: scaleup_health.rc == 0 + +- name: Update master etcd client urls + hosts: oo_masters_to_config + serial: 1 + tasks: + - include_role: + name: openshift_master + tasks_from: update_etcd_client_urls + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index 7468c78f0..de467a722 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ 
-17,7 +17,10 @@ - role: openshift_manageiq when: openshift_use_manageiq | default(false) | bool - role: cockpit - when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and - (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' ) + when: + - openshift.common.is_atomic + - deployment_type == 'openshift-enterprise' + - osm_use_cockpit is undefined or osm_use_cockpit | bool + - openshift.common.deployment_subtype != 'registry' - role: flannel_register when: openshift_use_flannel | default(false) | bool diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index c77d7bb87..2e7646372 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -35,7 +35,9 @@ file: path: "/etc/origin/{{ item }}" state: absent - when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise'] + when: + - rpmgenerated_config.stat.exists == true + - deployment_type == 'openshift-enterprise' with_items: - master - node @@ -179,39 +181,51 @@ openshift_master_count: "{{ openshift.master.master_count }}" openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.ip') | default([]) | join(',') - }}" - roles: - - role: os_firewall - - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) | oo_collect('openshift.common.hostname') | default(none, true) }}" - openshift_master_hosts: "{{ groups.oo_masters_to_config }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.ip') | default([]) | join(',') + }}" + roles: + - role: os_firewall + - role: openshift_master_facts + - role: openshift_hosted_facts + - role: openshift_master_certificates + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + when: groups.oo_etcd_to_config | default([]) | length != 0 + - role: openshift_clock + - role: openshift_cloud_provider + - role: openshift_builddefaults + - role: openshift_buildoverrides + - role: nickhammond.logrotate + - role: contiv + contiv_role: netmaster + when: openshift_use_contiv | default(False) | bool + - role: openshift_master + openshift_master_hosts: "{{ groups.oo_masters_to_config }}" r_openshift_master_clean_install: "{{ 
hostvars[groups.oo_first_master.0].l_clean_install }}" r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: nuage_ca + when: openshift_use_nuage | default(false) | bool + - role: nuage_common + when: openshift_use_nuage | default(false) | bool - role: nuage_master when: openshift_use_nuage | default(false) | bool - role: calico_master when: openshift_use_calico | default(false) | bool - post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 17f9ef4bc..8c366e038 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -43,6 +43,8 @@ delay: 1 changed_when: false +- include: ../openshift-master/set_network_facts.yml + - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml new file mode 100644 index 000000000..2ad805858 --- /dev/null +++ b/playbooks/common/openshift-master/set_network_facts.yml @@ -0,0 +1,28 @@ +--- +- name: Read first master\'s config + hosts: oo_first_master + gather_facts: no + tasks: + - stat: + path: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_stat + - slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_slurp + +- name: Set network facts for masters + hosts: oo_masters_to_config + gather_facts: no + tasks: + - block: + - set_fact: + osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" + when: osm_cluster_network_cidr is not defined + - set_fact: + osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}" + when: osm_host_subnet_length is not defined + - set_fact: + openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" + when: openshift_portal_net is not defined + when: + - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index 000e46e80..64ea0d3c4 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -2,5 +2,5 @@ - name: Configure nfs hosts: oo_nfs_to_config roles: - - role: openshift_facts + - role: os_firewall - role: openshift_storage_nfs diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 0801c41ff..5207ca9c8 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -65,12 +65,16 @@ vars: openshift_node_master_api_url: 
"{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" roles: - - role: flannel - etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + - role: openshift_facts + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates + etcd_cert_prefix: flannel.etcd- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" + - role: flannel + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" when: openshift_use_flannel | default(false) | bool - role: calico when: openshift_use_calico | default(false) | bool diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml new file mode 100644 index 000000000..a3d1d46a6 --- /dev/null +++ b/playbooks/gcp/openshift-cluster/provision.yml @@ -0,0 +1,19 @@ +--- +- name: Ensure all cloud resources necessary for the cluster, including instances, have been started + hosts: localhost + connection: local + gather_facts: no + tasks: + + - name: provision a GCP cluster in the specified project + include_role: + name: openshift_gcp + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/std_include.yml + +- name: run the config + include: ../../common/openshift-cluster/config.yml diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml index 12929b354..9eb9db316 100644 --- a/roles/ansible_service_broker/defaults/main.yml +++ b/roles/ansible_service_broker/defaults/main.yml @@ -1,6 +1,7 @@ --- ansible_service_broker_remove: false +ansible_service_broker_install: false ansible_service_broker_log_level: info ansible_service_broker_output_request: false ansible_service_broker_recovery: true diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml index b46ce8233..d8695bd3a 100644 --- a/roles/ansible_service_broker/tasks/main.yml +++ b/roles/ansible_service_broker/tasks/main.yml @@ -2,7 +2,7 @@ # do any asserts here - include: install.yml - when: not ansible_service_broker_remove|default(false) | bool + when: ansible_service_broker_install | default(false) | bool - include: remove.yml - when: ansible_service_broker_remove|default(false) | bool + when: ansible_service_broker_remove | default(false) | bool diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml index 39f730462..0e3863304 100644 --- a/roles/calico/tasks/main.yml +++ b/roles/calico/tasks/main.yml @@ -2,10 +2,14 @@ - name: Calico Node | Error if invalid cert arguments fail: msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints" - when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined) + when: + - calico_etcd_cert_dir is defined or 
calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined + - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined) - name: Calico Node | Generate OpenShift-etcd certs - include: ../../../roles/etcd_client_certificates/tasks/main.yml + include_role: + name: etcd + tasks_from: client_certificates when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined vars: etcd_cert_prefix: calico.etcd- @@ -28,18 +32,18 @@ msg: "Invalid etcd configuration for calico." when: item is not defined or item == '' with_items: - - calico_etcd_ca_cert_file - - calico_etcd_cert_file - - calico_etcd_key_file - - calico_etcd_endpoints + - calico_etcd_ca_cert_file + - calico_etcd_cert_file + - calico_etcd_key_file + - calico_etcd_endpoints - name: Calico Node | Assure the calico certs are present stat: path: "{{ item }}" with_items: - - "{{ calico_etcd_ca_cert_file }}" - - "{{ calico_etcd_cert_file }}" - - "{{ calico_etcd_key_file }}" + - "{{ calico_etcd_ca_cert_file }}" + - "{{ calico_etcd_cert_file }}" + - "{{ calico_etcd_key_file }}" - name: Calico Node | Configure Calico service unit file template: diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index ed97d539c..81f3ee9e4 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1 +1,6 @@ --- +docker_cli_auth_config_path: '/root/.docker' + +# oreg_url is defined by user input. +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" +oreg_auth_credentials_replace: False diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index bc52ab60c..16aea5067 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -3,6 +3,8 @@ command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" when: not openshift.common.is_atomic | bool register: curr_docker_version + retries: 4 + until: curr_docker_version | succeeded changed_when: false - name: Error out if Docker pre-installed but too old @@ -117,6 +119,18 @@ notify: - restart docker +- name: Check for credentials file for registry auth + stat: + path: "{{ docker_cli_auth_config_path }}/config.json" + when: oreg_auth_user is defined + register: docker_cli_auth_credentials_stat + +- name: Create credentials for docker cli registry auth + command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + - name: Start the Docker service systemd: name: docker diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 24ca0d9f8..e6fc2db06 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -95,7 +95,7 @@ - name: Set to default prepend set_fact: l_crio_image_prepend: "docker.io/gscrivano" - l_crio_image_name: "crio-o-fedora" + l_crio_image_name: "cri-o-fedora" - name: Use Centos based image when distribution is CentOS set_fact: @@ -108,18 +108,22 @@ l_crio_image_name: "cri-o" when: ansible_distribution == "RedHat" - # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504 - - name: Use a testing registry if requested - set_fact: - l_crio_image_prepend: "{{ openshift_crio_systemcontainer_image_registry_override }}" - when: - - openshift_crio_systemcontainer_image_registry_override is defined - - openshift_crio_systemcontainer_image_registry_override != "" - - name: Set the full image name set_fact: l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest" + # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 + - name: Use a specific image if requested + set_fact: + l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}" + when: + - openshift_crio_systemcontainer_image_override is defined + - openshift_crio_systemcontainer_image_override != "" + + # Be nice and let the user see the variable result + - debug: + var: l_crio_image + # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull CRI-O System Container image command: "atomic pull --storage ostree {{ l_crio_image }}" @@ -134,6 +138,14 @@ image: "{{ l_crio_image }}" state: latest +- name: Remove CRI-o default configuration files + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/cni/net.d/200-loopback.conf + - /etc/cni/net.d/100-crio-bridge.conf + - name: Create the CRI-O configuration template: dest: /etc/crio/crio.conf diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 57a84bc2c..146e5f430 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -100,18 +100,22 @@ l_docker_image_prepend: "registry.fedoraproject.org/f25" 
when: ansible_distribution == 'Fedora' - # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504 - - name: Use a testing registry if requested - set_fact: - l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}" - when: - - openshift_docker_systemcontainer_image_registry_override is defined - - openshift_docker_systemcontainer_image_registry_override != "" - - name: Set the full image name set_fact: l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest" + # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 + - name: Use a specific image if requested + set_fact: + l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}" + when: + - openshift_docker_systemcontainer_image_override is defined + - openshift_docker_systemcontainer_image_override != "" + + # Be nice and let the user see the variable result + - debug: + var: l_docker_image + # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull Container Engine System Container image command: "atomic pull --storage ostree {{ l_docker_image }}" diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 9a955c822..d69366a39 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -18,5 +18,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_os_firewall -- role: etcd_server_certificates - role: etcd_common diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml new file mode 100644 index 000000000..7cda49069 --- /dev/null +++ b/roles/etcd/tasks/ca.yml @@ -0,0 +1,2 @@ +--- +- include: ca/deploy.yml diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd/tasks/ca/deploy.yml index b4dea4a07..3d32290a2 100644 --- a/roles/etcd_ca/tasks/main.yml +++ b/roles/etcd/tasks/ca/deploy.yml @@ -1,6 +1,8 @@ --- - name: Install openssl - package: name=openssl state=present + package: + name: openssl + state: present when: not etcd_is_atomic | bool delegate_to: "{{ etcd_ca_host }}" run_once: true diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml new file mode 100644 index 000000000..2e9c078b9 --- /dev/null +++ b/roles/etcd/tasks/client_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: client_certificates/fetch_from_ca.yml diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd/tasks/client_certificates/fetch_from_ca.yml index bbd29ece1..119071a72 100644 --- a/roles/etcd_client_certificates/tasks/main.yml +++ b/roles/etcd/tasks/client_certificates/fetch_from_ca.yml @@ -9,7 +9,7 @@ - fail: msg: > CA certificate {{ etcd_ca_cert }} doesn't exist on CA host - {{ etcd_ca_host }}. Apply 'etcd_ca' role to + {{ etcd_ca_host }}. Apply 'etcd_ca' action from `etcd` role to {{ etcd_ca_host }}. 
when: not g_ca_cert_stat_result.stat.exists | bool run_once: true diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 78e543ef1..870c11ad4 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,4 +1,6 @@ --- +- include: server_certificates.yml + - name: Set hostname and ip facts set_fact: # Store etcd_hostname and etcd_ip such that they will be available diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml new file mode 100644 index 000000000..f0ba58b6e --- /dev/null +++ b/roles/etcd/tasks/server_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: server_certificates/fetch_from_ca.yml diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd/tasks/server_certificates/fetch_from_ca.yml index 4795188a6..064fe1952 100644 --- a/roles/etcd_server_certificates/tasks/main.yml +++ b/roles/etcd/tasks/server_certificates/fetch_from_ca.yml @@ -1,6 +1,12 @@ --- +- include: ../ca/deploy.yml + when: + - etcd_ca_setup | default(True) | bool + - name: Install etcd - package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present + package: + name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}" + state: present when: not etcd_is_containerized | bool - name: Check status of etcd certificates diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd/templates/openssl_append.j2 index f28316fc2..f28316fc2 100644 --- a/roles/etcd_ca/templates/openssl_append.j2 +++ b/roles/etcd/templates/openssl_append.j2 diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md deleted file mode 100644 index 60a880e30..000000000 --- a/roles/etcd_ca/README.md +++ /dev/null @@ -1,34 +0,0 @@ -etcd_ca -======================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_client_certificates/README.md b/roles/etcd_client_certificates/README.md deleted file mode 100644 index 269d5296d..000000000 --- a/roles/etcd_client_certificates/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Etcd Certificates -=========================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_client_certificates/meta/main.yml b/roles/etcd_client_certificates/meta/main.yml deleted file mode 100644 index efebdb599..000000000 --- a/roles/etcd_client_certificates/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: Etcd Client Certificates - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_common diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index 89993f7ea..b67411f40 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -56,7 +56,7 @@ etcd_is_containerized: False etcd_is_thirdparty: False # etcd dir vars -etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" +etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" # etcd ports and protocols etcd_client_port: 2379 diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml index 2bc486d3f..42d27c081 100644 --- a/roles/etcd_common/tasks/backup.yml +++ b/roles/etcd_common/tasks/backup.yml @@ -29,7 +29,6 @@ - name: Check current etcd disk usage shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1 register: l_etcd_disk_usage - when: r_etcd_common_embedded_etcd | bool # AUDIT:changed_when: `false` because we are only inspecting # state, not manipulating anything changed_when: false @@ -37,9 +36,9 @@ - name: Abort if insufficient disk space for etcd backup fail: msg: > - {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup, {{ l_avail_disk.stdout }} Kb available. - when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int) + when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int # For non containerized and non embedded we should have the correct version of # etcd installed already. So don't do anything. diff --git a/roles/etcd_server_certificates/README.md b/roles/etcd_server_certificates/README.md deleted file mode 100644 index 269d5296d..000000000 --- a/roles/etcd_server_certificates/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Etcd Certificates -=========================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml deleted file mode 100644 index 4b6013a49..000000000 --- a/roles/etcd_server_certificates/meta/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: Etcd Server Certificates - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_ca - when: (etcd_ca_setup | default(True) | bool) diff --git a/roles/flannel/README.md b/roles/flannel/README.md index 0c7347603..b9e15e6e0 100644 --- a/roles/flannel/README.md +++ b/roles/flannel/README.md @@ -27,8 +27,6 @@ Role Variables Dependencies ------------ -openshift_facts - Example Playbook ---------------- diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 35f825586..51128dba6 100644 --- a/roles/flannel/meta/main.yml +++ b/roles/flannel/meta/main.yml @@ -12,7 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_facts -- role: openshift_etcd_client_certificates - etcd_cert_prefix: flannel.etcd- +dependencies: [] diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 45d7444a4..1e6eb2386 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -745,7 +745,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 231857cca..8c6a81cc8 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -723,7 +723,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 44f3f57d8..4a7847e88 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 687cff579..b8af5cad9 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index ddf5d90b7..3364f8de3 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c00eee381..c64d7ffd2 100644 --- 
a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -835,7 +835,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 0c925ab0b..492494bda 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -860,7 +860,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 567ecfd4e..b412ca8af 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -709,7 +709,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 9515de569..8bbc22c49 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -715,7 +715,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index d461e5ae9..ad17051cb 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -759,7 +759,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 22ad58725..74a84ac89 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -726,7 +726,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index b6c6e47d9..eea1516ae 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -699,7 +699,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index f7fc286e0..dc33d3b8a 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -718,7 +718,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = 
yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 2206878a4..88fd9554d 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -735,7 +735,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 126d7a617..8408f9ebc 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -738,7 +738,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index d20904d0d..d1be0b534 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -670,7 +670,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 91199d093..9a281e6cd 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -727,7 +727,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index f9b2d81fa..b503c330b 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -724,7 +724,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 895322ba5..7a9e3bf89 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 8f8e46e1e..875e473ad 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -769,7 +769,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 7130cc5fc..ec3635753 
100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -713,7 +713,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 0c4b99e30..c010607e8 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -765,7 +765,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 7ab139e85..e83a6e26d 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -772,7 +772,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 5d539ced4..0d46bbf96 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 97e213f46..662d77ec1 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 9339a85e5..574f109e4 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -729,7 +729,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 2fa349547..e430546ee 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -771,7 +771,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 55e1054e7..a12620968 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -683,7 +683,7 @@ class Yedit(object): # 
pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 63bad57b4..134b2ad19 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -760,7 +760,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml index c0d200e73..6a452ccec 100755 --- a/roles/lib_openshift/src/test/integration/oc_configmap.yml +++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml @@ -55,7 +55,7 @@ config: "{{ filename }}" from_literal: foo: notbar - deployment_type: online + deployment_type: openshift-enterprise - name: fetch the updated configmap oc_configmap: @@ -70,7 +70,7 @@ assert: that: - cmout.results.results[0].metadata.name == 'configmaptest' - - cmout.results.results[0].data.deployment_type == 'online' + - cmout.results.results[0].data.deployment_type == 'openshift-enterprise' - cmout.results.results[0].data.foo == 'notbar' ###### end update test ########### diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py index 318fd6167..27042c64b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_configmap.py +++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py @@ -79,7 +79,7 @@ class OCConfigMapTest(unittest.TestCase): ''' Testing a configmap create ''' params = copy.deepcopy(OCConfigMapTest.params) params['from_file'] = {'test': '/root/file'} - params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'} + params['from_literal'] = {'foo': 'bar', 'deployment_type': 'openshift-enterprise'} configmap = '''{ "apiVersion": "v1", @@ -100,7 +100,7 @@ class OCConfigMapTest(unittest.TestCase): "apiVersion": "v1", "data": { "foo": "bar", - "deployment_type": "online", + "deployment_type": "openshift-enterprise", "test": "this is a file\\n" }, "kind": "ConfigMap", @@ -128,7 +128,7 @@ class OCConfigMapTest(unittest.TestCase): self.assertTrue(results['changed']) self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap') - self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online') + self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'openshift-enterprise') @unittest.skipIf(six.PY3, 'py2 test only') @mock.patch('os.path.exists') diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py index 95a305b58..e5ac1f74f 100644 --- a/roles/lib_utils/library/repoquery.py +++ b/roles/lib_utils/library/repoquery.py @@ -35,6 +35,7 @@ import os # noqa: F401 import re # noqa: F401 import shutil # noqa: F401 import tempfile # noqa: F401 +import time # noqa: F401 try: import ruamel.yaml as yaml # noqa: F401 @@ -618,17 +619,22 @@ def main(): show_duplicates=dict(default=False, required=False, type='bool'), match_version=dict(default=None, required=False, type='str'), ignore_excluders=dict(default=False, required=False, type='bool'), + retries=dict(default=4, required=False, type='int'), + retry_interval=dict(default=5, required=False, type='int'), 
), supports_check_mode=False, required_if=[('show_duplicates', True, ['name'])], ) - rval = Repoquery.run_ansible(module.params, module.check_mode) - - if 'failed' in rval: - module.fail_json(**rval) - - module.exit_json(**rval) + tries = 1 + while True: + rval = Repoquery.run_ansible(module.params, module.check_mode) + if 'failed' not in rval: + module.exit_json(**rval) + elif tries > module.params['retries']: + module.fail_json(**rval) + tries += 1 + time.sleep(module.params['retry_interval']) if __name__ == "__main__": diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py index baf72fe47..cf5c2e423 100644 --- a/roles/lib_utils/library/yedit.py +++ b/roles/lib_utils/library/yedit.py @@ -35,6 +35,7 @@ import os # noqa: F401 import re # noqa: F401 import shutil # noqa: F401 import tempfile # noqa: F401 +import time # noqa: F401 try: import ruamel.yaml as yaml # noqa: F401 @@ -792,7 +793,7 @@ class Yedit(object): yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py index 40773b1c1..5f5b93639 100644 --- a/roles/lib_utils/src/ansible/repoquery.py +++ b/roles/lib_utils/src/ansible/repoquery.py @@ -19,17 +19,22 @@ def main(): show_duplicates=dict(default=False, required=False, type='bool'), match_version=dict(default=None, required=False, type='str'), ignore_excluders=dict(default=False, required=False, type='bool'), + retries=dict(default=4, required=False, type='int'), + retry_interval=dict(default=5, required=False, type='int'), ), supports_check_mode=False, required_if=[('show_duplicates', True, ['name'])], ) - rval = Repoquery.run_ansible(module.params, module.check_mode) - - if 'failed' in rval: - module.fail_json(**rval) - - module.exit_json(**rval) + tries = 1 + while True: + rval = Repoquery.run_ansible(module.params, module.check_mode) + if 'failed' not in rval: + module.exit_json(**rval) + elif tries > module.params['retries']: + module.fail_json(**rval) + tries += 1 + time.sleep(module.params['retry_interval']) if __name__ == "__main__": diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py index 957c35a06..0a4fbe07a 100644 --- a/roles/lib_utils/src/class/yedit.py +++ b/roles/lib_utils/src/class/yedit.py @@ -590,7 +590,7 @@ class Yedit(object): yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py index 567f8c9e0..07a04b7ae 100644 --- a/roles/lib_utils/src/lib/import.py +++ b/roles/lib_utils/src/lib/import.py @@ -10,6 +10,7 @@ import os # noqa: F401 import re # noqa: F401 import shutil # noqa: F401 import tempfile # noqa: F401 +import time # noqa: F401 try: import ruamel.yaml as yaml # noqa: F401 diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index 3da340c85..e2f7af5ad 100644 --- a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -13,8 +13,5 @@ galaxy_info: - cloud - system dependencies: -- role: nuage_ca -- role: nuage_common -- role: openshift_etcd_client_certificates - role: lib_openshift - role: lib_os_firewall diff --git a/roles/openshift_etcd_ca/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml deleted file mode 100644 
index f1d669d6b..000000000 --- a/roles/openshift_etcd_ca/meta/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -galaxy_info: - author: Tim Bielawa - description: Meta role around the etcd_ca role - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: openshift_etcd_facts -- role: etcd_ca - when: (etcd_ca_setup | default(True) | bool) diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml index 3268c390f..fbc72c8a3 100644 --- a/roles/openshift_etcd_client_certificates/meta/main.yml +++ b/roles/openshift_etcd_client_certificates/meta/main.yml @@ -11,6 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_etcd_facts -- role: etcd_client_certificates +dependencies: [] diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml new file mode 100644 index 000000000..7f8b667f0 --- /dev/null +++ b/roles/openshift_etcd_client_certificates/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include_role: + name: etcd + tasks_from: client_certificates diff --git a/roles/openshift_etcd_server_certificates/meta/main.yml b/roles/openshift_etcd_server_certificates/meta/main.yml deleted file mode 100644 index 7750f14af..000000000 --- a/roles/openshift_etcd_server_certificates/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Etcd Server Certificates - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_etcd_facts -- role: etcd_server_certificates diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md index 8cc479c73..014cef111 100644 --- a/roles/openshift_examples/README.md +++ b/roles/openshift_examples/README.md @@ -21,13 +21,13 @@ Facts Role Variables -------------- -| Name | Default value | | -|-------------------------------------|-----------------------------------------------------|------------------------------------------| -| openshift_examples_load_centos | true when openshift_deployment_typenot 'enterprise' | Load centos image streams | -| openshift_examples_load_rhel | true if openshift_deployment_type is 'enterprise' | Load rhel image streams | -| openshift_examples_load_db_templates| true | Loads database templates | -| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc | -| openshift_examples_load_xpaas | false | Loads xpass streams and templates | +| Name | Default value | | +|-------------------------------------|----------------------------------------------------------------|------------------------------------------| +| openshift_examples_load_centos | true when openshift_deployment_type not 'openshift-enterprise' | Load centos image streams | +| openshift_examples_load_rhel | true if openshift_deployment_type is 'openshift-enterprise' | Load rhel image streams | +| openshift_examples_load_db_templates| true | Loads database templates | +| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc | +| openshift_examples_load_xpaas | false | Loads xpass streams and templates | Dependencies diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 
ebfa6bb8f..1c2c91a5a 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -477,11 +477,7 @@ def set_selectors(facts): facts if they were not already present """ - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'online': - selector = "type=infra" - else: - selector = "region=infra" + selector = "region=infra" if 'hosted' not in facts: facts['hosted'] = {} @@ -497,10 +493,10 @@ def set_selectors(facts): facts['hosted']['metrics'] = {} if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: facts['hosted']['metrics']['selector'] = None - if 'logging' not in facts['hosted']: - facts['hosted']['logging'] = {} - if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']: - facts['hosted']['logging']['selector'] = None + if 'logging' not in facts: + facts['logging'] = {} + if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: + facts['logging']['selector'] = None if 'etcd' not in facts['hosted']: facts['hosted']['etcd'] = {} if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: @@ -568,7 +564,7 @@ def set_identity_providers_if_unset(facts): name='allow_all', challenge=True, login=True, kind='AllowAllPasswordIdentityProvider' ) - if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': identity_provider = dict( name='deny_all', challenge=True, login=True, kind='DenyAllPasswordIdentityProvider' @@ -770,13 +766,11 @@ def set_deployment_facts_if_unset(facts): service_type = 'atomic-openshift' if deployment_type == 'origin': service_type = 'origin' - elif deployment_type in ['enterprise']: - service_type = 'openshift' facts['common']['service_type'] = service_type if 'docker' in facts: deployment_type = facts['common']['deployment_type'] - if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': addtl_regs = facts['docker'].get('additional_registries', []) ent_reg = 'registry.access.redhat.com' if ent_reg not in addtl_regs: @@ -787,30 +781,21 @@ def set_deployment_facts_if_unset(facts): deployment_type = facts['common']['deployment_type'] if 'registry_url' not in facts[role]: registry_url = 'openshift/origin-${component}:${version}' - if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': registry_url = 'openshift3/ose-${component}:${version}' - elif deployment_type == 'atomic-enterprise': - registry_url = 'aep3_beta/aep-${component}:${version}' facts[role]['registry_url'] = registry_url if 'master' in facts: deployment_type = facts['common']['deployment_type'] openshift_features = ['Builder', 'S2IBuilder', 'WebConsole'] - if 'disabled_features' in facts['master']: - if deployment_type == 'atomic-enterprise': - curr_disabled_features = set(facts['master']['disabled_features']) - facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features)) - else: + if 'disabled_features' not in facts['master']: if facts['common']['deployment_subtype'] == 'registry': facts['master']['disabled_features'] = openshift_features if 'node' in facts: deployment_type = facts['common']['deployment_type'] if 'storage_plugin_deps' not in facts['node']: - if deployment_type in ['openshift-enterprise', 
'atomic-enterprise', 'origin']: - facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] - else: - facts['node']['storage_plugin_deps'] = [] + facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] return facts @@ -1602,11 +1587,13 @@ def set_builddefaults_facts(facts): builddefaults['git_no_proxy'] = builddefaults['no_proxy'] # If we're actually defining a builddefaults config then create admission_plugin_config # then merge builddefaults[config] structure into admission_plugin_config + + # 'config' is the 'openshift_builddefaults_json' inventory variable if 'config' in builddefaults: if 'admission_plugin_config' not in facts['master']: - facts['master']['admission_plugin_config'] = dict() + # Scaffold out the full expected datastructure + facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}} facts['master']['admission_plugin_config'].update(builddefaults['config']) - # if the user didn't actually provide proxy values, delete the proxy env variable defaults. delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env']) return facts @@ -1669,7 +1656,7 @@ def set_container_facts_if_unset(facts): facts """ deployment_type = facts['common']['deployment_type'] - if deployment_type in ['enterprise', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': master_image = 'openshift3/ose' cli_image = master_image node_image = 'openshift3/node' @@ -1679,16 +1666,6 @@ def set_container_facts_if_unset(facts): router_image = 'openshift3/ose-haproxy-router' registry_image = 'openshift3/ose-docker-registry' deployer_image = 'openshift3/ose-deployer' - elif deployment_type == 'atomic-enterprise': - master_image = 'aep3_beta/aep' - cli_image = master_image - node_image = 'aep3_beta/node' - ovs_image = 'aep3_beta/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' - pod_image = 'aep3_beta/aep-pod' - router_image = 'aep3_beta/aep-haproxy-router' - registry_image = 'aep3_beta/aep-docker-registry' - deployer_image = 'aep3_beta/aep-deployer' else: master_image = 'openshift/origin' cli_image = master_image @@ -1808,7 +1785,10 @@ class OpenShiftFacts(object): 'etcd', 'hosted', 'master', - 'node'] + 'node', + 'logging', + 'loggingops', + 'metrics'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. 
# pylint: disable=too-many-arguments,no-value-for-parameter @@ -1925,7 +1905,7 @@ class OpenShiftFacts(object): hostname_f = output.strip() if exit_code == 0 else '' hostname_values = [hostname_f, self.system_facts['ansible_nodename'], self.system_facts['ansible_fqdn']] - hostname = choose_hostname(hostname_values, ip_addr) + hostname = choose_hostname(hostname_values, ip_addr).lower() defaults['common'] = dict(ip=ip_addr, public_ip=ip_addr, @@ -1989,66 +1969,6 @@ class OpenShiftFacts(object): if 'hosted' in roles or self.role == 'hosted': defaults['hosted'] = dict( - metrics=dict( - deploy=False, - duration=7, - resolution='10s', - storage=dict( - kind=None, - volume=dict( - name='metrics', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - loggingops=dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es-ops', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - logging=dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), etcd=dict( storage=dict( kind=None, @@ -2095,6 +2015,69 @@ class OpenShiftFacts(object): router=dict() ) + defaults['logging'] = dict( + storage=dict( + kind=None, + volume=dict( + name='logging-es', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['loggingops'] = dict( + storage=dict( + kind=None, + volume=dict( + name='logging-es-ops', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['metrics'] = dict( + deploy=False, + duration=7, + resolution='10s', + storage=dict( + kind=None, + volume=dict( + name='metrics', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + return defaults def guess_host_provider(self): diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml new file mode 100644 index 000000000..ad205ba33 --- /dev/null +++ b/roles/openshift_gcp/tasks/main.yaml @@ -0,0 +1,43 @@ +# +# This role relies on gcloud invoked via templated bash in order to +# provide a high performance deployment option. The next logical step +# is to transition to a deployment manager template which is then instantiated. 
+# TODO: use a formal set of role parameters consistent with openshift_aws +# +--- +- name: Templatize DNS script + template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx +- name: Templatize provision script + template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx +- name: Templatize de-provision script + template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx + when: + - state | default('present') == 'absent' + +- name: Provision GCP DNS domain + command: /tmp/openshift_gcp_provision_dns.sh + args: + chdir: "{{ playbook_dir }}/files" + register: dns_provision + when: + - state | default('present') == 'present' + +- name: Ensure that DNS resolves to the hosted zone + assert: + that: + - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout" + msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'." + when: + - state | default('present') == 'present' + +- name: Provision GCP resources + command: /tmp/openshift_gcp_provision.sh + args: + chdir: "{{ playbook_dir }}/files" + when: + - state | default('present') == 'present' + +- name: De-provision GCP resources + command: /tmp/openshift_gcp_provision_remove.sh + when: + - state | default('present') == 'absent' diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh new file mode 100644 index 000000000..eacf84b4d --- /dev/null +++ b/roles/openshift_gcp/templates/dns.j2.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -euo pipefail + +dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" + +# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist +if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null +fi + +# Always output the expected nameservers as a comma delimited list +gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh new file mode 100644 index 000000000..e68e9683f --- /dev/null +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -0,0 +1,318 @@ +#!/bin/bash + +set -euo pipefail + +# Create SSH key for GCE +if [ ! -f "{{ gce_ssh_private_key }}" ]; then + ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N '' + ssh-add "{{ gce_ssh_private_key }}" || true +fi + +# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there +pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub") +key_tmp_file='/tmp/ocp-gce-keys' +if ! 
gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then + if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then + gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + fi + echo -n 'cloud-user:' >> "$key_tmp_file" + cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file" + gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" + rm -f "$key_tmp_file" +fi + +metadata="" +if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then + if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then + echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)" + exit 1 + fi + metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}" +fi +if [[ -n "{{ provision_gce_user_data_file }}" ]]; then + if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then + echo "User data file missing at {{ provision_gce_user_data_file }}" + exit 1 + fi + if [[ -n "${metadata}" ]]; then + metadata+="," + else + metadata="--metadata-from-file=" + fi + metadata+="user-data={{ provision_gce_user_data_file }}" +fi + +# Select image or image family +image="{{ provision_gce_registered_image }}" +if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then + if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then + echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'" + exit 1 + fi + image="family/${image}" +fi + +### PROVISION THE INFRASTRUCTURE ### + +dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" + +# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers +if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script" + exit 1 +fi + +# Create network +if ! 
gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto" +else + echo "Network '{{ gce_network_name }}' already exists" +fi + +# Firewall rules in a form: +# ['name']='parameters for "gcloud compute firewall-rules create"' +# For all possible parameters see: gcloud compute firewall-rules create --help +range="" +if [[ -n "{{ openshift_node_port_range }}" ]]; then + range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}" +fi +declare -A FW_RULES=( + ['icmp']='--allow icmp' + ['ssh-external']='--allow tcp:22' + ['ssh-internal']='--allow tcp:22 --source-tags bastion' + ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master" + ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master" + ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node" + ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node" + ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node" +) +for rule in "${!FW_RULES[@]}"; do + ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]} + else + echo "Firewall rule '{{ provision_prefix }}${rule}' already exists" + fi ) & +done + + +# Master IP +( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global +else + echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists" +fi ) & + +# Internal master IP +( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" +else + echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists" +fi ) & + +# Router IP +( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" +else + echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists" +fi ) & + + +{% for node_group in provision_gce_node_groups %} +# configure {{ node_group.name }} +( + if ! 
gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \ + --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \ + --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \ + --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \ + --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \ + --image "${image}" ${metadata} + else + echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists" + fi + + # Create instance group + if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \ + --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" + else + echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists" + fi +) & +{% endfor %} + +for i in `jobs -p`; do wait $i; done + + +# Configure the master external LB rules +( +# Master health check +if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" +else + echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists" +fi + +gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \ + --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}" + +# Master backend service +if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}" + gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}" +else + echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists" +fi + +# Master tcp proxy target +if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend" +else + echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists" +fi + +# Master forwarding rule +if ! 
gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then + IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') + gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target" +else + echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists" +fi +) & + + +# Configure the master internal LB rules +( +# Internal master health check +if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" +else + echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists" +fi + +# Internal master target pool +if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}" +else + echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists" +fi + +# Internal master forwarding rule +if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then + IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool" +else + echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists" +fi +) & + + +# Configure the infra node rules +( +# Router health check +if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz" +else + echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists" +fi + +# Router target pool +if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then + gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}" +else + echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists" +fi + +# Router forwarding rule +if ! 
gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then + IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool" +else + echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists" +fi +) & + +for i in `jobs -p`; do wait $i; done + +# set the target pools +( +if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then + gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}" +else + gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}" + gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}" +fi +) & + +# configure DNS +( +# Retry DNS changes until they succeed since this may be a shared resource +while true; do + dns="${TMPDIR:-/tmp}/dns.yaml" + rm -f $dns + + # DNS record for master lb + if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then + IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') + if [[ ! -f $dns ]]; then + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + fi + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP" + else + echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists" + fi + + # DNS record for internal master lb + if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then + IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + if [[ ! -f $dns ]]; then + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + fi + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP" + else + echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists" + fi + + # DNS record for router lb + if ! 
gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then + IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + if [[ ! -f $dns ]]; then + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + fi + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP" + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}." + else + echo "DNS record for '{{ wildcard_zone }}' already exists" + fi + + # Commit all DNS changes, retrying if preconditions are not met + if [[ -f $dns ]]; then + if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + rc=$? + if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then + continue + fi + exit $rc + fi + fi + break +done +) & + +# Create bucket for registry +( +if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then + gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" +else + echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists" +fi +) & + +# wait until all node groups are stable +{% for node_group in provision_gce_node_groups %} +# wait for stable {{ node_group.name }} +( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) & +{% endfor %} + + +for i in `jobs -p`; do wait $i; done diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh new file mode 100644 index 000000000..41ceab2b5 --- /dev/null +++ b/roles/openshift_gcp/templates/remove.j2.sh @@ -0,0 +1,156 @@ +#!/bin/bash + +set -euo pipefail + +function teardown_cmd() { + a=( $@ ) + local name=$1 + a=( "${a[@]:1}" ) + local flag=0 + local found= + for i in ${a[@]}; do + if [[ "$i" == "--"* ]]; then + found=true + break + fi + flag=$((flag+1)) + done + if [[ -z "${found}" ]]; then + flag=$((flag+1)) + fi + if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then + gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag} + fi +} + +function teardown() { + for i in `seq 1 20`; do + if teardown_cmd $@; then + break + fi + sleep 0.5 + done +} + +# Preemptively spin down the instances +{% for node_group in provision_gce_node_groups %} +# scale down {{ node_group.name }} +( + # performs a delete and scale down as one operation to ensure maximum parallelism + if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then + exit 0 + fi + instances="${instances%?}" + if [[ -z "${instances}" ]]; then + echo "warning: No instances in {{ node_group.name }}" 1>&2 + exit 0 + fi + if ! 
gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then + echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2 + exit 0 + fi +) & +{% endfor %} + +# Bucket for registry +( +if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then + gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" +fi +) & + +# DNS +( +dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + # Retry DNS changes until they succeed since this may be a shared resource + while true; do + dns="${TMPDIR:-/tmp}/dns.yaml" + rm -f "${dns}" + + # export all dns records that match into a zone format, and turn each line into a set of args for + # record-sets transaction. + gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}" + if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \ + awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input" + then + rm -f "${dns}" + gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}" + + # Commit all DNS changes, retrying if preconditions are not met + if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + rc=$? 
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then + continue + fi + exit $rc + fi + fi + rm "${dns}.input" + break + done +fi +) & + +( +# Router network rules +teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}" +teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}" +teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks +teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}" + +# Internal master network rules +teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}" +teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}" +teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks +teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}" +) & + +( +# Master SSL network rules +teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global +teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies +teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global +teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global +teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks +) & + +#Firewall rules +#['name']='parameters for "gcloud compute firewall-rules create"' +#For all possible parameters see: gcloud compute firewall-rules create --help +declare -A FW_RULES=( + ['icmp']="" + ['ssh-external']="" + ['ssh-internal']="" + ['master-internal']="" + ['master-external']="" + ['node-internal']="" + ['infra-node-internal']="" + ['infra-node-external']="" +) +for rule in "${!FW_RULES[@]}"; do + ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then + # retry a few times because this call can be flaky + for i in `seq 1 3`; do + if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then + break + fi + done + fi ) & +done + +for i in `jobs -p`; do wait $i; done + +{% for node_group in provision_gce_node_groups %} +# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed +( + teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}" + teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates +) & +{% endfor %} + +for i in `jobs -p`; do wait $i; done + +# Network +teardown "{{ gce_network_name }}" compute networks diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf new file mode 100644 index 000000000..b87e5e0b6 --- /dev/null +++ b/roles/openshift_gcp_image_prep/files/partition.conf @@ -0,0 +1,3 @@ +[Service] +ExecStartPost=-/usr/bin/growpart /dev/sda 1 +ExecStartPost=-/sbin/xfs_growfs / diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml new file mode 100644 index 000000000..fee5ab618 --- /dev/null +++ b/roles/openshift_gcp_image_prep/tasks/main.yaml @@ -0,0 +1,18 @@ +--- +# GCE instances are starting with xfs AND barrier=1, which is only for extfs. 
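The tasks that follow rewrite /etc/fstab and the GRUB config in place using lineinfile with backrefs. As a rough illustration of the fstab rewrite only, here is a minimal Python sketch of the same substitution; the sample fstab line, the use of the re module, and the three-group replacement string are assumptions for illustration, not part of the role:

```python
import re

# hypothetical fstab entry from a GCE image: xfs mounted with the ext-only barrier=1 option
fstab_line = "UUID=abc123 / xfs defaults,barrier=1 0 0"

# same pattern the task below uses; it defines three capture groups around the barrier option
pattern = r'^(.+)xfs(.+?),?barrier=1,?(.*?)$'

# prints the entry with barrier=1 stripped from the mount options
print(re.sub(pattern, r'\1xfs\2 \3', fstab_line))
```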
+- name: Remove barrier=1 from XFS fstab entries + lineinfile: + path: /etc/fstab + regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$' + line: '\1xfs\2 \4' + backrefs: yes + +- name: Ensure the root filesystem has XFS group quota turned on + lineinfile: + path: /boot/grub2/grub.cfg + regexp: '^(.*)linux16 (.*)$' + line: '\1linux16 \2 rootflags=gquota' + backrefs: yes + +- name: Ensure the root partition grows on startup + copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/ diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py index 8d35db6b5..326176273 100644 --- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py +++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py @@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters. """ import sys import os +import base64 import traceback +import errno +import json from collections import defaultdict from ansible.plugins.action import ActionBase @@ -38,8 +41,13 @@ class ActionModule(ActionBase): # storing the information we need in the result. result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context') + # if the user wants to write check results to files, they provide this directory: + output_dir = task_vars.get("openshift_checks_output_dir") + if output_dir: + output_dir = os.path.join(output_dir, task_vars["ansible_host"]) + try: - known_checks = self.load_known_checks(tmp, task_vars) + known_checks = self.load_known_checks(tmp, task_vars, output_dir) args = self._task.args requested_checks = normalize(args.get('checks', [])) @@ -65,21 +73,20 @@ class ActionModule(ActionBase): for name in resolved_checks: display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"])) - check = known_checks[name] - check_results[name] = run_check(name, check, user_disabled_checks) - if check.changed: - check_results[name]["changed"] = True + check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir) result["changed"] = any(r.get("changed") for r in check_results.values()) if any(r.get("failed") for r in check_results.values()): result["failed"] = True result["msg"] = "One or more checks failed" + write_result_to_output_dir(output_dir, result) return result - def load_known_checks(self, tmp, task_vars): + def load_known_checks(self, tmp, task_vars, output_dir=None): """Find all existing checks and return a mapping of names to instances.""" load_checks() + want_full_results = bool(output_dir) known_checks = {} for cls in OpenShiftCheck.subclasses(): @@ -90,7 +97,12 @@ class ActionModule(ActionBase): "duplicate check name '{}' in: '{}' and '{}'" "".format(name, full_class_name(cls), full_class_name(other_cls)) ) - known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars) + known_checks[name] = cls( + execute_module=self._execute_module, + tmp=tmp, + task_vars=task_vars, + want_full_results=want_full_results + ) return known_checks @@ -185,9 +197,11 @@ def normalize(checks): return [name.strip() for name in checks if name.strip()] -def run_check(name, check, user_disabled_checks): +def run_check(name, check, user_disabled_checks, output_dir=None): """Run a single check if enabled and return a result dict.""" - if name in user_disabled_checks: + + # determine if we're going to run the check (not inactive or disabled) + if name in user_disabled_checks or '*' in 
user_disabled_checks: return dict(skipped=True, skipped_reason="Disabled by user request") # pylint: disable=broad-except; capturing exceptions broadly is intentional, @@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks): if not is_active: return dict(skipped=True, skipped_reason="Not active for this host") + # run the check + result = {} try: - return check.run() + result = check.run() except OpenShiftCheckException as exc: - return dict(failed=True, msg=str(exc)) + check.register_failure(exc) + except Exception as exc: + check.register_failure("\n".join([str(exc), traceback.format_exc()])) + + # process the check state; compose the result hash, write files as needed + if check.changed: + result["changed"] = True + if check.failures or result.get("failed"): + if "msg" in result: # failure result has msg; combine with any registered failures + check.register_failure(result.get("msg")) + result["failures"] = [(fail.name, str(fail)) for fail in check.failures] + result["failed"] = True + result["msg"] = "\n".join(str(fail) for fail in check.failures) + write_to_output_file(output_dir, name + ".failures.json", result["failures"]) + if check.logs: + write_to_output_file(output_dir, name + ".log.json", check.logs) + if check.files_to_save: + write_files_to_save(output_dir, check) + + return result + + +def prepare_output_dir(dirname): + """Create the directory, including parents. Return bool for success/failure.""" + try: + os.makedirs(dirname) + return True + except OSError as exc: + # trying to create existing dir leads to error; + # that error is fine, but for any other, assume the dir is not there + return exc.errno == errno.EEXIST + + +def copy_remote_file_to_dir(check, file_to_save, output_dir, fname): + """Copy file from remote host to local file in output_dir, if given.""" + if not output_dir or not prepare_output_dir(output_dir): + return + local_file = os.path.join(output_dir, fname) + + # pylint: disable=broad-except; do not need to do anything about failure to write dir/file + # and do not want exceptions to break anything. + try: + # NOTE: it would have been nice to copy the file directly without loading it into + # memory, but there does not seem to be a good way to do this via ansible. + result = check.execute_module("slurp", dict(src=file_to_save), register=False) + if result.get("failed"): + display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg"))) + return + + content = result["content"] + if result.get("encoding") == "base64": + content = base64.b64decode(content) + with open(local_file, "wb") as outfile: + outfile.write(content) + except Exception as exc: + display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc)) + return + + +def _no_fail(obj): + # pylint: disable=broad-except; do not want serialization to fail for any reason + try: + return str(obj) + except Exception: + return "[not serializable]" + + +def write_to_output_file(output_dir, filename, data): + """If output_dir provided, write data to file. 
Serialize as JSON if data is not a string.""" + + if not output_dir or not prepare_output_dir(output_dir): + return + filename = os.path.join(output_dir, filename) + try: + with open(filename, 'w') as outfile: + if isinstance(data, string_types): + outfile.write(data) + else: + json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail) + # pylint: disable=broad-except; do not want serialization/write to break for any reason + except Exception as exc: + display.warning("Could not write output file {}: {}".format(filename, exc)) + + +def write_result_to_output_dir(output_dir, result): + """If output_dir provided, write the result as json to result.json. + + Success/failure of the write is recorded as "output_files" in the result hash afterward. + Otherwise this is much like write_to_output_file. + """ + + if not output_dir: + return + if not prepare_output_dir(output_dir): + result["output_files"] = "Error creating output directory " + output_dir + return + + filename = os.path.join(output_dir, "result.json") + try: + with open(filename, 'w') as outfile: + json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail) + result["output_files"] = "Check results for this host written to " + filename + # pylint: disable=broad-except; do not want serialization/write to break for any reason except Exception as exc: - return dict(failed=True, msg=str(exc), exception=traceback.format_exc()) + result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc) + + +def write_files_to_save(output_dir, check): + """Write files to check subdir in output dir.""" + if not output_dir: + return + output_dir = os.path.join(output_dir, check.name) + seen_file = defaultdict(lambda: 0) + for file_to_save in check.files_to_save: + fname = file_to_save.filename + while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed + seen_file[fname] += 1 + fname = "{}.{}".format(fname, seen_file[fname]) + seen_file[fname] += 1 + if file_to_save.remote_filename: + copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname) + else: + write_to_output_file(output_dir, fname, file_to_save.contents) def full_class_name(cls): diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index 349655966..dcaf87eca 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -10,6 +10,7 @@ import traceback from ansible.plugins.callback import CallbackBase from ansible import constants as C from ansible.utils.color import stringc +from ansible.module_utils.six import string_types FAILED_NO_MSG = u'Failed without returning a message.' @@ -140,11 +141,19 @@ def deduplicate_failures(failures): Returns a new list of failures such that identical failures from different hosts are grouped together in a single entry. The relative order of failures is preserved. + + If failures is unhashable, the original list of failures is returned. """ groups = defaultdict(list) for failure in failures: group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) - groups[group_key].append(failure) + try: + groups[group_key].append(failure) + except TypeError: + # abort and return original list of failures when failures has an + # unhashable type. 
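For context on the TypeError guard above: the group key is a tuple built from each failure's items, and a failure containing a list (or other unhashable value) cannot be used as a dictionary key, so deduplication falls back to the original list. A small self-contained sketch, using a made-up failure entry, of the situation the guard handles:

```python
from collections import defaultdict

groups = defaultdict(list)

# hypothetical failure entry whose 'msg' value is a list rather than a string
failure = {'host': 'master0', 'msg': ['first line', 'second line']}
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))

try:
    groups[group_key].append(failure)
except TypeError:
    # the key tuple contains a list, so it cannot be hashed; give up on deduplication
    print("unhashable failure; returning the original list of failures")
```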
+ return failures + result = [] for failure in failures: group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) @@ -159,7 +168,10 @@ def format_failure(failure): """Return a list of pretty-formatted text entries describing a failure, including relevant information about it. Expect that the list of text entries will be joined by a newline separator when output to the user.""" - host = u', '.join(failure['host']) + if isinstance(failure['host'], string_types): + host = failure['host'] + else: + host = u', '.join(failure['host']) play = failure['play'] task = failure['task'] msg = failure['msg'] diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py index 2e60735d6..c72f4c5b3 100644 --- a/roles/openshift_health_checker/library/ocutil.py +++ b/roles/openshift_health_checker/library/ocutil.py @@ -40,18 +40,17 @@ def main(): module = AnsibleModule( argument_spec=dict( - namespace=dict(type="str", required=True), + namespace=dict(type="str", required=False), config_file=dict(type="str", required=True), cmd=dict(type="str", required=True), extra_args=dict(type="list", default=[]), ), ) - cmd = [ - locate_oc_binary(), - '--config', module.params["config_file"], - '-n', module.params["namespace"], - ] + shlex.split(module.params["cmd"]) + cmd = [locate_oc_binary(), '--config', module.params["config_file"]] + if module.params["namespace"]: + cmd += ['-n', module.params["namespace"]] + cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"] failed = True try: diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index 02ee1d0f9..ce05b44a4 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -2,14 +2,18 @@ Health checks for OpenShift clusters. """ +import json import operator import os +import time +import collections from abc import ABCMeta, abstractmethod, abstractproperty from importlib import import_module from ansible.module_utils import six from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin +from ansible.module_utils.six import string_types from ansible.plugins.filter.core import to_bool as ansible_to_bool @@ -27,7 +31,7 @@ class OpenShiftCheckException(Exception): class OpenShiftCheckExceptionList(OpenShiftCheckException): - """A container for multiple logging errors that may be detected in one check.""" + """A container for multiple errors that may be detected in one check.""" def __init__(self, errors): self.errors = errors super(OpenShiftCheckExceptionList, self).__init__( @@ -40,26 +44,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException): return self.errors[index] +FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename") + + +# pylint: disable=too-many-instance-attributes; all represent significantly different state. +# Arguably they could be separated into two hashes, one for storing parameters, and one for +# storing result state; but that smells more like clutter than clarity. @six.add_metaclass(ABCMeta) class OpenShiftCheck(object): - """ - A base class for defining checks for an OpenShift cluster environment. + """A base class for defining checks for an OpenShift cluster environment. - Expect optional params: method execute_module, dict task_vars, and string tmp. 
+ Optional init params: method execute_module, dict task_vars, and string tmp execute_module is expected to have a signature compatible with _execute_module from ansible plugins/action/__init__.py, e.g.: def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args): This is stored so that it can be invoked in subclasses via check.execute_module("name", args) which provides the check's stored task_vars and tmp. + + Optional init param: want_full_results + If the check can gather logs, tarballs, etc., do so when True; but no need to spend + the time if they're not wanted (won't be written to output directory). """ - def __init__(self, execute_module=None, task_vars=None, tmp=None): + def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False): + # store a method for executing ansible modules from the check self._execute_module = execute_module + # the task variables and tmpdir passed into the health checker task self.task_vars = task_vars or {} self.tmp = tmp + # a boolean for disabling the gathering of results (files, computations) that won't + # actually be recorded/used + self.want_full_results = want_full_results + + # mainly for testing purposes; see execute_module_with_retries + self._module_retries = 3 + self._module_retry_interval = 5 # seconds + # state to be recorded for inspection after the check runs: + # # set to True when the check changes the host, for accurate total "changed" count self.changed = False + # list of OpenShiftCheckException for check to report (alternative to returning a failed result) + self.failures = [] + # list of FileToSave - files the check specifies to be written locally if so configured + self.files_to_save = [] + # log messages for the check - tuples of (description, msg) where msg is serializable. + # These are intended to be a sequential record of what the check observed and determined. + self.logs = [] @abstractproperty def name(self): @@ -80,9 +111,20 @@ class OpenShiftCheck(object): """Returns true if this check applies to the ansible-playbook run.""" return True + def is_first_master(self): + """Determine if running on first master. Returns: bool""" + masters = self.get_var("groups", "oo_first_master", default=None) or [None] + return masters[0] == self.get_var("ansible_host") + @abstractmethod def run(self): - """Executes a check, normally implemented as a module.""" + """Executes a check against a host and returns a result hash similar to Ansible modules. + + Actually the direction ahead is to record state in the attributes and + not bother building a result hash. Instead, return an empty hash and let + the action plugin fill it in. Or raise an OpenShiftCheckException. + Returning a hash may become deprecated if it does not prove necessary. + """ return {} @classmethod @@ -94,7 +136,43 @@ class OpenShiftCheck(object): for subclass in subclass.subclasses(): yield subclass - def execute_module(self, module_name=None, module_args=None): + def register_failure(self, error): + """Record in the check that a failure occurred. + + Recorded failures are merged into the result hash for now. They are also saved to output directory + (if provided) <check>.failures.json and registered as a log entry for context <check>.log.json. 
+ """ + # It should be an exception; make it one if not + if not isinstance(error, OpenShiftCheckException): + error = OpenShiftCheckException(str(error)) + self.failures.append(error) + # duplicate it in the logs so it can be seen in the context of any + # information that led to the failure + self.register_log("failure: " + error.name, str(error)) + + def register_log(self, context, msg): + """Record an entry for the check log. + + Notes are intended to serve as context of the whole sequence of what the check observed. + They are be saved as an ordered list in a local check log file. + They are not to included in the result or in the ansible log; it's just for the record. + """ + self.logs.append([context, msg]) + + def register_file(self, filename, contents=None, remote_filename=""): + """Record a file that a check makes available to be saved individually to output directory. + + Either file contents should be passed in, or a file to be copied from the remote host + should be specified. Contents that are not a string are to be serialized as JSON. + + NOTE: When copying a file from remote host, it is slurped into memory as base64, meaning + you should avoid using this on huge files (more than say 10M). + """ + if contents is None and not remote_filename: + raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.") + self.files_to_save.append(FileToSave(filename, contents, remote_filename)) + + def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True): """Invoke an Ansible module from a check. Invoke stored _execute_module, normally copied from the action @@ -106,6 +184,12 @@ class OpenShiftCheck(object): Ansible version). So e.g. check.execute_module("foo", dict(arg1=...)) + + save_as_name specifies a file name for saving the result to an output directory, + if needed, and is intended to uniquely identify the result of invoking execute_module. + If not provided, the module name will be used. + If register is set False, then the result won't be registered in logs or files to save. + Return: result hash from module execution. """ if self._execute_module is None: @@ -113,7 +197,33 @@ class OpenShiftCheck(object): self.__class__.__name__ + " invoked execute_module without providing the method at initialization." ) - return self._execute_module(module_name, module_args, self.tmp, self.task_vars) + result = self._execute_module(module_name, module_args, self.tmp, self.task_vars) + if result.get("changed"): + self.changed = True + for output in ["result", "stdout"]: + # output is often JSON; attempt to decode + try: + result[output + "_json"] = json.loads(result[output]) + except (KeyError, ValueError): + pass + + if register: + self.register_log("execute_module: " + module_name, result) + self.register_file(save_as_name or module_name + ".json", result) + return result + + def execute_module_with_retries(self, module_name, module_args): + """Run execute_module and retry on failure.""" + result = {} + tries = 0 + while True: + res = self.execute_module(module_name, module_args) + if tries > self._module_retries or not res.get("failed"): + result.update(res) + return result + result["last_failed"] = res + tries += 1 + time.sleep(self._module_retry_interval) def get_var(self, *keys, **kwargs): """Get deeply nested values from task_vars. @@ -171,8 +281,23 @@ class OpenShiftCheck(object): 'There is a bug in this check. 
While trying to convert variable \n' ' "{var}={value}"\n' 'the given converter cannot be used or failed unexpectedly:\n' - '{error}'.format(var=".".join(keys), value=value, error=error) - ) + '{type}: {error}'.format( + var=".".join(keys), + value=value, + type=error.__class__.__name__, + error=error + )) + + @staticmethod + def normalize(name_list): + """Return a clean list of names. + + The input may be a comma-separated string or a sequence. Leading and + trailing whitespace characters are removed. Empty items are discarded. + """ + if isinstance(name_list, string_types): + name_list = name_list.split(',') + return [name.strip() for name in name_list if name.strip()] @staticmethod def get_major_minor_version(openshift_image_tag): @@ -214,7 +339,9 @@ class OpenShiftCheck(object): mount_point = os.path.dirname(mount_point) try: - return mount_for_path[mount_point] + mount = mount_for_path[mount_point] + self.register_log("mount point for " + path, mount) + return mount except KeyError: known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path)) raise OpenShiftCheckException( @@ -242,7 +369,7 @@ def load_checks(path=None, subpkg=""): modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name) continue - if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES: + if name.endswith(".py") and name not in LOADER_EXCLUDES: modules.append(import_module(__package__ + subpkg + "." + name[:-3])) return modules diff --git a/roles/openshift_health_checker/openshift_checks/diagnostics.py b/roles/openshift_health_checker/openshift_checks/diagnostics.py new file mode 100644 index 000000000..1cfdc1129 --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/diagnostics.py @@ -0,0 +1,62 @@ +""" +A check to run relevant diagnostics via `oc adm diagnostics`. +""" + +import os + +from openshift_checks import OpenShiftCheck, OpenShiftCheckException + + +DIAGNOSTIC_LIST = ( + "AggregatedLogging ClusterRegistry ClusterRoleBindings ClusterRoles " + "ClusterRouter DiagnosticPod NetworkCheck" +).split() + + +class DiagnosticCheck(OpenShiftCheck): + """A check to run relevant diagnostics via `oc adm diagnostics`.""" + + name = "diagnostics" + tags = ["health"] + + def is_active(self): + return super(DiagnosticCheck, self).is_active() and self.is_first_master() + + def run(self): + if self.exec_diagnostic("ConfigContexts"): + # only run the other diagnostics if that one succeeds (otherwise, all will fail) + diagnostics = self.get_var("openshift_check_diagnostics", default=DIAGNOSTIC_LIST) + for diagnostic in self.normalize(diagnostics): + self.exec_diagnostic(diagnostic) + return {} + + def exec_diagnostic(self, diagnostic): + """ + Execute an 'oc adm diagnostics' command on the remote host. + Raises OcNotFound or registers OcDiagFailed. + Returns True on success or False on failure (non-zero rc). + """ + config_base = self.get_var("openshift.common.config_base") + args = { + "config_file": os.path.join(config_base, "master", "admin.kubeconfig"), + "cmd": "adm diagnostics", + "extra_args": [diagnostic], + } + + result = self.execute_module("ocutil", args, save_as_name=diagnostic + ".failure.json") + self.register_file(diagnostic + ".txt", result['result']) + if result.get("failed"): + if result['result'] == '[Errno 2] No such file or directory': + raise OpenShiftCheckException( + "OcNotFound", + "This host is supposed to be a master but does not have the `oc` command where expected.\n" + "Has an installation been run on this host yet?" 
+ ) + + self.register_failure(OpenShiftCheckException( + 'OcDiagFailed', + 'The {diag} diagnostic reported an error:\n' + '{error}'.format(diag=diagnostic, error=result['result']) + )) + return False + return True diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index f302fd14b..cdf56e959 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -70,6 +70,10 @@ class DiskAvailability(OpenShiftCheck): # If it is not a number, then it should be a nested dict. pass + self.register_log("recommended thresholds", self.recommended_disk_space_bytes) + if user_config: + self.register_log("user-configured thresholds", user_config) + # TODO: as suggested in # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021, # maybe we could support checking disk availability in paths that are @@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck): 'in your Ansible inventory, and lower the recommended disk space availability\n' 'if necessary for this upgrade.').format(config_bytes) - return { - 'failed': True, - 'msg': msg, - } + self.register_failure(msg) return {} diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 866c74d7c..98372d979 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -32,7 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # we use python-docker-py to check local docker for images, and skopeo # to look for images available remotely without waiting to pull them. 
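The change below turns the hard-coded skopeo invocation into a template with explicit registry and image placeholders that the check fills in per candidate registry. A minimal sketch of how the template expands (the registry and image used here are examples only):

```python
skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"

# example expansion for one registry/image pair
print(skopeo_img_check_command.format(
    registry="registry.access.redhat.com",
    image="openshift3/ose-pod:v3.7.0",
))
```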
dependencies = ["python-docker-py", "skopeo"] - skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false" + skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}" + + def __init__(self, *args, **kwargs): + super(DockerImageAvailability, self).__init__(*args, **kwargs) + # record whether we could reach a registry or not (and remember results) + self.reachable_registries = {} def is_active(self): """Skip hosts with unsupported deployment types.""" @@ -64,15 +69,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): unavailable_images = set(missing_images) - set(available_images) if unavailable_images: - return { - "failed": True, - "msg": ( - "One or more required Docker images are not available:\n {}\n" - "Configured registries: {}\n" - "Checked by: {}" - ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries), - self.skopeo_img_check_command), - } + registries = [ + reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)" + for reg in registries + ] + msg = ( + "One or more required Docker images are not available:\n {}\n" + "Configured registries: {}\n" + "Checked by: {}" + ).format( + ",\n ".join(sorted(unavailable_images)), + ", ".join(registries), + self.skopeo_img_check_command + ) + + return dict(failed=True, msg=msg) return {} @@ -98,8 +109,6 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # containerized etcd may not have openshift_image_tag, see bz 1466622 image_tag = self.get_var("openshift_image_tag", default="latest") image_info = DEPLOYMENT_IMAGE_INFO[deployment_type] - if not image_info: - return required # template for images that run on top of OpenShift image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") @@ -128,31 +137,31 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): def local_images(self, images): """Filter a list of images and return those available locally.""" - return [ - image for image in images - if self.is_image_local(image) - ] + registries = self.known_docker_registries() + found_images = [] + for image in images: + # docker could have the image name as-is or prefixed with any registry + imglist = [image] + [reg + "/" + image for reg in registries] + if self.is_image_local(imglist): + found_images.append(image) + return found_images def is_image_local(self, image): """Check if image is already in local docker index.""" result = self.execute_module("docker_image_facts", {"name": image}) - if result.get("failed", False): - return False - - return bool(result.get("images", [])) + return bool(result.get("images")) and not result.get("failed") def known_docker_registries(self): """Build a list of docker registries available according to inventory vars.""" - docker_facts = self.get_var("openshift", "docker") - regs = set(docker_facts["additional_registries"]) + regs = list(self.get_var("openshift.docker.additional_registries", default=[])) deployment_type = self.get_var("openshift_deployment_type") - if deployment_type == "origin": - regs.update(["docker.io"]) - elif "enterprise" in deployment_type: - regs.update(["registry.access.redhat.com"]) + if deployment_type == "origin" and "docker.io" not in regs: + regs.append("docker.io") + elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs: + regs.append("registry.access.redhat.com") - return list(regs) + return regs def available_images(self, images, default_registries): 
"""Search remotely for images. Returns: list of images found.""" @@ -165,17 +174,35 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): """Use Skopeo to determine if required image exists in known registry(s).""" registries = default_registries - # if image already includes a registry, only use that + # If image already includes a registry, only use that. + # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g. + # registry.access.redhat.com/rhel7 as if the registry were a namespace. + # It's not clear that there's any way to distinguish them, but fortunately + # the current set of images all look like [registry/]namespace/name[:version]. if image.count("/") > 1: registry, image = image.split("/", 1) registries = [registry] for registry in registries: - args = { - "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image) - } - result = self.execute_module("command", args) + if registry not in self.reachable_registries: + self.reachable_registries[registry] = self.connect_to_registry(registry) + if not self.reachable_registries[registry]: + continue + + args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)} + result = self.execute_module_with_retries("command", args) if result.get("rc", 0) == 0 and not result.get("failed"): return True + if result.get("rc") == 124: # RC 124 == timed out; mark unreachable + self.reachable_registries[registry] = False return False + + def connect_to_registry(self, registry): + """Use ansible wait_for module to test connectivity from host to registry. Returns bool.""" + # test a simple TCP connection + host, _, port = registry.partition(":") + port = port or 443 + args = dict(host=host, port=port, state="started", timeout=30) + result = self.execute_module("wait_for", args) + return result.get("rc", 0) == 0 and not result.get("failed") diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py index e5d93ff3f..79955cb2f 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py @@ -16,7 +16,7 @@ class EtcdVolume(OpenShiftCheck): def is_active(self): etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or [] - is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts + is_etcd_host = self.get_var("ansible_host") in etcd_hosts return super(EtcdVolume, self).is_active() and is_etcd_host def run(self): diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py index 7fc843fd7..986a01f38 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py +++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py @@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck): for pod_name in pods_by_name.keys(): # Compare what each ES node reports as master and compare for split brain get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master") - master_name_str = self.exec_oc(get_master_cmd, []) + master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json") master_names = (master_name_str or '').split(' ') if len(master_names) > 1: es_master_names.add(master_names[1]) @@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck): # get ES cluster nodes 
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes') - cluster_node_data = self.exec_oc(node_cmd, []) + cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json") try: cluster_nodes = json.loads(cluster_node_data)['nodes'] except (ValueError, KeyError): @@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck): errors = [] for pod_name in pods_by_name.keys(): cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true') - cluster_health_data = self.exec_oc(cluster_health_cmd, []) + cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json') try: health_res = json.loads(cluster_health_data) if not health_res or not health_res.get('status'): @@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck): errors = [] for pod_name in pods_by_name.keys(): df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name) - disk_output = self.exec_oc(df_cmd, []) + disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json') lines = disk_output.splitlines() # expecting one header looking like 'IUse% Use%' and one body line body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$' diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py index ecd8adb64..05ba73ca1 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/logging.py +++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py @@ -30,14 +30,6 @@ class LoggingCheck(OpenShiftCheck): logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False) return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master() - def is_first_master(self): - """Determine if running on first master. Returns: bool""" - # Note: It would be nice to use membership in oo_first_master group, however for now it - # seems best to avoid requiring that setup and just check this is the first master. - hostname = self.get_var("ansible_ssh_host") or [None] - masters = self.get_var("groups", "masters", default=None) or [None] - return masters[0] == hostname - def run(self): return {} @@ -78,7 +70,7 @@ class LoggingCheck(OpenShiftCheck): """Returns the namespace in which logging is configured to deploy.""" return self.get_var("openshift_logging_namespace", default="logging") - def exec_oc(self, cmd_str="", extra_args=None): + def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None): """ Execute an 'oc' command in the remote host. 
Returns: output of command and namespace, @@ -92,7 +84,7 @@ class LoggingCheck(OpenShiftCheck): "extra_args": list(extra_args) if extra_args else [], } - result = self.execute_module("ocutil", args) + result = self.execute_module("ocutil", args, save_as_name=save_as_name) if result.get("failed"): if result['result'] == '[Errno 2] No such file or directory': raise CouldNotUseOc( diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py index d781db649..cacdf4213 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py +++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py @@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck): "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}" ) exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid) - result = self.exec_oc(exec_cmd, []) + result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json") try: count = json.loads(result)["count"] diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index e9bae60a3..b90ebf6dd 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -36,7 +36,7 @@ class DockerHostMixin(object): # NOTE: we would use the "package" module but it's actually an action plugin # and it's not clear how to invoke one of those. This is about the same anyway: - result = self.execute_module( + result = self.execute_module_with_retries( self.get_var("ansible_pkg_mgr", default="yum"), {"name": self.dependencies, "state": "present"}, ) @@ -49,5 +49,4 @@ class DockerHostMixin(object): " {deps}\n{msg}" ).format(deps=',\n '.join(self.dependencies), msg=msg) failed = result.get("failed", False) or result.get("rc", 0) != 0 - self.changed = result.get("changed", False) return msg, failed diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py index a86180b00..21355c2f0 100644 --- a/roles/openshift_health_checker/openshift_checks/package_availability.py +++ b/roles/openshift_health_checker/openshift_checks/package_availability.py @@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck): packages.update(self.node_packages(rpm_prefix)) args = {"packages": sorted(set(packages))} - return self.execute_module("check_yum_update", args) + return self.execute_module_with_retries("check_yum_update", args) @staticmethod def master_packages(rpm_prefix): diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py index 1e9aecbe0..8464e8a5e 100644 --- a/roles/openshift_health_checker/openshift_checks/package_update.py +++ b/roles/openshift_health_checker/openshift_checks/package_update.py @@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck): def run(self): args = {"packages": []} - return self.execute_module("check_yum_update", args) + return self.execute_module_with_retries("check_yum_update", args) diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index 0b795b6c4..d4aec3ed8 100644 --- 
a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -76,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): ], } - return self.execute_module("aos_version", args) + return self.execute_module_with_retries("aos_version", args) def get_required_ovs_version(self): """Return the correct Open vSwitch version(s) for the current OpenShift version.""" diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py index c109ebd24..f14887303 100644 --- a/roles/openshift_health_checker/test/action_plugin_test.py +++ b/roles/openshift_health_checker/test/action_plugin_test.py @@ -3,10 +3,12 @@ import pytest from ansible.playbook.play_context import PlayContext from openshift_health_check import ActionModule, resolve_checks -from openshift_checks import OpenShiftCheckException +from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file +from openshift_checks import OpenShiftCheckException, FileToSave -def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False): +def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, + run_logs=None, run_files=None, changed=False, get_var_return=None): """Returns a new class that is compatible with OpenShiftCheck for testing.""" _name, _tags = name, tags @@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru class FakeCheck(object): name = _name tags = _tags or [] - changed = False - def __init__(self, execute_module=None, task_vars=None, tmp=None): - pass + def __init__(self, **_): + self.changed = False + self.failures = [] + self.logs = run_logs or [] + self.files_to_save = run_files or [] def is_active(self): + if isinstance(is_active, Exception): + raise is_active return is_active def run(self): @@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru raise run_exception return run_return + def get_var(*args, **_): + return get_var_return + + def register_failure(self, exc): + self.failures.append(OpenShiftCheckException(str(exc))) + return + return FakeCheck @@ -98,23 +111,33 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck']) -def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch): - checks = [fake_check(is_active=False)] +@pytest.mark.parametrize('is_active, skipped_reason', [ + (False, "Not active for this host"), + (Exception("borked"), "exception"), +]) +def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch): + checks = [fake_check(is_active=is_active)] monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks)) result = plugin.run(tmp=None, task_vars=task_vars) - assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host") + assert result['checks']['fake_check'].get('skipped') + assert skipped_reason in result['checks']['fake_check'].get('skipped_reason') assert not failed(result) assert not changed(result) assert not skipped(result) -def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch): +@pytest.mark.parametrize('to_disable', [ + 'fake_check', + ['fake_check', 'spam'], + 
'*,spam,eggs', +]) +def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch): checks = [fake_check('fake_check', is_active=True)] monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks)) - task_vars['openshift_disable_check'] = 'fake_check' + task_vars['openshift_disable_check'] = to_disable result = plugin.run(tmp=None, task_vars=task_vars) assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request") @@ -123,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch): assert not skipped(result) +def test_action_plugin_run_list_checks(monkeypatch): + task = FakeTask('openshift_health_check', {'checks': []}) + plugin = ActionModule(task, None, PlayContext(), None, None, None) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {}) + result = plugin.run() + + assert failed(result, msg_has="Available checks") + assert not changed(result) + assert not skipped(result) + + def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch): check_return_value = {'ok': 'test'} - check_class = fake_check(run_return=check_return_value) - monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()}) + check_class = fake_check(run_return=check_return_value, run_files=[None]) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()}) monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) result = plugin.run(tmp=None, task_vars=task_vars) @@ -140,7 +174,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch): def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch): check_return_value = {'ok': 'test'} check_class = fake_check(run_return=check_return_value, changed=True) - monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()}) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()}) monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) result = plugin.run(tmp=None, task_vars=task_vars) @@ -153,9 +187,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch): def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch): - check_return_value = {'failed': True} + check_return_value = {'failed': True, 'msg': 'this is a failure'} check_class = fake_check(run_return=check_return_value) - monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()}) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()}) monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) result = plugin.run(tmp=None, task_vars=task_vars) @@ -166,24 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch): assert not skipped(result) -def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch): +@pytest.mark.parametrize('exc_class, expect_traceback', [ + (OpenShiftCheckException, False), + (Exception, True), +]) +def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch): exception_msg = 'fake check has an exception' - run_exception = OpenShiftCheckException(exception_msg) + run_exception = exc_class(exception_msg) check_class = fake_check(run_exception=run_exception, changed=True) - 
monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()}) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()}) monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) result = plugin.run(tmp=None, task_vars=task_vars) assert failed(result['checks']['fake_check'], msg_has=exception_msg) + assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg']) assert failed(result, msg_has=['failed']) assert changed(result['checks']['fake_check']) assert changed(result) assert not skipped(result) +def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch): + check_class = fake_check( + run_return={}, + run_logs=[('thing', 'note')], + run_files=[ + FileToSave('save.file', 'contents', None), + FileToSave('save.file', 'duplicate', None), + FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception + ], + ) + task_vars['openshift_checks_output_dir'] = str(tmpdir) + check_class.get_var = lambda self, name, **_: task_vars.get(name) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()}) + monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) + + plugin.run(tmp=None, task_vars=task_vars) + assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir()) + assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit()) + assert any(path.basename == 'save.file' for path in tmpdir.visit()) + assert any(path.basename == 'save.file.2' for path in tmpdir.visit()) + + def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch): - monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {}) + monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {}) result = plugin.run(tmp=None, task_vars=task_vars) @@ -249,3 +310,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception): resolve_checks(names, all_checks) for word in words_in_exception: assert word in str(excinfo.value) + + +@pytest.mark.parametrize('give_output_dir, result, expect_file', [ + (False, None, False), + (True, dict(content="c3BhbQo=", encoding="base64"), True), + (True, dict(content="encoding error", encoding="base64"), False), + (True, dict(content="spam", no_encoding=None), True), + (True, dict(failed=True, msg="could not slurp"), False), +]) +def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir): + check = fake_check()() + check.execute_module = lambda *args, **_: result + copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file") + assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir()) + + +def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys): + + class Spam(object): + def __str__(self): + raise Exception("break str") + + test = {1: object(), 2: Spam()} + test[3] = test + write_result_to_output_dir(str(tmpdir), test) + assert "Error writing" in test["output_files"] + + output_dir = tmpdir.join("eggs") + output_dir.write("spam") # so now it's not a dir + write_to_output_file(str(output_dir), "somefile", "somedata") + assert "Could not write" in capsys.readouterr()[1] + + monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False) + write_result_to_output_dir(str(tmpdir), test) + assert "Error creating" in test["output_files"] diff --git 
a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py new file mode 100644 index 000000000..800889fa7 --- /dev/null +++ b/roles/openshift_health_checker/test/diagnostics_test.py @@ -0,0 +1,50 @@ +import pytest + +from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException + + +@pytest.fixture() +def task_vars(): + return dict( + openshift=dict( + common=dict(config_base="/etc/origin/") + ) + ) + + +def test_module_succeeds(task_vars): + check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars) + check.is_first_master = lambda: True + assert check.is_active() + check.exec_diagnostic("spam") + assert not check.failures + + +def test_oc_not_there(task_vars): + def exec_module(*_): + return {"failed": True, "result": "[Errno 2] No such file or directory"} + + check = DiagnosticCheck(exec_module, task_vars) + with pytest.raises(OpenShiftCheckException) as excinfo: + check.exec_diagnostic("spam") + assert excinfo.value.name == "OcNotFound" + + +def test_module_fails(task_vars): + def exec_module(*_): + return {"failed": True, "result": "something broke"} + + check = DiagnosticCheck(exec_module, task_vars) + check.exec_diagnostic("spam") + assert check.failures and check.failures[0].name == "OcDiagFailed" + + +def test_names_executed(task_vars): + task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs" + + def exec_module(module, args, *_): + assert "extra_args" in args + assert args["extra_args"][0] in diagnostics + return {"result": "success"} + + DiagnosticCheck(exec_module, task_vars).run() diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py index f4fd2dfed..9ae679b79 100644 --- a/roles/openshift_health_checker/test/disk_availability_test.py +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -183,11 +183,12 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a ansible_mounts=ansible_mounts, ) - result = DiskAvailability(fake_execute_module, task_vars).run() + check = DiskAvailability(fake_execute_module, task_vars) + check.run() - assert result['failed'] + assert check.failures for chunk in 'below recommended'.split() + expect_chunks: - assert chunk in result.get('msg', '') + assert chunk in str(check.failures[0]) @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [ @@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont ) check = DiskAvailability(fake_execute_module, task_vars) - result = check.run() + check.run() - assert result.get("failed", False) == failed + assert bool(check.failures) == failed for word in extra_words: - assert word in result.get('msg', '') + assert word in str(check.failures[0]) def fake_execute_module(*args): diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index 8d0a53df9..952fa9aa6 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -3,11 +3,26 @@ import pytest from openshift_checks.docker_image_availability import DockerImageAvailability +@pytest.fixture() +def task_vars(): + return dict( + openshift=dict( + common=dict( + service_type='origin', + is_containerized=False, + is_atomic=False, + ), + docker=dict(), + ), 
+ openshift_deployment_type='origin', + openshift_image_tag='', + group_names=['nodes', 'masters'], + ) + + @pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [ ("origin", True, [], True), ("openshift-enterprise", True, [], True), - ("enterprise", True, [], False), - ("online", True, [], False), ("invalid", True, [], False), ("", True, [], False), ("origin", False, [], False), @@ -15,12 +30,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability ("origin", False, ["nodes", "masters"], True), ("openshift-enterprise", False, ["etcd"], False), ]) -def test_is_active(deployment_type, is_containerized, group_names, expect_active): - task_vars = dict( - openshift=dict(common=dict(is_containerized=is_containerized)), - openshift_deployment_type=deployment_type, - group_names=group_names, - ) +def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active): + task_vars['openshift_deployment_type'] = deployment_type + task_vars['openshift']['common']['is_containerized'] = is_containerized + task_vars['group_names'] = group_names assert DockerImageAvailability(None, task_vars).is_active() == expect_active @@ -30,10 +43,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active (True, False), (False, True), ]) -def test_all_images_available_locally(is_containerized, is_atomic): +def test_all_images_available_locally(task_vars, is_containerized, is_atomic): def execute_module(module_name, module_args, *_): if module_name == "yum": - return {"changed": True} + return {} assert module_name == "docker_image_facts" assert 'name' in module_args @@ -42,19 +55,9 @@ def test_all_images_available_locally(is_containerized, is_atomic): 'images': [module_args['name']], } - result = DockerImageAvailability(execute_module, task_vars=dict( - openshift=dict( - common=dict( - service_type='origin', - is_containerized=is_containerized, - is_atomic=is_atomic, - ), - docker=dict(additional_registries=["docker.io"]), - ), - openshift_deployment_type='origin', - openshift_image_tag='3.4', - group_names=['nodes', 'masters'], - )).run() + task_vars['openshift']['common']['is_containerized'] = is_containerized + task_vars['openshift']['common']['is_atomic'] = is_atomic + result = DockerImageAvailability(execute_module, task_vars).run() assert not result.get('failed', False) @@ -63,30 +66,42 @@ def test_all_images_available_locally(is_containerized, is_atomic): False, True, ]) -def test_all_images_available_remotely(available_locally): +def test_all_images_available_remotely(task_vars, available_locally): def execute_module(module_name, *_): if module_name == 'docker_image_facts': return {'images': [], 'failed': available_locally} - return {'changed': False} + return {} - result = DockerImageAvailability(execute_module, task_vars=dict( - openshift=dict( - common=dict( - service_type='origin', - is_containerized=False, - is_atomic=False, - ), - docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]), - ), - openshift_deployment_type='origin', - openshift_image_tag='v3.4', - group_names=['nodes', 'masters'], - )).run() + task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"] + task_vars['openshift_image_tag'] = 'v3.4' + check = DockerImageAvailability(execute_module, task_vars) + check._module_retry_interval = 0 + result = check.run() assert not result.get('failed', False) -def test_all_images_unavailable(): +def 
test_all_images_unavailable(task_vars): + def execute_module(module_name=None, *args): + if module_name == "wait_for": + return {} + elif module_name == "command": + return {'failed': True} + + return {} # docker_image_facts failure + + task_vars['openshift']['docker']['additional_registries'] = ["docker.io"] + task_vars['openshift_deployment_type'] = "openshift-enterprise" + task_vars['openshift_image_tag'] = 'latest' + check = DockerImageAvailability(execute_module, task_vars) + check._module_retry_interval = 0 + actual = check.run() + + assert actual['failed'] + assert "required Docker images are not available" in actual['msg'] + + +def test_no_known_registries(): def execute_module(module_name=None, *_): if module_name == "command": return { @@ -97,7 +112,10 @@ def test_all_images_unavailable(): 'changed': False, } - actual = DockerImageAvailability(execute_module, task_vars=dict( + def mock_known_docker_registries(): + return [] + + dia = DockerImageAvailability(execute_module, task_vars=dict( openshift=dict( common=dict( service_type='origin', @@ -109,10 +127,11 @@ def test_all_images_unavailable(): openshift_deployment_type="openshift-enterprise", openshift_image_tag='latest', group_names=['nodes', 'masters'], - )).run() - + )) + dia.known_docker_registries = mock_known_docker_registries + actual = dia.run() assert actual['failed'] - assert "required Docker images are not available" in actual['msg'] + assert "Unable to retrieve any docker registries." in actual['msg'] @pytest.mark.parametrize("message,extra_words", [ @@ -125,62 +144,63 @@ def test_all_images_unavailable(): ["dependencies can be installed via `yum`"] ), ]) -def test_skopeo_update_failure(message, extra_words): +def test_skopeo_update_failure(task_vars, message, extra_words): def execute_module(module_name=None, *_): if module_name == "yum": return { "failed": True, "msg": message, - "changed": False, } - return {'changed': False} + return {} - actual = DockerImageAvailability(execute_module, task_vars=dict( - openshift=dict( - common=dict( - service_type='origin', - is_containerized=False, - is_atomic=False, - ), - docker=dict(additional_registries=["unknown.io"]), - ), - openshift_deployment_type="openshift-enterprise", - openshift_image_tag='', - group_names=['nodes', 'masters'], - )).run() + task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"] + task_vars['openshift_deployment_type'] = "openshift-enterprise" + check = DockerImageAvailability(execute_module, task_vars) + check._module_retry_interval = 0 + actual = check.run() assert actual["failed"] for word in extra_words: assert word in actual["msg"] -@pytest.mark.parametrize("deployment_type,registries", [ - ("origin", ["unknown.io"]), - ("openshift-enterprise", ["registry.access.redhat.com"]), - ("openshift-enterprise", []), -]) -def test_registry_availability(deployment_type, registries): +@pytest.mark.parametrize( + "image, registries, connection_test_failed, skopeo_failed, " + "expect_success, expect_registries_reached", [ + ( + "spam/eggs:v1", ["test.reg"], + True, True, + False, + {"test.reg": False}, + ), + ( + "spam/eggs:v1", ["test.reg"], + False, True, + False, + {"test.reg": True}, + ), + ( + "eggs.reg/spam/eggs:v1", ["test.reg"], + False, False, + True, + {"eggs.reg": True}, + ), + ]) +def test_registry_availability(image, registries, connection_test_failed, skopeo_failed, + expect_success, expect_registries_reached): def execute_module(module_name=None, *_): - return { - 'changed': False, - } + if module_name == "wait_for": + 
return dict(msg="msg", failed=connection_test_failed) + elif module_name == "command": + return dict(msg="msg", failed=skopeo_failed) - actual = DockerImageAvailability(execute_module, task_vars=dict( - openshift=dict( - common=dict( - service_type='origin', - is_containerized=False, - is_atomic=False, - ), - docker=dict(additional_registries=registries), - ), - openshift_deployment_type=deployment_type, - openshift_image_tag='', - group_names=['nodes', 'masters'], - )).run() + check = DockerImageAvailability(execute_module, task_vars()) + check._module_retry_interval = 0 - assert not actual.get("failed", False) + available = check.is_available_skopeo_image(image, registries) + assert available == expect_success + assert expect_registries_reached == check.reachable_registries @pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [ @@ -257,7 +277,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex openshift_image_tag='vtest', ) - assert expected == DockerImageAvailability("DUMMY", task_vars).required_images() + assert expected == DockerImageAvailability(task_vars=task_vars).required_images() def test_containerized_etcd(): @@ -271,4 +291,4 @@ def test_containerized_etcd(): group_names=['etcd'], ) expected = set(['registry.access.redhat.com/rhel7/etcd']) - assert expected == DockerImageAvailability("DUMMY", task_vars).required_images() + assert expected == DockerImageAvailability(task_vars=task_vars).required_images() diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py index 09bacd9ac..3fa5e8929 100644 --- a/roles/openshift_health_checker/test/elasticsearch_test.py +++ b/roles/openshift_health_checker/test/elasticsearch_test.py @@ -72,7 +72,7 @@ def test_check_elasticsearch(): assert_error_in_list('NoRunningPods', excinfo.value) # canned oc responses to match so all the checks pass - def exec_oc(cmd, args): + def exec_oc(cmd, args, **_): if '_cat/master' in cmd: return 'name logging-es' elif '/_nodes' in cmd: @@ -97,7 +97,7 @@ def test_check_running_es_pods(): def test_check_elasticsearch_masters(): pods = [plain_es_pod] - check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str']) + check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str']) assert not check.check_elasticsearch_masters(pods_by_name(pods)) @@ -117,7 +117,7 @@ def test_check_elasticsearch_masters(): ]) def test_check_elasticsearch_masters_error(pods, expect_error): test_pods = list(pods) - check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str']) + check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str']) assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods))) @@ -129,7 +129,7 @@ es_node_list = { def test_check_elasticsearch_node_list(): - check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list)) + check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list)) assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod])) @@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list(): ), ]) def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error): - check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list)) + check = 
canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list)) assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods))) def test_check_elasticsearch_cluster_health(): test_health_data = [{"status": "green"}] - check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0))) + check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0))) assert not check.check_es_cluster_health(pods_by_name([plain_es_pod])) @@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health(): ]) def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error): test_health_data = list(health_data) - check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0))) + check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0))) assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods))) def test_check_elasticsearch_diskspace(): - check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n') + check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n') assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])) @@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace(): ), ]) def test_check_elasticsearch_diskspace_errors(disk_data, expect_error): - check = canned_elasticsearch(exec_oc=lambda *_: disk_data) + check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data) assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))) diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py index 1a1c190f6..59c703214 100644 --- a/roles/openshift_health_checker/test/logging_check_test.py +++ b/roles/openshift_health_checker/test/logging_check_test.py @@ -98,21 +98,19 @@ def test_oc_failure(problem, expect): assert expect in str(excinfo) -groups_with_first_master = dict(masters=['this-host', 'other-host']) -groups_with_second_master = dict(masters=['other-host', 'this-host']) -groups_not_a_master = dict(masters=['other-host']) +groups_with_first_master = dict(oo_first_master=['this-host']) +groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host']) @pytest.mark.parametrize('groups, logging_deployed, is_active', [ (groups_with_first_master, True, True), (groups_with_first_master, False, False), (groups_not_a_master, True, False), - (groups_with_second_master, True, False), (groups_not_a_master, True, False), ]) def test_is_active(groups, logging_deployed, is_active): task_vars = dict( - ansible_ssh_host='this-host', + ansible_host='this-host', groups=groups, openshift_hosted_logging_deploy=logging_deployed, ) diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py index 22566b295..c48ade9b8 100644 --- a/roles/openshift_health_checker/test/logging_index_time_test.py +++ b/roles/openshift_health_checker/test/logging_index_time_test.py @@ -102,7 +102,7 @@ def test_with_running_pods(): ), ], ids=lambda argval: argval[0]) def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout): - check = canned_loggingindextime(lambda *_: json.dumps(json_response)) + check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response)) check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout) @@ -131,7 +131,7 @@ def 
test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout): ) ], ids=lambda argval: argval[0]) def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error): - check = canned_loggingindextime(lambda *_: json.dumps(json_response)) + check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response)) with pytest.raises(OpenShiftCheckException) as error: check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout) @@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error): def test_curl_kibana_with_uuid(): - check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404})) + check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404})) check.generate_uuid = lambda: SAMPLE_UUID assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod) @@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid(): ), ], ids=lambda argval: argval[0]) def test_failed_curl_kibana_with_uuid(name, json_response, expect_error): - check = canned_loggingindextime(lambda *_: json.dumps(json_response)) + check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response)) check.generate_uuid = lambda: SAMPLE_UUID with pytest.raises(OpenShiftCheckException) as error: diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py index 789784c77..bc0c3b26c 100644 --- a/roles/openshift_health_checker/test/openshift_check_test.py +++ b/roles/openshift_health_checker/test/openshift_check_test.py @@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected): assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected -@pytest.mark.parametrize("keys, convert", [ - (("bar", "baz"), int), - (("bar.baz"), float), - (("foo"), "bogus"), - (("foo"), lambda a, b: 1), - (("foo"), lambda a: 1 / 0), +def convert_oscexc(_): + raise OpenShiftCheckException("known failure") + + +def convert_exc(_): + raise Exception("failure unknown") + + +@pytest.mark.parametrize("keys, convert, expect_text", [ + (("bar", "baz"), int, "Cannot convert"), + (("bar.baz",), float, "Cannot convert"), + (("foo",), "bogus", "TypeError"), + (("foo",), lambda a, b: 1, "TypeError"), + (("foo",), lambda a: 1 / 0, "ZeroDivisionError"), + (("foo",), convert_oscexc, "known failure"), + (("foo",), convert_exc, "failure unknown"), ]) -def test_get_var_convert_error(task_vars, keys, convert): - with pytest.raises(OpenShiftCheckException): +def test_get_var_convert_error(task_vars, keys, convert, expect_text): + with pytest.raises(OpenShiftCheckException) as excinfo: dummy_check(task_vars).get_var(*keys, convert=convert) + assert expect_text in str(excinfo.value) + + +def test_register(task_vars): + check = dummy_check(task_vars) + + check.register_failure(OpenShiftCheckException("spam")) + assert "spam" in str(check.failures[0]) + + with pytest.raises(OpenShiftCheckException) as excinfo: + check.register_file("spam") # no file contents specified + assert "not specified" in str(excinfo.value) + + # normally execute_module registers the result file; test disabling that + check._execute_module = lambda *args, **_: dict() + check.execute_module("eggs", module_args={}, register=False) + assert not check.files_to_save diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index e1bf29d2a..602f32989 100644 --- 
a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): openshift_release=openshift_release, openshift_image_tag='v' + openshift_release, ) - return_value = object() + return_value = {} # note: check.execute_module modifies return hash contents def execute_module(module_name=None, module_args=None, *_): assert module_name == 'rpm_version' diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index 1fe648b75..b34e8fbfc 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -49,14 +49,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ), ]) def test_package_availability(task_vars, must_have_packages, must_not_have_packages): - return_value = object() + return_value = {} def execute_module(module_name=None, module_args=None, *_): assert module_name == 'check_yum_update' assert 'packages' in module_args assert set(module_args['packages']).issuperset(must_have_packages) assert not set(module_args['packages']).intersection(must_not_have_packages) - return return_value + return {'foo': return_value} result = PackageAvailability(execute_module, task_vars).run() - assert result is return_value + assert result['foo'] is return_value diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py index 06489b0d7..85d3c9cab 100644 --- a/roles/openshift_health_checker/test/package_update_test.py +++ b/roles/openshift_health_checker/test/package_update_test.py @@ -2,14 +2,14 @@ from openshift_checks.package_update import PackageUpdate def test_package_update(): - return_value = object() + return_value = {} def execute_module(module_name=None, module_args=None, *_): assert module_name == 'check_yum_update' assert 'packages' in module_args # empty list of packages means "generic check if 'yum update' will work" assert module_args['packages'] == [] - return return_value + return {'foo': return_value} result = PackageUpdate(execute_module).run() - assert result is return_value + assert result['foo'] is return_value diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index e871f39f0..8564cd4db 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -52,7 +52,7 @@ def test_invalid_openshift_release_format(): ]) def test_package_version(openshift_release): - return_value = object() + return_value = {"foo": object()} def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_): assert module_name == 'aos_version' @@ -66,7 +66,7 @@ def test_package_version(openshift_release): check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin')) result = check.run() - assert result is return_value + assert result == return_value @pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [ @@ -79,7 +79,7 @@ def test_package_version(openshift_release): ]) def test_docker_package_version(deployment_type, openshift_release, expected_docker_version): - return_value = object() + return_value = {"foo": object()} def execute_module(module_name=None, module_args=None, *_): 
assert module_name == 'aos_version' @@ -93,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type)) result = check.run() - assert result is return_value + assert result == return_value @pytest.mark.parametrize('group_names,is_containerized,is_active', [ diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py index 0fc258133..69f27653c 100644 --- a/roles/openshift_health_checker/test/zz_failure_summary_test.py +++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py @@ -65,6 +65,21 @@ import pytest }, ], ), + # if a failure contain an unhashable value, it will not be deduplicated + ( + [ + { + 'host': 'master1', + 'msg': {'unhashable': 'value'}, + }, + ], + [ + { + 'host': 'master1', + 'msg': {'unhashable': 'value'}, + }, + ], + ), ]) def test_deduplicate_failures(failures, deduplicated): assert deduplicate_failures(failures) == deduplicated diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 08c1d849e..712a2a591 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}" -openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}" +openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" +openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" registry_volume_claim: 'registry-claim' diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index d73c290ff..48f53aef8 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -137,7 +137,7 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" -- when: openshift_hosted_registry_wait +- when: openshift_hosted_registry_wait | bool block: - name: Ensure OpenShift registry correctly rolls out (best-effort today) command: | diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml index a8a6f6fc8..434b679df 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml +++ b/roles/openshift_hosted/tasks/registry/secure.yml @@ -1,7 +1,7 @@ --- - name: Configure facts for docker-registry set_fact: - openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}" + openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}" openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}" openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | 
ternary(openshift.hosted.registry.routetermination, 'passthrough') }}" diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 68ec7233e..2a42b5a7c 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -94,7 +94,7 @@ stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" -- when: openshift_hosted_router_wait +- when: openshift_hosted_router_wait | bool block: - name: Ensure OpenShift router correctly rolls out (best-effort today) command: | diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 631bf3e2a..53d1a8bc7 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -8,9 +8,10 @@ - name: Set hosted facts openshift_facts: - role: hosted + role: "{{ item }}" openshift_env: "{{ hostvars | oo_merge_hostvars(vars, inventory_hostname) | oo_openshift_env }}" openshift_env_structures: - 'openshift.hosted.router.*' + with_items: [hosted, logging, loggingops, metrics] diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md deleted file mode 100644 index 680303853..000000000 --- a/roles/openshift_hosted_logging/README.md +++ /dev/null @@ -1,40 +0,0 @@ -###Required vars: - -- openshift_hosted_logging_hostname: kibana.example.com -- openshift_hosted_logging_elasticsearch_cluster_size: 1 -- openshift_hosted_logging_master_public_url: https://localhost:8443 - -###Optional vars: -- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/ -- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead -- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software. -- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key -- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3 -- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying. -- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory). -- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead. -- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`. -- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster). 
-- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534). -- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value) -- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to. -- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true". -- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value) -- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value) -- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs. -- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE* -- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location. -- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources. - -When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some -additional vars. These work the same as above for their non-ops counterparts, -but apply to the OPS cluster instance: -- openshift_hosted_logging_ops_hostname: kibana-ops.example.com -- openshift_hosted_logging_elasticsearch_ops_cluster_size -- openshift_hosted_logging_elasticsearch_ops_instance_ram -- openshift_hosted_logging_elasticsearch_ops_pvc_size -- openshift_hosted_logging_elasticsearch_ops_pvc_prefix -- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic -- openshift_hosted_logging_elasticsearch_ops_nodeselector -- openshift_hosted_logging_kibana_ops_nodeselector -- openshift_hosted_logging_curator_ops_nodeselector diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml deleted file mode 100644 index a01f24df8..000000000 --- a/roles/openshift_hosted_logging/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml deleted file mode 100644 index d7e83fe9a..000000000 --- a/roles/openshift_hosted_logging/handlers/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
- command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml deleted file mode 100644 index ab07a77c1..000000000 --- a/roles/openshift_hosted_logging/meta/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - { role: openshift_master_facts } diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml deleted file mode 100644 index 70b0d67a4..000000000 --- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: False - -- name: "Checking for logging project" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging" - register: logging_project - failed_when: "'FAILED' in logging_project.stderr" - -- name: "Changing projects" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging" - - -- name: "Cleanup any previous logging infrastructure" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}" - with_items: - - kibana - - fluentd - - elasticsearch - ignore_errors: yes - -- name: "Cleanup existing support infrastructure" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support" - ignore_errors: yes - -- name: "Cleanup existing secrets" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy" - ignore_errors: yes - register: clean_result - failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr - -- name: "Cleanup existing logging deployers" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all" - - -- name: "Cleanup logging project" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging" - - -- name: "Remove deployer template" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift" - register: delete_output - failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr - - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False - -- debug: msg="Success!" 
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml deleted file mode 100644 index 78b624109..000000000 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- -- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead" - when: target_registry is defined and target_registry - -- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size" - when: "openshift_hosted_logging_hostname is not defined or - openshift_hosted_logging_elasticsearch_cluster_size is not defined or - openshift_hosted_logging_master_public_url is not defined" - -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: False - -- name: "Check for logging project already exists" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}' - register: logging_project_result - ignore_errors: True - -- name: "Create logging project" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging - when: logging_project_result.stdout == "" - -- name: "Changing projects" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging - -- name: "Creating logging deployer secret" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }} - register: secret_output - failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr - -- name: "Create templates for logging accounts and the deployer" - command: > - {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig - -f {{ hosted_base }}/logging-deployer.yaml - --config={{ mktemp.stdout }}/admin.kubeconfig - -n logging - register: logging_import_template - failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0" - changed_when: "'created' in logging_import_template.stdout" - -- name: "Process the logging accounts template" - shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig - process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f - - register: process_deployer_accounts - failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr - -- name: "Set permissions for logging-deployer service account" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer - register: permiss_output - failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr - -- name: "Set permissions for fluentd" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy 
add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd - register: fluentd_output - failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr - -- name: "Set additional permissions for fluentd" - command: > - {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig - add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd - register: fluentd2_output - failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr - -- name: "Add rolebinding-reader to aggregated-logging-elasticsearch" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy add-cluster-role-to-user rolebinding-reader \ - system:serviceaccount:logging:aggregated-logging-elasticsearch - register: rolebinding_reader_output - failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr - -- name: "Create ConfigMap for deployer parameters" - command: > - {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} - register: deployer_configmap_output - failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr - -- name: "Process the deployer template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}" - register: process_deployer - failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr - -- name: "Wait for image pull and deployer pod" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed" - register: result - until: result.rc == 0 - retries: 20 - delay: 15 - -- name: "Process imagestream template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - register: process_is - failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr - -- name: "Set insecured registry" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - -- name: "Wait for imagestreams to become available" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - register: result - until: result.rc == 0 - failed_when: result.rc == 1 and 'not found' not in result.stderr - retries: 20 - delay: 5 - -- name: "Wait for component pods to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" - with_items: - - es - - kibana - - curator - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- name: "Wait for ops component pods to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" 
- with_items: - - es-ops - - kibana-ops - - curator-ops - when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- name: "Wait for fluentd DaemonSet to exist" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd" - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 5 - -- name: "Deploy fluentd by labeling the node" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}" - -- name: "Wait for fluentd to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running" - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- include: update_master_config.yaml - -- debug: - msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually" - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml deleted file mode 100644 index 42568597a..000000000 --- a/roles/openshift_hosted_logging/tasks/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Cleanup logging deployment - include: "{{ role_path }}/tasks/cleanup_logging.yaml" - when: openshift_hosted_logging_cleanup | default(false) | bool - -- name: Deploy logging - include: "{{ role_path }}/tasks/deploy_logging.yaml" - when: not openshift_hosted_logging_cleanup | default(false) | bool diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml deleted file mode 100644 index 1122e059c..000000000 --- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Adding Kibana route information to loggingPublicURL - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: assetConfig.loggingPublicURL - yaml_value: "https://{{ logging_hostname }}" - notify: restart master diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml deleted file mode 100644 index 4b350b244..000000000 --- a/roles/openshift_hosted_logging/vars/main.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}" -ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}" -iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}" -oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}" -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ 
openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}" -kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}" -pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}" -es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}" -es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}" -es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}" -es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}" -es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}" -es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}" -es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}" -es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}" -es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}" -es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}" -es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}" -es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}" -es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}" -fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if 
openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}" -kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}" -kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}" -cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}" -cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}" -ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}" -journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}" -journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}" -ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}" -deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}" diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml index 11478263c..72754df2e 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: 
${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml index 0e3d006a7..298f8039e 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml index 28feac4e6..dace26793 100644 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 8bf98ba41..f821efd6b 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git 
a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml index bbaf76c17..019d836fe 100644 --- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 716f0e002..6699e2062 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -1,15 +1,17 @@ --- -openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}" +openshift_logging_use_ops: False openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" -openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}" openshift_logging_namespace: logging openshift_logging_nodeselector: null openshift_logging_labels: {} openshift_logging_label_key: "" openshift_logging_label_value: "" -openshift_logging_install_logging: True +openshift_logging_install_logging: False +openshift_logging_uninstall_logging: False + openshift_logging_purge_logging: False -openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_image_pull_secret: "" openshift_logging_curator_default_days: 30 openshift_logging_curator_run_hour: 0 @@ -19,13 +21,13 @@ openshift_logging_curator_script_log_level: INFO openshift_logging_curator_log_level: ERROR openshift_logging_curator_cpu_limit: 100m openshift_logging_curator_memory_limit: null -openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_nodeselector: {} openshift_logging_curator_ops_cpu_limit: 100m openshift_logging_curator_ops_memory_limit: null -openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_ops_nodeselector: {} -openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_hostname: "{{ 'kibana.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_proxy_debug: false @@ -34,8 +36,8 @@ openshift_logging_kibana_proxy_memory_limit: 96Mi openshift_logging_kibana_replica_count: 1 openshift_logging_kibana_edge_term_policy: Redirect -openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}" -openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_kibana_nodeselector: {} +openshift_logging_kibana_ops_nodeselector: {} #The absolute path on the control node to the cert file to use #for the public facing kibana certs @@ -49,7 +51,7 @@ openshift_logging_kibana_key: "" #for the public facing kibana certs openshift_logging_kibana_ca: "" -openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi openshift_logging_kibana_ops_proxy_debug: false @@ -69,12 +71,12 @@ openshift_logging_kibana_ops_key: "" #for the public facing ops kibana certs openshift_logging_kibana_ops_ca: "" -openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" +openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" -openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" +openshift_logging_fluentd_journal_source: "" +openshift_logging_fluentd_journal_read_from_head: "" openshift_logging_fluentd_hosts: ['--all'] openshift_logging_fluentd_buffer_queue_limit: 1024 openshift_logging_fluentd_buffer_size_limit: 1m @@ -84,18 +86,18 @@ openshift_logging_es_port: 9200 openshift_logging_es_ca: /etc/fluent/keys/ca openshift_logging_es_client_cert: /etc/fluent/keys/cert openshift_logging_es_client_key: /etc/fluent/keys/key -openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" +openshift_logging_es_cluster_size: 1 openshift_logging_es_cpu_limit: 1000m # the logging appenders for the root loggers to write ES logs. 
Valid values: 'file', 'console' openshift_logging_es_log_appenders: ['file'] -openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" -openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}" -openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}" -openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}" -openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" +openshift_logging_es_memory_limit: "8Gi" +openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}" +openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}" +openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" +openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}" openshift_logging_es_recover_after_time: 5m -openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_nodeselector: {} # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml openshift_logging_es_config: {} openshift_logging_es_number_of_shards: 1 @@ -125,16 +127,16 @@ openshift_logging_es_ops_port: 9200 openshift_logging_es_ops_ca: /etc/fluent/keys/ca openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert openshift_logging_es_ops_client_key: /etc/fluent/keys/key -openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" +openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}" openshift_logging_es_ops_cpu_limit: 1000m -openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}" -openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}" -openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" -openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}" -openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" +openshift_logging_es_ops_memory_limit: "8Gi" +openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}" +openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" +openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}" +openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" openshift_logging_es_ops_recover_after_time: 5m -openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" 
+openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients openshift_logging_es_ops_allow_external: False @@ -153,7 +155,7 @@ openshift_logging_es_ops_key: "" openshift_logging_es_ops_ca_ext: "" # storage related defaults -openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_logging_storage_access_modes: ['ReadWriteOnce'] # mux - secure_forward listener service openshift_logging_mux_allow_external: False diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index a77df9986..de5e25061 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -134,6 +134,7 @@ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}" openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" @@ -165,6 +166,7 @@ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}" openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index f475024dd..0da9771c7 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -30,12 +30,13 @@ check_mode: no become: no -- include: "{{ role_path }}/tasks/install_logging.yaml" - when: openshift_logging_install_logging | default(false) | bool +- include: install_logging.yaml + when: + - openshift_logging_install_logging | default(false) | bool -- include: "{{ role_path }}/tasks/delete_logging.yaml" +- include: delete_logging.yaml when: - - not openshift_logging_install_logging | default(false) | bool + - openshift_logging_uninstall_logging | default(false) | bool - name: Cleaning up local temp dir local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 7ccc2fc3b..f142f89f0 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -3,6 +3,9 @@ manage_iq_tasks: - resource_kind: role resource_name: admin user: management-admin +- resource_kind: role + resource_name: admin + user: system:serviceaccount:management-infra:management-admin - resource_kind: cluster-role resource_name: management-infra-admin user: system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml 
index 71bb09a76..73e935d3f 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -20,11 +20,11 @@ r_openshift_master_os_firewall_allow: port: 4001/tcp cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False - +l_bind_docker_reg_auth: False # NOTE # r_openshift_master_*_default may be defined external to this role. diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index b0237141b..a657668a9 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -14,19 +14,3 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_os_firewall -- role: openshift_master_facts -- role: openshift_hosted_facts -- role: openshift_master_certificates -- role: openshift_etcd_client_certificates - etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" - etcd_cert_prefix: "master.etcd-" - when: groups.oo_etcd_to_config | default([]) | length != 0 -- role: openshift_clock -- role: openshift_cloud_provider -- role: openshift_builddefaults -- role: openshift_buildoverrides -- role: nickhammond.logrotate -- role: contiv - contiv_role: netmaster - when: openshift_use_contiv | default(False) | bool diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 121261e94..94b7df1fc 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -177,9 +177,33 @@ local_facts: no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}" +- include: registry_auth.yml + - name: Install the systemd units include: systemd_units.yml +- name: Checking for journald.conf + stat: path=/etc/systemd/journald.conf + register: journald_conf_file + +- name: Update journald setup + replace: + dest: /etc/systemd/journald.conf + regexp: '^(\#| )?{{ item.var }}=\s*.*?$' + replace: ' {{ item.var }}={{ item.val }}' + backup: yes + with_items: "{{ journald_vars_to_replace | default([]) }}" + when: journald_conf_file.stat.exists + register: journald_update + +# Journald must be restarted immediately, otherwise it gets in the way during +# further steps in Ansible +- name: Restart journald + systemd: + name: systemd-journald + state: restarted + when: journald_update | changed + - name: Install Master system container include: system_container.yml when: @@ -200,7 +224,7 @@ - restart master api - set_fact: - translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}" + translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}" # TODO: add the validate parameter when there is a validation command to run - name: Create master config @@ -229,22 +253,6 @@ - restart master controllers when: openshift_master_bootstrap_enabled | default(False) -- name: Check for credentials file for registry auth - stat: - path: "{{oreg_auth_credentials_path }}" - when: - - oreg_auth_user is defined - register: master_oreg_auth_credentials_stat - -- name: Create credentials for registry auth -
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart master api - - restart master controllers - - include: set_loopback_context.yml when: - openshift.common.version_gte_3_2_or_1_2 diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml new file mode 100644 index 000000000..96b6c614e --- /dev/null +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -0,0 +1,27 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: master_oreg_auth_credentials_stat + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart master api + - restart master controllers + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart master api + - restart master controllers diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml new file mode 100644 index 000000000..1ab105808 --- /dev/null +++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml @@ -0,0 +1,8 @@ +--- +- yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: 'etcdClientInfo.urls' + value: "{{ openshift.master.etcd_urls }}" + notify: + - restart master api + - restart master controllers diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index f06448d71..a184a59f6 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,7 +12,17 @@ Requires={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged 
--net=host \ + --name {{ openshift.common.service_type }}-master-api \ + --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \ + -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ + -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \ + -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ + --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api LimitNOFILE=131072 diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index b7f36491b..2ded05f53 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -11,7 +11,17 @@ PartOf={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host \ + --name {{ openshift.common.service_type }}-master-controllers \ + --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \ + -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ + --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers LimitNOFILE=131072 diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index cf39b73f6..0c681c764 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -20,3 +20,22 @@ 
openshift_master_valid_grant_methods: - deny openshift_master_is_scaleup_host: False + +# These defaults assume forcing journald persistence, fsync to disk once +# a second, rate-limiting to 10,000 logs a second, no forwarding to +# syslog or wall, using 8GB of disk space maximum, using 10MB journal +# files, keeping only a day's worth of logs per journal file, and +# retaining journal files no longer than a month. +journald_vars_to_replace: +- { var: Storage, val: persistent } +- { var: Compress, val: yes } +- { var: SyncIntervalSec, val: 1s } +- { var: RateLimitInterval, val: 1s } +- { var: RateLimitBurst, val: 10000 } +- { var: SystemMaxUse, val: 8G } +- { var: SystemKeepFree, val: 20% } +- { var: SystemMaxFileSize, val: 10M } +- { var: MaxRetentionSec, val: 1month } +- { var: MaxFileSec, val: 1day } +- { var: ForwardToSyslog, val: no } +- { var: ForwardToWall, val: no } diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index e767772ce..f7f3ac2b1 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -6,10 +6,6 @@ Custom filters for use in openshift-master import copy import sys -# pylint import-error disabled because pylint cannot find the package -# when installed in a virtualenv -from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error - from ansible import errors from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.filter.core import to_bool as ansible_bool @@ -82,23 +78,8 @@ class IdentityProviderBase(object): self._allow_additional = True @staticmethod - def validate_idp_list(idp_list, openshift_version, deployment_type): + def validate_idp_list(idp_list): ''' validates a list of idps ''' - login_providers = [x.name for x in idp_list if x.login] - - multiple_logins_unsupported = False - if len(login_providers) > 1: - if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']: - if LooseVersion(openshift_version) < LooseVersion('3.2'): - multiple_logins_unsupported = True - if deployment_type in ['origin']: - if LooseVersion(openshift_version) < LooseVersion('1.2'): - multiple_logins_unsupported = True - if multiple_logins_unsupported: - raise errors.AnsibleFilterError("|failed multiple providers are " - "not allowed for login.
login " - "providers: {0}".format(', '.join(login_providers))) - names = [x.name for x in idp_list] if len(set(names)) != len(names): raise errors.AnsibleFilterError("|failed more than one provider configured with the same name") @@ -380,11 +361,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase): if 'extra_authorize_parameters' in self._idp: self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters') - if 'extraAuthorizeParameters' in self._idp: - if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']: - val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes')) - self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val - def validate(self): ''' validate this idp instance ''' IdentityProviderOauthBase.validate(self) @@ -476,7 +452,7 @@ class FilterModule(object): ''' Custom ansible filters for use by the openshift_master role''' @staticmethod - def translate_idps(idps, api_version, openshift_version, deployment_type): + def translate_idps(idps, api_version): ''' Translates a list of dictionaries into a valid identityProviders config ''' idp_list = [] @@ -492,7 +468,7 @@ class FilterModule(object): idp_inst.set_provider_items() idp_list.append(idp_inst) - IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type) + IdentityProviderBase.validate_idp_list(idp_list) return u(yaml.dump([idp.to_dict() for idp in idp_list], allow_unicode=True, default_flow_style=False, diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md index 1f10de4a2..ed698daca 100644 --- a/roles/openshift_metrics/README.md +++ b/roles/openshift_metrics/README.md @@ -39,6 +39,8 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml). - `openshift_metrics_hawkular_replicas:` The number of replicas for Hawkular metrics. +- `openshift_metrics_hawkular_route_annotations`: Dictionary with annotations for the Hawkular route. + - `openshift_metrics_cassandra_replicas`: The number of Cassandra nodes to deploy for the initial cluster. 
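The `openshift_metrics_hawkular_route_annotations` dictionary documented above is passed through to the Hawkular metrics route by the updated `route.j2` template later in this changeset. The snippet below is a minimal sketch of how it might be set in an inventory vars file; the specific annotation keys are illustrative assumptions, not defaults shipped by the role:

```yaml
# Illustrative inventory/group_vars snippet (annotation keys are examples only)
openshift_metrics_install_metrics: true
openshift_metrics_hawkular_route_annotations:
  # Any valid route annotations may be supplied here.
  haproxy.router.openshift.io/timeout: 60s
  kubernetes.io/tls-acme: "true"
```

The updated `route.j2` template guards the block with `{% if annotations is defined %}` and writes the dictionary into the route metadata via the `to_yaml` filter, so whatever annotations are supplied appear verbatim on the generated route.
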
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index d9a17ae7f..ed0182ba8 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,6 +1,7 @@ --- openshift_metrics_start_cluster: True -openshift_metrics_install_metrics: True +openshift_metrics_install_metrics: False +openshift_metrics_uninstall_metrics: False openshift_metrics_startup_timeout: 500 openshift_metrics_hawkular_replicas: 1 @@ -12,11 +13,12 @@ openshift_metrics_hawkular_cert: "" openshift_metrics_hawkular_key: "" openshift_metrics_hawkular_ca: "" openshift_metrics_hawkular_nodeselector: "" +openshift_metrics_hawkular_route_annotations: {} openshift_metrics_cassandra_replicas: 1 -openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}" -openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}" -openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}" +openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}" +openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}" +openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}" openshift_metrics_cassandra_limits_memory: 2G openshift_metrics_cassandra_limits_cpu: null openshift_metrics_cassandra_requests_memory: 1G @@ -53,8 +55,8 @@ openshift_metrics_master_url: https://kubernetes.default.svc openshift_metrics_node_id: nodename openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}" -openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml index 6b37f85ab..b63f5ca8c 100644 --- a/roles/openshift_metrics/tasks/install_hawkular.yaml +++ b/roles/openshift_metrics/tasks/install_hawkular.yaml @@ -40,6 +40,7 @@ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-route.yaml" vars: name: hawkular-metrics + annotations: "{{ openshift_metrics_hawkular_route_annotations }}" labels: metrics-infra: hawkular-metrics host: "{{ openshift_metrics_hawkular_hostname }}" diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index eaabdd20f..0461039fc 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -43,7 +43,13 @@ check_mode: no tags: metrics_init -- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}" +- include: install_metrics.yaml + when: + - openshift_metrics_install_metrics | default(false) | bool + +- include: uninstall_metrics.yaml + when: + - openshift_metrics_uninstall_metrics | default(false) | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_metrics/tasks/pre_install.yaml 
b/roles/openshift_metrics/tasks/pre_install.yaml index 2e2013d40..d6756f9b9 100644 --- a/roles/openshift_metrics/tasks/pre_install.yaml +++ b/roles/openshift_metrics/tasks/pre_install.yaml @@ -10,7 +10,7 @@ is invalid, must be one of: emptydir, pv, dynamic when: - openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types - - "not {{ openshift_metrics_heapster_standalone | bool }}" + - not (openshift_metrics_heapster_standalone | bool) - name: list existing secrets command: > diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2 index 423ab54a3..253d6ecf5 100644 --- a/roles/openshift_metrics/templates/route.j2 +++ b/roles/openshift_metrics/templates/route.j2 @@ -2,6 +2,9 @@ apiVersion: v1 kind: Route metadata: name: {{ name }} +{% if annotations is defined %} + annotations: {{ annotations | to_yaml }} +{% endif %} {% if labels is defined and labels %} labels: {% for k, v in labels.iteritems() %} diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml index 678c4104c..8704ddfa0 100644 --- a/roles/openshift_metrics/vars/default_images.yml +++ b/roles/openshift_metrics/vars/default_images.yml @@ -1,3 +1,3 @@ --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}" +__openshift_metrics_image_prefix: "docker.io/openshift/origin-" +__openshift_metrics_image_version: "latest" diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f0bdac7d2..68cdf06fe 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}" +__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/" +__openshift_metrics_image_version: "v3.6" diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index f1e64f3aa..ed3516d04 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -60,7 +60,7 @@ openshift_deployment_type: origin openshift_node_bootstrap: False r_openshift_node_os_firewall_deny: [] -r_openshift_node_os_firewall_allow: +default_r_openshift_node_os_firewall_allow: - service: Kubernetes kubelet port: 10250/tcp - service: http @@ -79,12 +79,14 @@ r_openshift_node_os_firewall_allow: - service: Kubernetes service NodePort UDP port: "{{ openshift_node_port_range | default('') }}/udp" cond: "{{ openshift_node_port_range is defined }}" +# Allow multiple port ranges to be added to the role +r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}" -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False - +l_bind_docker_reg_auth: False # NOTE # r_openshift_node_*_default may be defined external to this role. diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 855b0a8d8..25a6fc721 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -29,8 +29,5 @@ - not (node_service_status_changed | default(false) | bool) - not openshift_node_bootstrap -- name: reload sysctl.conf - command: /sbin/sysctl -p - - name: reload systemd units command: systemctl daemon-reload diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 7af3f54b5..2759188f3 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -2,18 +2,6 @@ - name: Install the systemd units include: systemd_units.yml -- name: Check for tuned package - command: rpm -q tuned - args: - warn: no - register: tuned_installed - changed_when: false - failed_when: false - -- name: Set atomic-guest tuned profile - command: "tuned-adm profile atomic-guest" - when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool - - name: Start and enable openvswitch service systemd: name: openvswitch.service @@ -107,5 +95,9 @@ msg: Node failed to start please inspect the logs and try again when: node_start_result | failed +- name: Setup tuned + include: tuned.yml + static: yes + - set_fact: node_service_status_changed: "{{ node_start_result | changed }}" diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 02b8ee67c..265bf2c46 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,11 +1,9 @@ --- -# We have to add tuned-profiles in the same transaction otherwise we run into depsolving -# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. - when: not openshift.common.is_containerized | bool block: - name: Install Node package package: - name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - name: Install sdn-ovs package diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 22ff6dfd2..e82fb42b8 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -2,7 +2,8 @@ - fail: msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." when: - - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] + - (not ansible_selinux or ansible_selinux.status != 'enabled') + - deployment_type == 'openshift-enterprise' - not openshift_use_crio | default(false) - name: setup firewall @@ -59,38 +60,22 @@ # The atomic-openshift-node service will set this parameter on # startup, but if the network service is restarted this setting is # lost. 
Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388 -# -# Use lineinfile w/ a handler for this task until -# https://github.com/ansible/ansible/pull/24277 is included in an -# ansible release and we can use the sysctl module. -- name: Persist net.ipv4.ip_forward sysctl entry - lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1' - notify: - - reload sysctl.conf +- sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_file: "/etc/sysctl.d/99-openshift.conf" + reload: yes - name: include bootstrap node config include: bootstrap.yml when: openshift_node_bootstrap +- include: registry_auth.yml + - name: include standard node config include: config.yml when: not openshift_node_bootstrap -- name: Check for credentials file for registry auth - stat: - path: "{{oreg_auth_credentials_path }}" - when: - - oreg_auth_user is defined - register: node_oreg_auth_credentials_stat - -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart node - - name: Configure AWS Cloud Provider Settings lineinfile: dest: /etc/sysconfig/{{ openshift.common.service_type }}-node diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index b2dceedbe..0ca44c292 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -9,4 +9,6 @@ oc_atomic_container: name: "{{ openshift.common.service_type }}-node" image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" + values: + - "DNS_DOMAIN={{ openshift.common.dns_domain }}" state: latest diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml new file mode 100644 index 000000000..f370bb260 --- /dev/null +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -0,0 +1,25 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: node_oreg_auth_credentials_stat + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart node + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart node diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 57094f28e..4ab10b95f 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type 
}}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ + --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ + -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ + -e HOST=/rootfs -e HOST_ETC=/host-etc \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ + -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \ + -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \ + -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \ + -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \ + -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 61d2a5b51..df02bcf0e 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -114,6 +114,8 @@ EOF echo "nameserver "${def_route_ip}"" >> 
${NEW_RESOLV_CONF} if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF} + elif ! grep -qw search ${NEW_RESOLV_CONF}; then + echo 'search cluster.local' >> ${NEW_RESOLV_CONF} fi cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf fi diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 8d3d010e4..19e9a56b7 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -9,5 +9,4 @@ galaxy_info: - name: EL versions: - 7 -dependencies: -- role: openshift_hosted_facts +dependencies: {} diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 18d6a1645..5aa8aecec 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -11,7 +11,7 @@ openshift_prometheus_node_selector: {"region":"infra"} openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0" openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev" openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev" -openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer" +openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1" # additional prometheus rules file openshift_prometheus_additional_rules_file: null diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index 93bdda3e8..a9bce2fb1 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -107,7 +107,10 @@ - name: annotate prometheus service command: > {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls' + service prometheus + prometheus.io/scrape='true' + prometheus.io/scheme=https + service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls - name: annotate alerts service command: > diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md index abd1997dd..ce3b51454 100644 --- a/roles/openshift_repos/README.md +++ b/roles/openshift_repos/README.md @@ -1,4 +1,4 @@ -OpenShift Repos +OpenShift Repos ================ Configures repositories for an OpenShift installation @@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos. Role Variables -------------- -| Name | Default value | | -|-------------------------------|---------------|------------------------------------| -| openshift_deployment_type | None | Possible values enterprise, origin | -| openshift_additional_repos | {} | TODO | +| Name | Default value | | +|-------------------------------|---------------|----------------------------------------------| +| openshift_deployment_type | None | Possible values openshift-enterprise, origin | +| openshift_additional_repos | {} | TODO | Dependencies ------------ diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py new file mode 100644 index 000000000..d42c9bdb9 --- /dev/null +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py @@ -0,0 +1,25 @@ +''' + Openshift Logging class that provides useful filters used in Logging. 
+ + This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml +''' + + +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + ''' OpenShift Logging Filters ''' + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + ''' Returns the names of the filters provided by this class ''' + return { + 'map_from_pairs': map_from_pairs + } diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py new file mode 100644 index 000000000..f61801714 --- /dev/null +++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py @@ -0,0 +1,68 @@ +#!/usr/bin/python + +""" Ansible module to help with setting facts conditionally based on other facts """ + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: conditional_set_fact + +short_description: This will set a fact if the value is defined + +description: + - "To avoid constant set_fact & when conditions for each var we can use this" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + fact2: defined_variable + +''' + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined. If it is then we use that value as for the original key """ + + module = AnsibleModule( + argument_spec=dict( + facts=dict(type='dict', required=True), + vars=dict(required=False, type='dict', default=[]) + ), + supports_check_mode=True + ) + + local_facts = dict() + is_changed = False + + for param in module.params['vars']: + other_var = module.params['vars'][param] + + if other_var in module.params['facts']: + local_facts[param] = module.params['facts'][other_var] + if not is_changed: + is_changed = True + + return module.exit_json(changed=is_changed, # noqa: F405 + ansible_facts=local_facts) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml new file mode 100644 index 000000000..e52ab5f6d --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml @@ -0,0 +1,48 @@ +--- +# this is used to set the logging variables from deprecated values to the current variables names +# this file should be deleted once variables are no longer honored + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + logging_hostname: openshift_hosted_logging_hostname + logging_ops_hostname: openshift_hosted_logging_ops_hostname + logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size + logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size + openshift_logging_storage_kind: openshift_hosted_logging_storage_kind + openshift_logging_storage_host: openshift_hosted_logging_storage_host + openshift_logging_storage_labels: openshift_hosted_logging_storage_labels + openshift_logging_storage_volume_size: 
openshift_hosted_logging_storage_volume_size + openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind + openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host + openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels + openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size + openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster + openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret + openshift_logging_kibana_hostname: openshift_hosted_logging_hostname + openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname + openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source + openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head + openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram + openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector + openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram + openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes + openshift_logging_master_public_url: openshift_hosted_logging_master_public_url + openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix + openshift_logging_image_version: openshift_hosted_logging_deployer_version + openshift_logging_install_logging: openshift_hosted_logging_deploy + + +- set_fact: + openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" + openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" + openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}" + openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}" + openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" + openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml new file mode 100644 index 000000000..279646981 --- /dev/null +++ 
b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml @@ -0,0 +1,17 @@ +--- +# this is used to set the metrics variables from deprecated values to the current variables names +# this file should be deleted once variables are no longer honored + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes + openshift_metrics_storage_host: openshift_hosted_metrics_storage_host + openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory + openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name + openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size + openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels + openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix + openshift_metrics_image_version: openshift_hosted_metrics_deployer_version + openshift_metrics_install_metrics: openshift_hosted_metrics_deploy + openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml new file mode 100644 index 000000000..94d3acffc --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -0,0 +1,21 @@ +--- + +- name: Check for usage of deprecated variables + set_fact: + __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will be no longer be used in the next minor release. Please update your inventory accordingly.']" + when: + - hostvars[inventory_hostname][item] is defined + with_items: "{{ __warn_deprecated_vars }}" + +- block: + - debug: msg="{{__deprecation_message}}" + - pause: + seconds: "{{ 10 }}" + when: + - __deprecation_message | default ('') | length > 0 + +# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory +- name: Assign deprecated variables to correct counterparts + include: "{{ item }}" + with_fileglob: + - "../tasks/__deprecations_*.yml" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 59ce505d3..e327ee9f5 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -1,4 +1,8 @@ --- +# We should print out deprecations prior to any failures so that if a play does fail for other reasons +# the user would also be aware of any deprecated variables they should note to adjust +- include: deprecations.yml + - name: Abort when conflicting deployment type variables are set when: - deployment_type is defined diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index da48e42c1..0fc2372d2 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,7 +1,78 @@ --- # origin uses community packages named 'origin' -# online currently uses 'openshift' packages -# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift' -# atomic-enterprise uses Red Hat packages named 'atomic-openshift' -# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1 -known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'] +# openshift-enterprise 
uses Red Hat packages named 'atomic-openshift' +known_openshift_deployment_types: ['origin', 'openshift-enterprise'] + +__deprecation_header: "[DEPRECATION WARNING]:" + +# this is a list of variables that we will be deprecating within the next minor release, this list should be expected to change from release to release +__warn_deprecated_vars: + # logging + - 'openshift_hosted_logging_deploy' + - 'openshift_hosted_logging_hostname' + - 'openshift_hosted_logging_ops_hostname' + - 'openshift_hosted_logging_master_public_url' + - 'openshift_hosted_logging_elasticsearch_cluster_size' + - 'openshift_hosted_logging_elasticsearch_ops_cluster_size' + - 'openshift_hosted_logging_image_pull_secret' + - 'openshift_hosted_logging_enable_ops_cluster' + - 'openshift_hosted_logging_curator_nodeselector' + - 'openshift_hosted_logging_curator_ops_nodeselector' + - 'openshift_hosted_logging_kibana_nodeselector' + - 'openshift_hosted_logging_kibana_ops_nodeselector' + - 'openshift_hosted_logging_fluentd_nodeselector_label' + - 'openshift_hosted_logging_journal_source' + - 'openshift_hosted_logging_journal_read_from_head' + - 'openshift_hosted_logging_elasticsearch_instance_ram' + - 'openshift_hosted_logging_storage_labels' + - 'openshift_hosted_logging_elasticsearch_pvc_dynamic' + - 'openshift_hosted_logging_elasticsearch_pvc_size' + - 'openshift_hosted_logging_elasticsearch_pvc_prefix' + - 'openshift_hosted_logging_elasticsearch_storage_group' + - 'openshift_hosted_logging_elasticsearch_nodeselector' + - 'openshift_hosted_logging_elasticsearch_ops_instance_ram' + - 'openshift_hosted_loggingops_storage_labels' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_size' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix' + - 'openshift_hosted_logging_elasticsearch_storage_group' + - 'openshift_hosted_logging_elasticsearch_ops_nodeselector' + - 'openshift_hosted_logging_storage_access_modes' + - 'openshift_hosted_logging_storage_kind' + - 'openshift_hosted_loggingops_storage_kind' + - 'openshift_hosted_logging_storage_host' + - 'openshift_hosted_loggingops_storage_host' + - 'openshift_hosted_logging_storage_nfs_directory' + - 'openshift_hosted_loggingops_storage_nfs_directory' + - 'openshift_hosted_logging_storage_volume_name' + - 'openshift_hosted_loggingops_storage_volume_name' + - 'openshift_hosted_logging_storage_volume_size' + - 'openshift_hosted_loggingops_storage_volume_size' + - 'openshift_hosted_logging_enable_ops_cluster' + - 'openshift_hosted_logging_image_pull_secret' + - 'openshift_hosted_logging_curator_nodeselector' + - 'openshift_hosted_logging_curator_ops_nodeselector' + - 'openshift_hosted_logging_kibana_nodeselector' + - 'openshift_hosted_logging_kibana_ops_nodeselector' + - 'openshift_hosted_logging_ops_hostname' + - 'openshift_hosted_logging_fluentd_nodeselector_label' + - 'openshift_hosted_logging_journal_source' + - 'openshift_hosted_logging_journal_read_from_head' + - 'openshift_hosted_logging_elasticsearch_instance_ram' + - 'openshift_hosted_logging_elasticsearch_nodeselector' + - 'openshift_hosted_logging_elasticsearch_ops_instance_ram' + - 'openshift_hosted_logging_elasticsearch_ops_nodeselector' + - 'openshift_hosted_logging_storage_access_modes' + - 'openshift_hosted_logging_deployer_prefix' + - 'openshift_hosted_logging_deployer_version' + # metrics + - 'openshift_hosted_metrics_deploy' + - 'openshift_hosted_metrics_storage_kind' + - 'openshift_hosted_metrics_storage_access_modes' + - 
'openshift_hosted_metrics_storage_host' + - 'openshift_hosted_metrics_storage_nfs_directory' + - 'openshift_hosted_metrics_storage_volume_name' + - 'openshift_hosted_metrics_storage_volume_size' + - 'openshift_hosted_metrics_storage_labels' + - 'openshift_hosted_metrics_deployer_prefix' + - 'openshift_hosted_metrics_deployer_version' diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js index 1f25cc39f..d0a9f11dc 100644 --- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js +++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js @@ -1,2 +1,2 @@ -window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true; -window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true; +// empty file so that the master-config can still point to a file that exists +// this file will be replaced by the template service broker role if enabled diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 746c73eaf..faf1aea97 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -6,8 +6,6 @@ register: mktemp changed_when: False -- include: wire_aggregator.yml - - name: Set default image variables based on deployment_type include_vars: "{{ item }}" with_first_found: @@ -112,15 +110,6 @@ when: - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) -- shell: > - oc get policybindings/kube-system:default -n kube-system || echo "not found" - register: get_kube_system - changed_when: no - -- command: > - oc create policybinding kube-system -n kube-system - when: "'not found' in get_kube_system.stdout" - - oc_adm_policy_user: namespace: kube-service-catalog resource_kind: scc diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml index 1c788470a..6431c6d3f 100644 --- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml +++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml @@ -18,11 +18,10 @@ changed_when: false delegate_to: "{{ first_master }}" - # TODO: this currently has a bug where hostnames are required - name: Creating First Master Aggregator signer certs command: > - oc adm ca create-signer-cert + {{ hostvars[first_master].openshift.common.client_binary }} adm ca create-signer-cert --cert=/etc/origin/master/front-proxy-ca.crt --key=/etc/origin/master/front-proxy-ca.key --serial=/etc/origin/master/ca.serial.txt @@ -79,7 +78,7 @@ - name: Create first master api-client config for Aggregator command: > - oc adm create-api-client-config + {{ hostvars[first_master].openshift.common.client_binary }} adm create-api-client-config --certificate-authority=/etc/origin/master/front-proxy-ca.crt --signer-cert=/etc/origin/master/front-proxy-ca.crt --signer-key=/etc/origin/master/front-proxy-ca.key diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index a059745a6..d0bc0e028 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -76,10 +76,11 @@ 
GlusterFS cluster into a new or existing OpenShift cluster: | Name | Default value | Description | |--------------------------------------------------|-------------------------|-----------------------------------------| | openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources | openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized | openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. | openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods @@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin | openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service. 
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode | openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes | openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 0b3d3aef1..148549887 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_is_native: True openshift_storage_glusterfs_name: 'storage' openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" +openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' @@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" openshift_storage_glusterfs_registry_name: 'registry' openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" +openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" openshift_storage_glusterfs_registry_storageclass: False openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" @@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: [] r_openshift_storage_glusterfs_os_firewall_allow: - service: glusterfs_sshd port: "2222/tcp" -- service: glusterfs_daemon - port: "24007/tcp" - service: glusterfs_management + port: "24007/tcp" +- service: glusterfs_rdma port: "24008/tcp" - service: glusterfs_bricks port: "49152-49251/tcp" diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml new file mode 100644 index 000000000..9ebb0d5ec --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml @@ -0,0 +1,143 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: 
deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: topology + mountPath: ${TOPOLOGY_PATH} + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: topology + secret: + secretName: heketi-${CLUSTER_NAME}-topology-secret + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs +- name: TOPOLOGY_PATH + displayName: heketi topology file location + required: True diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-pod 
+ glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. 
Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods 
reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index a31c5bd5e..bc0dde17d 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -15,6 +15,7 @@ oc_project: state: present name: "{{ glusterfs_namespace }}" + node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}" when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass - name: Delete pre-existing heketi resources diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 7a2987883..012c722ff 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 17f87578d..1bcab8e49 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | 
default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..095fb780f --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} + secretNamespace: "{{ glusterfs_namespace }}" + secretName: "heketi-{{ glusterfs_name }}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 new file mode 100644 index 000000000..579b11bb7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 @@ -0,0 +1,36 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port" : "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth" : false, + + "_jwt" : "Private keys for access", + "jwt" : { + "_admin" : "Admin has access to all APIs", + "admin" : { + "key" : "My Secret" + }, + "_user" : "User only has access to /volumes endpoint", + "user" : { + "key" : "My Secret" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs" : { + + "_executor_comment": "Execute plugin. 
Possible choices: mock, kubernetes, ssh", + "executor" : "{{ glusterfs_heketi_executor }}", + + "_db_comment": "Database file name", + "db" : "/var/lib/heketi/heketi.db", + + "sshexec" : { + "keyfile" : "/etc/heketi/private_key", + "port" : "{{ glusterfs_heketi_ssh_port }}", + "user" : "{{ glusterfs_heketi_ssh_user }}", + "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} + } + } +} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 @@ -0,0 +1,49 @@ +{ + "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} + {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%} + {%- if cluster in clusters -%} + {%- set _dummy = clusters[cluster].append(node) -%} + {%- else -%} + {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} + {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} + { + "nodes": [ +{%- for node in clusters[cluster] -%} + { + "node": { + "hostnames": { + "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} + "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} + "{{ node }}" +{%- endif -%} + ], + "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} + "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} + ] + }, + "zone": {{ hostvars[node].glusterfs_zone | default(1) }} + }, + "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} + "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] +} diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 51f8f4e0e..3047fbaf9 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -31,9 +31,9 @@ group: nfsnobody with_items: - "{{ openshift.hosted.registry }}" - - "{{ openshift.hosted.metrics }}" - - "{{ openshift.hosted.logging }}" - - "{{ openshift.hosted.loggingops }}" + - "{{ openshift.metrics }}" + - "{{ openshift.logging }}" + - "{{ openshift.loggingops }}" - "{{ openshift.hosted.etcd }}" - name: Configure exports diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 7e8f70b23..0141e0d25 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1,5 +1,5 @@ {{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }} -{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }} -{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }} -{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }} +{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ 
openshift.metrics.storage.nfs.options }} +{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }} +{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }} {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }} diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index 01a1a7472..53d10f1f8 100644 --- a/roles/openshift_version/defaults/main.yml +++ b/roles/openshift_version/defaults/main.yml @@ -1,2 +1,3 @@ --- openshift_protect_installed_version: True +version_install_base_package: False diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 204abe27e..f4e9ff43a 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -5,11 +5,15 @@ is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}" is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" +# This is only needed on masters and nodes; version_install_base_package +# should be set by a play externally. - name: Install the base package for versioning package: name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - when: not is_containerized | bool + when: + - not is_containerized | bool + - version_install_base_package | bool # Block attempts to install origin without specifying some kind of version information. # This is because the latest tags for origin are usually alpha builds, which should not @@ -162,7 +166,9 @@ - set_fact: openshift_pkg_version: -{{ openshift_version }} - when: openshift_pkg_version is not defined + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined - fail: msg: openshift_version role was unable to set openshift_version @@ -177,7 +183,10 @@ - fail: msg: openshift_version role was unable to set openshift_pkg_version name: Abort if openshift_pkg_version was not set - when: openshift_pkg_version is not defined + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined + - fail: msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." 
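A brief aside on the `roles/openshift_version` changes above before the next file: the role now installs the base RPM only when a calling play opts in through `version_install_base_package`, and it no longer derives `openshift_pkg_version` while an upgrade target is set. The following is a minimal, illustrative Python sketch of that gating logic only (the helper names are invented for illustration and are not part of the patch):

```python
# Sketch (not part of the patch) of the two gates added to
# roles/openshift_version/tasks/main.yml above.
def should_install_base_package(is_containerized, version_install_base_package=False):
    """Mirror the two `when:` conditions on the base package task."""
    return (not is_containerized) and version_install_base_package


def default_pkg_version(openshift_version, openshift_pkg_version=None,
                        openshift_upgrade_target=None):
    """Mirror the guarded set_fact: only derive -<version> when neither
    openshift_pkg_version nor openshift_upgrade_target is already set."""
    if openshift_pkg_version is None and openshift_upgrade_target is None:
        return "-{}".format(openshift_version)
    return openshift_pkg_version


# By default nothing is installed; a calling play has to opt in explicitly.
assert not should_install_base_package(is_containerized=False)
assert should_install_base_package(is_containerized=False,
                                   version_install_base_package=True)
assert default_pkg_version("3.7.0") == "-3.7.0"
# During an upgrade the fact is left untouched.
assert default_pkg_version("3.7.0", openshift_upgrade_target="3.7") is None
```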
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml index 0af5abf38..2d74f2e48 100644 --- a/roles/os_firewall/tasks/iptables.yml +++ b/roles/os_firewall/tasks/iptables.yml @@ -33,7 +33,7 @@ register: result delegate_to: "{{item}}" run_once: true - with_items: "{{ ansible_play_hosts }}" + with_items: "{{ ansible_play_batch }}" - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail pause: diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 39d59db70..9738929d2 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -3,20 +3,17 @@ command: subscription-manager repos --disable="*" - set_fact: - default_ose_version: '3.0' - when: deployment_type == 'enterprise' - -- set_fact: default_ose_version: '3.6' - when: deployment_type in ['atomic-enterprise', 'openshift-enterprise'] + when: deployment_type == 'openshift-enterprise' - set_fact: ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}" - fail: msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" - when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or - ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] ) + when: + - deployment_type == 'openshift-enterprise' + - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] ) - name: Enable RHEL repositories command: subscription-manager repos \ diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index 453044a6e..c43e5513d 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -41,15 +41,19 @@ redhat_subscription: username: "{{ rhel_subscription_user }}" password: "{{ rhel_subscription_pass }}" + register: rh_subscription + until: rh_subscription | succeeded - name: Retrieve the OpenShift Pool ID command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only register: openshift_pool_id + until: openshift_pool_id | succeeded changed_when: False - name: Determine if OpenShift Pool Already Attached command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only register: openshift_pool_attached + until: openshift_pool_attached | succeeded changed_when: False when: openshift_pool_id.stdout == '' @@ -58,10 +62,12 @@ when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == '' - name: Attach to OpenShift Pool - command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }} + command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }} + register: subscribe_pool + until: subscribe_pool | succeeded when: openshift_pool_id.stdout != '' - include: enterprise.yml when: - - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] + - deployment_type == 'openshift-enterprise' - not ostree_booted.stat.exists | bool diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml new file mode 100644 index 000000000..fb407c4a2 --- /dev/null +++ b/roles/template_service_broker/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# placeholder file? 
+template_service_broker_remove: False +template_service_broker_install: False diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..b3a3d3428 --- /dev/null +++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true; diff --git a/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js new file mode 100644 index 000000000..d0a9f11dc --- /dev/null +++ b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js @@ -0,0 +1,2 @@ +// empty file so that the master-config can still point to a file that exists +// this file will be replaced by the template service broker role if enabled diff --git a/roles/etcd_ca/meta/main.yml b/roles/template_service_broker/meta/main.yml index e3e2f7781..ab5a0cf08 100644 --- a/roles/etcd_ca/meta/main.yml +++ b/roles/template_service_broker/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: - author: Jason DeTiberus - description: Etcd CA + author: OpenShift Red Hat + description: OpenShift Template Service Broker company: Red Hat, Inc. license: Apache License, Version 2.0 min_ansible_version: 2.1 @@ -11,6 +11,3 @@ galaxy_info: - 7 categories: - cloud - - system -dependencies: -- role: etcd_common diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml new file mode 100644 index 000000000..199df83c2 --- /dev/null +++ b/roles/template_service_broker/tasks/install.yml @@ -0,0 +1,47 @@ +--- +# Fact setting +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: set ansible_service_broker facts + set_fact: + template_service_broker_image: "{{ template_service_broker_image | default(__template_service_broker_image) }}" + +- oc_project: + name: openshift-template-service-broker + state: present + +- command: mktemp -d /tmp/tsb-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- copy: + src: "{{ __tsb_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __tsb_template_file }}" + - "{{ __tsb_rbac_file }}" + +- name: Apply template file + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" | kubectl apply -f - + +# reconcile with rbac +- name: Reconcile with RBAC file + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | oc auth reconcile -f - + +- name: copy tech preview extension file for service console UI + copy: + src: openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml new file mode 100644 index 000000000..d7ca970c7 --- /dev/null +++ b/roles/template_service_broker/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include: install.yml + when: template_service_broker_install | default(false) | bool + +- include: remove.yml + 
when: template_service_broker_remove | default(false) | bool diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml new file mode 100644 index 000000000..207dd9bdb --- /dev/null +++ b/roles/template_service_broker/tasks/remove.yml @@ -0,0 +1,28 @@ +--- +- command: mktemp -d /tmp/tsb-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- copy: + src: "{{ __tsb_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + +- name: Delete TSB objects + shell: > + oc process -f "{{ __tsb_files_location }}/{{ __tsb_template_file }}" | kubectl delete -f - + +- name: empty out tech preview extension file for service console UI + copy: + src: remove-openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- oc_project: + name: openshift-template-service-broker + state: absent + +- file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml new file mode 100644 index 000000000..807f2822c --- /dev/null +++ b/roles/template_service_broker/vars/default_images.yml @@ -0,0 +1,2 @@ +--- +__template_service_broker_image: "" diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml new file mode 100644 index 000000000..372ab8f6f --- /dev/null +++ b/roles/template_service_broker/vars/main.yml @@ -0,0 +1,6 @@ +--- +__tsb_files_location: "../../../files/origin-components/" + +__tsb_template_file: "apiserver-template.yaml" +__tsb_config_file: "apiserver-config.yaml" +__tsb_rbac_file: "rbac-template.yaml" diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml new file mode 100644 index 000000000..807f2822c --- /dev/null +++ b/roles/template_service_broker/vars/openshift-enterprise.yml @@ -0,0 +1,2 @@ +--- +__template_service_broker_image: "" @@ -48,6 +48,27 @@ def find_files(base_dir, exclude_dirs, include_dirs, file_regex): return found +def recursive_search(search_list, field): + """ + Takes a list with nested dicts, and searches all dicts for a key of the + field provided. If the items in the list are not dicts, the items are not + processed. 
+ """ + fields_found = [] + + for item in search_list: + if isinstance(item, dict): + for key, value in item.items(): + if key == field: + fields_found.append(value) + elif isinstance(value, list): + results = recursive_search(value, field) + for result in results: + fields_found.append(result) + + return fields_found + + def find_entrypoint_playbooks(): '''find entry point playbooks as defined by openshift-ansible''' playbooks = set() @@ -248,37 +269,73 @@ class OpenShiftAnsibleSyntaxCheck(Command): ''' finalize_options ''' pass + def deprecate_jinja2_in_when(self, yaml_contents, yaml_file): + ''' Check for Jinja2 templating delimiters in when conditions ''' + test_result = False + failed_items = [] + + search_results = recursive_search(yaml_contents, 'when') + for item in search_results: + if isinstance(item, str): + if '{{' in item or '{%' in item: + failed_items.append(item) + else: + for sub_item in item: + if '{{' in sub_item or '{%' in sub_item: + failed_items.append(sub_item) + + if len(failed_items) > 0: + print('{}Error: Usage of Jinja2 templating delimiters in when ' + 'conditions is deprecated in Ansible 2.3.\n' + ' File: {}'.format(self.FAIL, yaml_file)) + for item in failed_items: + print(' Found: "{}"'.format(item)) + print(self.ENDC) + test_result = True + + return test_result + + def deprecate_include(self, yaml_contents, yaml_file): + ''' Check for usage of include directive ''' + test_result = False + + search_results = recursive_search(yaml_contents, 'include') + + if len(search_results) > 0: + print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n' + 'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n' + ' File: {}'.format(self.FAIL, yaml_file)) + for item in search_results: + print(' Found: "include: {}"'.format(item)) + print(self.ENDC) + test_result = True + + return test_result + def run(self): ''' run command ''' has_errors = False print('Ansible Deprecation Checks') - exclude_dirs = ['adhoc', 'files', 'meta', 'test', 'tests', 'vars', '.tox'] + exclude_dirs = ['adhoc', 'files', 'meta', 'test', 'tests', 'vars', 'defaults', '.tox'] for yaml_file in find_files( os.getcwd(), exclude_dirs, None, r'\.ya?ml$'): with open(yaml_file, 'r') as contents: - for task in yaml.safe_load(contents) or {}: - if not isinstance(task, dict): - # Skip yaml files which are not a dictionary of tasks - continue - if 'when' in task: - if '{{' in task['when'] or '{%' in task['when']: - print('{}Error: Usage of Jinja2 templating delimiters ' - 'in when conditions is deprecated in Ansible 2.3.\n' - ' File: {}\n' - ' Found: "{}"{}'.format( - self.FAIL, yaml_file, - task['when'], self.ENDC)) - has_errors = True - # TODO (rteague): This test will be enabled once we move to Ansible 2.4 - # if 'include' in task: - # print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n' - # 'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n' - # ' File: {}\n' - # ' Found: "include: {}"{}'.format( - # self.FAIL, yaml_file, task['include'], self.ENDC)) - # has_errors = True + yaml_contents = yaml.safe_load(contents) + if not isinstance(yaml_contents, list): + continue + + # Check for Jinja2 templating delimiters in when conditions + result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file) + has_errors = result or has_errors + + # TODO (rteague): This test will be enabled once we move to Ansible 2.4 + # result = self.deprecate_include(yaml_contents, yaml_file) + # has_errors = result or has_errors + + if not has_errors: + print('...PASSED') print('Ansible 
Playbook Entry Point Syntax Checks') for playbook in find_entrypoint_playbooks(): diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go index a92d6861d..8b79c48cb 100644 --- a/test/integration/openshift_health_checker/common.go +++ b/test/integration/openshift_health_checker/common.go @@ -25,7 +25,7 @@ func (p PlaybookTest) Run(t *testing.T) { // A PlaybookTest is intended to be run in parallel with other tests. t.Parallel() - cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path) + cmd := exec.Command("ansible-playbook", "-e", "testing_skip_some_requirements=1", "-i", "/dev/null", p.Path) cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1") b, err := cmd.CombinedOutput() diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py index 393a4d6ba..6095beb95 100644 --- a/test/openshift_version_tests.py +++ b/test/openshift_version_tests.py @@ -17,39 +17,39 @@ class OpenShiftVersionTests(unittest.TestCase): # Static tests for legacy filters. legacy_gte_tests = [{'name': 'oo_version_gte_3_1_or_1_1', - 'positive_enterprise_version': '3.2.0', - 'negative_enterprise_version': '3.0.0', + 'positive_openshift-enterprise_version': '3.2.0', + 'negative_openshift-enterprise_version': '3.0.0', 'positive_origin_version': '1.2.0', 'negative_origin_version': '1.0.0'}, {'name': 'oo_version_gte_3_1_1_or_1_1_1', - 'positive_enterprise_version': '3.2.0', - 'negative_enterprise_version': '3.1.0', + 'positive_openshift-enterprise_version': '3.2.0', + 'negative_openshift-enterprise_version': '3.1.0', 'positive_origin_version': '1.2.0', 'negative_origin_version': '1.1.0'}, {'name': 'oo_version_gte_3_2_or_1_2', - 'positive_enterprise_version': '3.3.0', - 'negative_enterprise_version': '3.1.0', + 'positive_openshift-enterprise_version': '3.3.0', + 'negative_openshift-enterprise_version': '3.1.0', 'positive_origin_version': '1.3.0', 'negative_origin_version': '1.1.0'}, {'name': 'oo_version_gte_3_3_or_1_3', - 'positive_enterprise_version': '3.4.0', - 'negative_enterprise_version': '3.2.0', + 'positive_openshift-enterprise_version': '3.4.0', + 'negative_openshift-enterprise_version': '3.2.0', 'positive_origin_version': '1.4.0', 'negative_origin_version': '1.2.0'}, {'name': 'oo_version_gte_3_4_or_1_4', - 'positive_enterprise_version': '3.5.0', - 'negative_enterprise_version': '3.3.0', + 'positive_openshift-enterprise_version': '3.5.0', + 'negative_openshift-enterprise_version': '3.3.0', 'positive_origin_version': '1.5.0', 'negative_origin_version': '1.3.0'}, {'name': 'oo_version_gte_3_5_or_1_5', - 'positive_enterprise_version': '3.6.0', - 'negative_enterprise_version': '3.4.0', + 'positive_openshift-enterprise_version': '3.6.0', + 'negative_openshift-enterprise_version': '3.4.0', 'positive_origin_version': '3.6.0', 'negative_origin_version': '1.4.0'}] def test_legacy_gte_filters(self): for test in self.legacy_gte_tests: - for deployment_type in ['enterprise', 'origin']: + for deployment_type in ['openshift-enterprise', 'origin']: # Test negative case per deployment_type self.assertFalse( self.openshift_version_filters._filters[test['name']]( @@ -70,3 +70,7 @@ class OpenShiftVersionTests(unittest.TestCase): self.assertFalse( self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)]( "{}.{}".format(major, minor))) + + def test_get_filters(self): + self.assertTrue( + self.openshift_version_filters.filters() == self.openshift_version_filters._filters) diff --git a/utils/docs/config.md 
b/utils/docs/config.md index 3677ffe2e..6d0c6896e 100644 --- a/utils/docs/config.md +++ b/utils/docs/config.md @@ -52,7 +52,6 @@ Indicates the version of configuration this file was written with. Current imple The OpenShift variant to install. Currently valid options are: * openshift-enterprise - * atomic-enterprise ### variant_version (optional)
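As a usage note on the deprecation checks added to `setup.py` earlier in this patch: `recursive_search` walks parsed playbook YAML collecting every value stored under a given key, and `deprecate_jinja2_in_when` then flags any `when:` condition that still contains Jinja2 delimiters. The following self-contained Python sketch demonstrates that traversal on a tiny sample task list (the sample data and variable names are invented for illustration; the playbook text is assumed to have been loaded with `yaml.safe_load`, as in the patch):

```python
# Demonstration of the traversal used by the new setup.py checks:
# collect `when:` values from parsed tasks and flag Jinja2 delimiters.
import yaml

SAMPLE_TASKS = yaml.safe_load("""
- name: ok task
  debug: msg=hello
  when: openshift_pkg_version is not defined
- name: deprecated style
  debug: msg=hello
  when: "{{ openshift_pkg_version is not defined }}"
""")


def recursive_search(search_list, field):
    """Collect values stored under `field` from a list of (possibly nested) dicts."""
    found = []
    for item in search_list:
        if isinstance(item, dict):
            for key, value in item.items():
                if key == field:
                    found.append(value)
                elif isinstance(value, list):
                    found.extend(recursive_search(value, field))
    return found


# Conditions written with Jinja2 delimiters are the ones the check reports.
flagged = [cond for cond in recursive_search(SAMPLE_TASKS, 'when')
           if isinstance(cond, str) and ('{{' in cond or '{%' in cond)]
assert flagged == ['{{ openshift_pkg_version is not defined }}']
```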