26 files changed, 472 insertions, 50 deletions
diff --git a/playbooks/cluster-operator/aws/components.yml b/playbooks/cluster-operator/aws/components.yml
new file mode 100644
index 000000000..8587aac45
--- /dev/null
+++ b/playbooks/cluster-operator/aws/components.yml
@@ -0,0 +1,24 @@
+---
+- name: Alert user to variables needed
+  hosts: localhost
+  tasks:
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- name: Setup the master node group
+  hosts: localhost
+  tasks:
+  - import_role:
+      name: openshift_aws
+      tasks_from: setup_master_group.yml
+
+- name: run the init
+  import_playbook: ../../init/main.yml
+
+- name: Include the components playbook to finish the hosted configuration
+  import_playbook: ../../common/private/components.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 86cde2844..3144e9ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -130,7 +130,7 @@
   # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
   - name: set a fact to include the registry certs playbook if needed
     set_fact:
-      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc != 0 }}"

 # Run the redeploy certs based upon the certificates. Defaults to False for insecure registries
 - when: (hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry | default(False)) | bool
@@ -165,3 +165,10 @@
       msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
     when:
     - __shared_resource_viewer_protected | default(false)
+
+- name: Upgrade Service Catalog
+  hosts: oo_first_master
+  roles:
+  - role: openshift_service_catalog
+    when:
+    - openshift_enable_service_catalog | default(true) | bool
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
index 089645d07..739be93c5 100644
--- a/playbooks/common/private/components.yml
+++ b/playbooks/common/private/components.yml
@@ -20,7 +20,9 @@
 - import_playbook: ../../openshift-hosted/private/config.yml

 - import_playbook: ../../openshift-web-console/private/config.yml
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9

 - import_playbook: ../../openshift-metrics/private/config.yml
   when: openshift_metrics_install_metrics | default(false) | bool
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 3e2fca8d4..1b2229baa 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -45,6 +45,7 @@
   vars:
     skip_version: True
     l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
     l_openshift_version_set_hosts: "all:!all"
     l_openshift_version_check_hosts: "all:!all"
   when:
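The new AWS components entry point only prints its two inputs (falling back to 'default' and 'us-east-1' when unset) rather than validating them; a minimal extra-vars sketch for invoking it, with placeholder values:

```yaml
# vars.yml -- illustrative inputs for playbooks/cluster-operator/aws/components.yml;
# the variable names come from the playbook above, both values are placeholders.
openshift_aws_clusterid: mycluster
openshift_aws_region: us-east-1
```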
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 2ab7d14a0..cb6bf4d11 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -8,8 +8,7 @@
 # values here. We do it in the OSEv3 group vars. Do we need to add
 # some logic here?

-- name: run the cluster deploy
-  import_playbook: ../../prerequisites.yml
+- import_playbook: ../../prerequisites.yml

 - name: run the cluster deploy
   import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 73c1926a0..44e3d00c0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,9 +26,6 @@
     - name: Gather facts for the new nodes
       setup:

-- import_playbook: ../../init/basic_facts.yml
-- import_playbook: ../../init/cluster_facts.yml
-
 # TODO(shadower): consider splitting this up so people can stop here
 # and configure their DNS if they have to.

@@ -43,7 +40,10 @@
     - openshift_openstack_external_nsupdate_keys is defined
     - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined

-- name: Prepare the Nodes in the cluster for installation
+
+- import_playbook: ../../init/basic_facts.yml
+
+- name: Optionally subscribe the RHEL nodes
   hosts: oo_all_hosts
   become: yes
   gather_facts: yes
@@ -63,6 +63,12 @@
     - ansible_distribution == "RedHat"
     - rh_subscribed is defined

+
+- name: Prepare the Nodes in the cluster for installation
+  hosts: oo_all_hosts
+  become: yes
+  gather_facts: yes
+  tasks:
   - name: Install dependencies
     import_role:
       name: openshift_openstack
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
index 1cc6d2375..4f9dd82de 100644
--- a/roles/kuryr/tasks/master.yaml
+++ b/roles/kuryr/tasks/master.yaml
@@ -1,6 +1,7 @@
 ---
 - name: Perform OpenShift ServiceAccount config
   include_tasks: serviceaccount.yaml
+  run_once: true

 - name: Create kuryr manifests tempdir
   command: mktemp -d
@@ -32,6 +33,7 @@
     namespace: "{{ kuryr_namespace }}"
     files:
     - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+  run_once: true

 - name: Apply Controller Deployment manifest
   oc_obj:
@@ -41,6 +43,7 @@
     namespace: "{{ kuryr_namespace }}"
     files:
     - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+  run_once: true

 - name: Apply kuryr-cni DaemonSet manifest
   oc_obj:
@@ -50,3 +53,4 @@
     namespace: "{{ kuryr_namespace }}"
     files:
     - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+  run_once: true
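The run_once additions above make each cluster-wide kuryr object get applied a single time per play instead of once per master; a minimal sketch of the semantics (the play and task below are hypothetical):

```yaml
# Minimal run_once sketch: the task executes on the first host of the batch
# and its result is applied to all hosts, so cluster-scoped API calls such as
# the oc_obj tasks above are not repeated on every master.
- hosts: masters
  tasks:
  - name: Create a cluster-scoped object once per play
    debug:
      msg: "executed a single time"
    run_once: true
```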
diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py
index 09ce55e8f..ce54debc2 100644
--- a/roles/lib_utils/action_plugins/sanity_checks.py
+++ b/roles/lib_utils/action_plugins/sanity_checks.py
@@ -54,6 +54,12 @@ class ActionModule(ActionBase):
     def template_var(self, hostvars, host, varname):
         """Retrieve a variable from hostvars and template it.
            If undefined, return None type."""
+        # We will set the current host and variable checked for easy debugging
+        # if there are any unhandled exceptions.
+        # pylint: disable=W0201
+        self.last_checked_var = varname
+        # pylint: disable=W0201
+        self.last_checked_host = host
         res = hostvars[host].get(varname)
         if res is None:
             return None
@@ -156,6 +162,11 @@ class ActionModule(ActionBase):
         # pylint: disable=W0201
         self.task_vars = task_vars or {}

+        # pylint: disable=W0201
+        self.last_checked_host = "none"
+        # pylint: disable=W0201
+        self.last_checked_var = "none"
+
         # self._task.args holds task parameters.
         # check_hosts is a parameter to this plugin, and should provide
         # a list of hosts.
@@ -172,7 +183,13 @@ class ActionModule(ActionBase):

         # We loop through each host in the provided list check_hosts
         for host in check_hosts:
-            self.run_checks(hostvars, host)
+            try:
+                self.run_checks(hostvars, host)
+            except Exception as uncaught_e:
+                msg = "last_checked_host: {}, last_checked_var: {};"
+                msg = msg.format(self.last_checked_host, self.last_checked_var)
+                msg += str(uncaught_e)
+                raise errors.AnsibleModuleError(msg)

         result["changed"] = False
         result["failed"] = False
diff --git a/roles/openshift_cloud_provider/defaults/main.yml b/roles/openshift_cloud_provider/defaults/main.yml
index 37cbf5603..cda6acd90 100644
--- a/roles/openshift_cloud_provider/defaults/main.yml
+++ b/roles/openshift_cloud_provider/defaults/main.yml
@@ -2,3 +2,4 @@
 openshift_gcp_project: ''
 openshift_gcp_prefix: ''
 openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network"
+openshift_gcp_multizone: False
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 9e1c31b1d..8b9c1b42a 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,11 +1,13 @@
 ---
 - name: check variables are passed
   fail:
-    msg: "Ensure correct variables are defined for gcp. {{ item }}"
-  when: item == ''
+    msg: "Ensure correct variables are defined for gcp. {{ item.name }}"
+  when: item.value == ''
   with_items:
-  - "{{ openshift_gcp_project }}"
-  - "{{ openshift_gcp_prefix }}"
+  - name: openshift_gcp_project
+    value: "{{ openshift_gcp_project }}"
+  - name: openshift_gcp_prefix
+    value: "{{ openshift_gcp_prefix }}"

 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create cloud config file
@@ -28,4 +30,4 @@
   - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
   - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
   - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
-  - { key: 'multizone', value: 'false' }
+  - { key: 'multizone', value: '{{ openshift_gcp_multizone | string }}' }
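With the hard-coded 'false' replaced by the new default, multizone support can now be toggled from the inventory; an illustrative group_vars entry (the file placement is an assumption, the variable is the one added above):

```yaml
# group_vars/OSEv3.yml -- illustrative: enables the multizone flag that
# roles/openshift_cloud_provider and roles/openshift_gcp now template into
# the cloud config.
openshift_gcp_multizone: True
```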
diff --git a/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml b/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml
index bf8d28a9b..624ad714e 100644
--- a/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml
+++ b/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml
@@ -14,10 +14,10 @@
 # and /etc/etcd/generated_certs directories.
 - set_fact:
     __etcd_ca_dir_hosts: "{{ __etcd_ca_host_stat.results
-                             | lib_utils_oo_collect('_ansible_delegated_vars.ansible_host',
+                             | lib_utils_oo_collect('_ansible_delegated_vars.inventory_hostname',
                                                     filters={'stat.path':'/etc/etcd/ca','stat.exists':True}) }}"
     __etcd_generated_certs_dir_hosts: "{{ __etcd_ca_host_stat.results
-                                          | lib_utils_oo_collect('_ansible_delegated_vars.ansible_host',
+                                          | lib_utils_oo_collect('_ansible_delegated_vars.inventory_hostname',
                                                                  filters={'stat.path':'/etc/etcd/generated_certs','stat.exists':True}) }}"
   run_once: true
diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml
index 18fc453b2..f0cbb2f32 100644
--- a/roles/openshift_gcp/defaults/main.yml
+++ b/roles/openshift_gcp/defaults/main.yml
@@ -56,3 +56,5 @@ openshift_gcp_node_group_config:

 openshift_gcp_startup_script_file: ''
 openshift_gcp_user_data_file: ''
+
+openshift_gcp_multizone: False
diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml b/roles/openshift_gcp/tasks/node_cloud_config.yml
index 4e982f497..c38a052ea 100644
--- a/roles/openshift_gcp/tasks/node_cloud_config.yml
+++ b/roles/openshift_gcp/tasks/node_cloud_config.yml
@@ -9,4 +9,4 @@
   - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
   - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
   - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
-  - { key: 'multizone', value: 'false' }
+  - { key: 'multizone', value: '{{ openshift_gcp_multizone | string }}' }
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 87ceb8103..c61742bc2 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -15,10 +15,17 @@ Role Variables
 --------------
 From this role:

-| Name                       | Default value         |                                                           |
-|----------------------------|-----------------------|-----------------------------------------------------------|
-| oreg_url                   | UNDEF (Optional)      | Default docker registry to use                            |
-| oreg_url_node              | UNDEF (Optional)      | Default docker registry to use, specifically on the node  |
+| Name                         | Default value         |                                                           |
+|------------------------------|-----------------------|-----------------------------------------------------------|
+| openshift_node_start_options | UNDEF (Optional)      | Options to pass to node start cmdline                     |
+| oreg_url                     | UNDEF (Optional)      | Default docker registry to use                            |
+| oreg_url_node                | UNDEF (Optional)      | Default docker registry to use, specifically on the node  |
+
+openshift_node_start_options can be used for passing any start node option, e.g.:
+
+--enable=kubelet,plugins
+
+Which would have a node running without kube-proxy and dns.

 Dependencies
 ------------
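Per the README addition, the option string is appended verbatim to the node service OPTIONS line (see configure-node-settings.yml below); an illustrative inventory entry that mirrors the README's own example:

```yaml
# group_vars sketch -- the option value is the README example above, which
# starts only the kubelet and plugins components (no kube-proxy, no dns).
openshift_node_start_options: "--enable=kubelet,plugins"
```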
diff --git a/roles/openshift_node/files/networkmanager/99-origin-dns.sh b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
index f4e48b5b7..acf3e2f38 100755
--- a/roles/openshift_node/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
@@ -116,8 +116,9 @@ EOF
       echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
       if ! grep -qw search ${NEW_RESOLV_CONF}; then
         echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
-      elif ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
-        sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
+      elif ! grep -q 'search cluster.local' ${NEW_RESOLV_CONF}; then
+        # cluster.local should be in first three DNS names so that glibc resolver would work
+        sed -i -e 's/^search \(.\+\)\( cluster\.local\)\{0,1\}$/search cluster.local \1/' ${NEW_RESOLV_CONF}
       fi
       cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
     fi
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index ebc1426d3..dcdbeb220 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
     create: true
   with_items:
   - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
+    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }} {{ openshift_node_start_options | default('') }}"
   - regex: '^CONFIG_FILE='
     line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
   - regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 75f1300e1..6c7e5b543 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -21,16 +21,15 @@ openshift_openstack_cluster_node_labels:

 openshift_openstack_install_debug_packages: false
 openshift_openstack_required_packages:
-  - docker
   - NetworkManager
-  - wget
-  - git
-  - net-tools
-  - bind-utils
-  - bridge-utils
 openshift_openstack_debug_packages:
   - bash-completion
+  - bind-utils
+  - bridge-utils
+  - git
+  - net-tools
   - vim-enhanced
+  - wget

 # container-storage-setup
 openshift_openstack_container_storage_setup:
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 7b705c2d4..34af652c2 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -73,13 +73,11 @@ objects:
         - name: HEKETI_EXECUTOR
           value: ${HEKETI_EXECUTOR}
         - name: HEKETI_FSTAB
-          value: /var/lib/heketi/fstab
+          value: ${HEKETI_FSTAB}
         - name: HEKETI_SNAPSHOT_LIMIT
           value: '14'
         - name: HEKETI_KUBE_GLUSTER_DAEMONSET
           value: '1'
-        - name: HEKETI_KUBE_NAMESPACE
-          value: ${HEKETI_KUBE_NAMESPACE}
         ports:
         - containerPort: 8080
         volumeMounts:
@@ -115,10 +113,10 @@ parameters:
   displayName: heketi executor type
   description: Set the executor type, kubernetes or ssh
   value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
-  displayName: Namespace
-  description: Set the namespace where the GlusterFS pods reside
-  value: default
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the fstab path, file that is populated with bricks that heketi creates
+  value: /var/lib/heketi/fstab
 - name: HEKETI_ROUTE
   displayName: heketi route name
   description: Set the hostname for the route URL
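Both heketi templates now expose the fstab path as a parameter instead of hard-coding it, and drop HEKETI_KUBE_NAMESPACE (presumably because newer heketi images can detect their namespace themselves); an illustrative parameter set for processing the template, where the fstab value simply repeats the new default:

```yaml
# Illustrative template parameters; HEKETI_FSTAB defaults to
# /var/lib/heketi/fstab per the parameter definition above.
HEKETI_EXECUTOR: kubernetes
HEKETI_FSTAB: /var/lib/heketi/fstab
```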
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3-pvcs
+  labels:
+    glusterfs: s3-pvcs-template
+    gluster-s3: pvcs-template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${META_PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  required: true
+- name: PVC_SIZE
+  displayName: Primary GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage
+  value: 2Gi
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  required: true
+- name: META_PVC_SIZE
+  displayName: Metadata GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage metadata
+  value: 1Gi
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
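A hedged example of values for the new PVC template's parameters; every name below is a placeholder, and the sizes simply repeat the template defaults:

```yaml
# Illustrative parameters for the gluster-s3-pvcs template above.
S3_ACCOUNT: demo
PVC: gluster-s3-demo-claim
PVC_SIZE: 2Gi
META_PVC: gluster-s3-demo-meta-claim
META_PVC_SIZE: 1Gi
CLUSTER_NAME: storage
```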
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3
+  labels:
+    glusterfs: s3-template
+    gluster-s3: template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+  spec:
+    ports:
+    - protocol: TCP
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: s3-pod
+    type: ClusterIP
+    sessionAffinity: None
+  status:
+    loadBalancer: {}
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+  spec:
+    to:
+      kind: Service
+      name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    annotations:
+      openshift.io/scc: privileged
+      description: Defines how to deploy gluster s3 object storage
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+    template:
+      metadata:
+        name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+        labels:
+          glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+          gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+      spec:
+        containers:
+        - name: gluster-s3
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          ports:
+          - name: gluster
+            containerPort: 8080
+            protocol: TCP
+          env:
+          - name: S3_ACCOUNT
+            value: "${S3_ACCOUNT}"
+          - name: S3_USER
+            value: "${S3_USER}"
+          - name: S3_PASSWORD
+            value: "${S3_PASSWORD}"
+          resources: {}
+          volumeMounts:
+          - name: gluster-vol1
+            mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+          - name: gluster-vol2
+            mountPath: "/mnt/gluster-object/gsmetadata"
+          - name: glusterfs-cgroup
+            readOnly: true
+            mountPath: "/sys/fs/cgroup"
+          terminationMessagePath: "/dev/termination-log"
+          securityContext:
+            privileged: true
+        volumes:
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: gluster-vol1
+          persistentVolumeClaim:
+            claimName: ${PVC}
+        - name: gluster-vol2
+          persistentVolumeClaim:
+            claimName: ${META_PVC}
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        serviceAccountName: default
+        serviceAccount: default
+        securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: S3_USER
+  displayName: S3 User
+  description: S3 user who can access the S3 storage account
+  required: true
+- name: S3_PASSWORD
+  displayName: S3 User Password
+  description: Password for the S3 user
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  value: gluster-s3-claim
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  value: gluster-s3-meta-claim
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
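The S3 template additionally needs image coordinates and account credentials; an illustrative parameter set (the image name and version in particular are assumptions, not values defined anywhere in this PR):

```yaml
# Illustrative parameters for the gluster-s3 template above; all values
# are placeholders.
IMAGE_NAME: gluster/gluster-object   # assumption, not defined in the PR
IMAGE_VERSION: latest                # assumption, not defined in the PR
S3_ACCOUNT: demo
S3_USER: demo-user
S3_PASSWORD: demo-password
```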
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterblock-provisioner
+  labels:
+    glusterfs: block-template
+    glusterblock: template
+  annotations:
+    description: glusterblock provisioner template
+    tags: glusterfs
+objects:
+- kind: ClusterRole
+  apiVersion: v1
+  metadata:
+    name: glusterblock-provisioner-runner
+    labels:
+      glusterfs: block-provisioner-runner-clusterrole
+      glusterblock: provisioner-runner-clusterrole
+  rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["routes"]
+    verbs: ["get", "list"]
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+      glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+  roleRef:
+    name: glusterblock-provisioner-runner
+  subjects:
+  - kind: ServiceAccount
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+      glusterblock: ${CLUSTER_NAME}-provisioner-dc
+    annotations:
+      description: Defines how to deploy the glusterblock provisioner pod.
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: glusterblock-provisioner
+        labels:
+          glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+      spec:
+        serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+        containers:
+        - name: glusterblock-provisioner
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: PROVISIONER_NAME
+            value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: NAMESPACE
+  displayName: glusterblock provisioner namespace
+  description: The namespace in which these resources are being created
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
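The PROVISIONER_NAME env var above is the string a StorageClass must reference to bind to this provisioner; a minimal sketch (metadata.name is a placeholder, and the PR's own Jinja template for this follows further below):

```yaml
# Minimal StorageClass sketch matching the provisioner deployed above.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-block
provisioner: gluster.org/glusterblock
```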
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index 8c5e1ded3..09850a2c2 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -35,6 +35,15 @@ objects:
       - name: glusterfs
         image: ${IMAGE_NAME}:${IMAGE_VERSION}
         imagePullPolicy: IfNotPresent
+        env:
+        - name: GB_GLFS_LRU_COUNT
+          value: "${GB_GLFS_LRU_COUNT}"
+        - name: TCMU_LOGDIR
+          value: "${TCMU_LOGDIR}"
+        resources:
+          requests:
+            memory: 100Mi
+            cpu: 100m
         volumeMounts:
         - name: glusterfs-heketi
           mountPath: "/var/lib/heketi"
@@ -83,7 +92,6 @@ objects:
           periodSeconds: 25
           successThreshold: 1
           failureThreshold: 15
-        resources: {}
         terminationMessagePath: "/dev/termination-log"
         volumes:
         - name: glusterfs-heketi
@@ -134,3 +142,13 @@ parameters:
   displayName: GlusterFS cluster name
   description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
   value: storage
+- name: GB_GLFS_LRU_COUNT
+  displayName: Maximum number of block hosting volumes
+  description: This value is to set maximum number of block hosting volumes.
+  value: "15"
+  required: true
+- name: TCMU_LOGDIR
+  displayName: Tcmu runner log directory
+  description: This value is to set tcmu runner log directory
+  value: "/var/log/glusterfs/gluster-block"
+  required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index 61b6a8c13..28cdb2982 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -15,6 +15,7 @@ objects:
     name: heketi-${CLUSTER_NAME}
     labels:
       glusterfs: heketi-${CLUSTER_NAME}-service
+      heketi: ${CLUSTER_NAME}-service
     annotations:
       description: Exposes Heketi service
   spec:
@@ -30,6 +31,7 @@ objects:
     name: ${HEKETI_ROUTE}
     labels:
      glusterfs: heketi-${CLUSTER_NAME}-route
+      heketi: ${CLUSTER_NAME}-route
   spec:
     to:
       kind: Service
@@ -40,6 +42,7 @@ objects:
     name: heketi-${CLUSTER_NAME}
     labels:
       glusterfs: heketi-${CLUSTER_NAME}-dc
+      heketi: ${CLUSTER_NAME}-dc
     annotations:
       description: Defines how to deploy Heketi
   spec:
@@ -55,6 +58,7 @@ objects:
         name: heketi-${CLUSTER_NAME}
         labels:
           glusterfs: heketi-${CLUSTER_NAME}-pod
+          heketi: ${CLUSTER_NAME}-pod
       spec:
         serviceAccountName: heketi-${CLUSTER_NAME}-service-account
         containers:
@@ -69,13 +73,11 @@ objects:
         - name: HEKETI_EXECUTOR
           value: ${HEKETI_EXECUTOR}
         - name: HEKETI_FSTAB
-          value: /var/lib/heketi/fstab
+          value: ${HEKETI_FSTAB}
         - name: HEKETI_SNAPSHOT_LIMIT
           value: '14'
         - name: HEKETI_KUBE_GLUSTER_DAEMONSET
           value: '1'
-        - name: HEKETI_KUBE_NAMESPACE
-          value: ${HEKETI_KUBE_NAMESPACE}
         ports:
         - containerPort: 8080
         volumeMounts:
@@ -114,10 +116,10 @@ parameters:
   displayName: heketi executor type
   description: Set the executor type, kubernetes or ssh
   value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
-  displayName: Namespace
-  description: Set the namespace where the GlusterFS pods reside
-  value: default
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the fstab path, file that is populated with bricks that heketi creates
+  value: /var/lib/heketi/fstab
 - name: HEKETI_ROUTE
   displayName: heketi route name
   description: Set the hostname for the route URL
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
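The new Jinja template is driven by glusterfs_block_storageclass_default and related flags; a hedged group_vars sketch, assuming the role's usual openshift_storage_glusterfs_* to glusterfs_* variable mapping (the prefixed names below are an assumption, not shown in this diff):

```yaml
# Assumed inventory-side names -- the diff only shows the unprefixed
# glusterfs_* variables that the template itself consumes.
openshift_storage_glusterfs_block_storageclass: True
openshift_storage_glusterfs_block_storageclass_default: False
```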
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index ca87807fe..095fb780f 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,10 +3,6 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: glusterfs-{{ glusterfs_name }}
-{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "true"
-{% endif %}
 provisioner: kubernetes.io/glusterfs
 parameters:
   resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
index 579b11bb7..565e9be98 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
@@ -31,6 +31,12 @@
       "port" : "{{ glusterfs_heketi_ssh_port }}",
       "user" : "{{ glusterfs_heketi_ssh_user }}",
       "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
-    }
+    },
+
+    "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+    "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+    "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+    "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
   }
 }
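The two new heketi.json.j2 settings are fed by glusterfs_block_host_vol_create and glusterfs_block_host_vol_size; a hedged group_vars sketch, again assuming the role's openshift_storage_glusterfs_* prefix mapping, with illustrative values:

```yaml
# Assumed inventory-side names for the heketi.json.j2 knobs above; 100 is a
# placeholder volume size, not a default taken from this diff.
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100
```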