65 files changed, 574 insertions, 191 deletions
@@ -1,3 +1,5 @@ .vault-pass .vagrant *.retry +tmp_role.yml +tmp_play.yml diff --git a/anslib/archive/gluster_paths.sh b/anslib/archive/gluster_paths.sh new file mode 100755 index 0000000..4c3ca0b --- /dev/null +++ b/anslib/archive/gluster_paths.sh @@ -0,0 +1,9 @@ +#! /bin/bash + +# This is not enough. We also need to separate /dev otherwise port clashes (may be cgroups). Not sure about the side effects. +function fixpath_func { + sed -i.orig -e "/ path: \"\/\(var\|etc\|run\)/ s/\//_/g ; s/\"_/\"\/var\/lib\/heketi\//" $1 +} + +export -f fixpath_func +find openshift-ansible/roles/openshift_storage_glusterfs/files/v3.7 -name glusterfs-template.yml -print0 | xargs -0 -L 1 -I {} bash -c 'fixpath_func "$@"' _ {} diff --git a/anslib/link_vars.sh b/anslib/link_vars.sh new file mode 100755 index 0000000..01a9fe9 --- /dev/null +++ b/anslib/link_vars.sh @@ -0,0 +1,26 @@ + +function mklink_func { +# if [ -d openshift-cluster ]; then +# mklink_func "openshift-cluster" +# fi + + name=$(basename "$1") +# [[ $name == "private" ]] && exit + [[ $name == "vars" ]] && exit + [[ $name == "roles" ]] && exit + + ( + cd $1 + path=.. + while [ ! -f "$path/link_vars.sh" ]; do + path="../$path" + done + path="../$path" + + ln -sf "$path/group_vars" + ln -sf "$path/host_vars" + ) +} + +export -f mklink_func +find openshift-ansible/playbooks -mindepth 0 -maxdepth 2 -type d -print0 | xargs -0 -L 1 -I {} bash -c 'mklink_func "$@"' _ {} diff --git a/anslib/openshift-ansible b/anslib/openshift-ansible -Subproject f676f1cab3046fa9a288e2b8f79cda066da3e8e +Subproject ec3bc59ab20c4c21b7cfd27065c1a9b811bd951 diff --git a/anslib/patches/ansible_broker-ds-sc.patch b/anslib/patches/ansible_broker-ds-sc.patch new file mode 100644 index 0000000..9f99056 --- /dev/null +++ b/anslib/patches/ansible_broker-ds-sc.patch @@ -0,0 +1,10 @@ +--- a/roles/ansible_service_broker/tasks/install.yml ++++ b/roles/ansible_service_broker/tasks/install.yml +@@ -211,6 +211,7 @@ + access_modes: + - ReadWriteOnce + volume_capacity: 1G ++ storage_class_name: glusterfs-storage + + - name: Search for existing Ansible Service Broker deployment config + oc_obj: diff --git a/anslib/patches/glusterfs-ds-mountrun.patch b/anslib/patches/glusterfs-ds-mountrun.patch new file mode 100644 index 0000000..02f7e15 --- /dev/null +++ b/anslib/patches/glusterfs-ds-mountrun.patch @@ -0,0 +1,25 @@ +--- a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml ++++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml +@@ -49,6 +49,8 @@ objects: + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" ++# - name: glusterfs-socket ++# mountPath: "/run/glusterd.socket" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc +@@ -98,7 +100,12 @@ objects: + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run +- emptyDir: {} ++ hostPath: ++ path: "/run/glusterd" ++# emptyDir: {} ++# - name: glusterfs-socket ++# hostPath: ++# path: "/run/glusterd.socket" + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" diff --git a/anslib/patches/template_broker-ds-badcheck.patch b/anslib/patches/template_broker-ds-badcheck.patch new file mode 100644 index 0000000..477ec66 --- /dev/null +++ b/anslib/patches/template_broker-ds-badcheck.patch @@ -0,0 +1,23 @@ +diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml +index d674d24..1eff3bc 100644 +--- a/roles/template_service_broker/tasks/install.yml ++++ b/roles/template_service_broker/tasks/install.yml +@@ -1,11 +1,12 @@ 
+ --- ++#DS + # Fact setting +-- name: Ensure that Template Service Broker has nodes to run on +- fail: +- msg: |- +- No schedulable nodes found matching node selector for Template Service Broker - '{{ template_service_broker_selector }}' +- when: +- - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(template_service_broker_selector) ++#- name: Ensure that Template Service Broker has nodes to run on ++# fail: ++# msg: |- ++# No schedulable nodes found matching node selector for Template Service Broker - '{{ template_service_broker_selector }}' ++# when: ++# - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(template_service_broker_selector) + + - name: Set default image variables based on openshift_deployment_type + include_vars: "{{ item }}" diff --git a/group_vars/OSEv3.yml b/group_vars/OSEv3.yml index 2f7fdc1..6a23094 100644 --- a/group_vars/OSEv3.yml +++ b/group_vars/OSEv3.yml @@ -1,59 +1,106 @@ ### Deployment Type deployment_type: "origin" -#openshift_release: "v1.4" -#openshift_image_tag: "v1.4.1" -openshift_release: "v1.5" -#openshift_image_tag: "v1.5.0" -openshift_image_tag: "v1.5.0-rc.0" -openshift_hosted_metrics_deployer_version: "v1.5.0-rc.0" - -containerized: true +#openshift_release: "v1.5" +openshift_release: "v3.7" +#openshift_image_tag: "v1.5.0-rc.0" +openshift_image_tag: "v3.7.1" +#openshift_hosted_metrics_deployer_version: "v1.5.0-rc.0" +#openshift_hosted_metrics_deployer_version: "v3.7.1" + +#containerized: true +containerized: false openshift_master_cluster_method: "native" os_firewall_use_firewalld: true +#Recommended to avoid: No package matching 'origin-docker-excluder-1.5.0*' found available +#enable_excluders: false +#enable_docker_excluder: false + + +### Network & DNS configuration +openshift_master_cluster_hostname: "{{ ands_openshift_lb }}" +#openshift_master_cluster_public_hostname: "{{ ands_openshift_lb }}" +openshift_master_default_subdomain: "{{ ands_openshift_subdomain }}" +openshift_master_ingress_ip_network_cidr: "{{ ands_openshift_ingress_network }}" +#openshift_portal_net: +#osm_host_subnet_length: + +openshift_ip: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" +openshift_public_ip: "{{ ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address') }}" +openshift_hostname: "{{ ansible_hostname }}" +openshift_public_hostname: "{{ ansible_hostname }}.{{ ands_cluster_domain }}" +#openshift_hostname: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" +#openshift_public_hostname: "{{ ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address') }}" + #Check configuration to fight dynamic IPs openshift_dns_ip: "{{ ands_ipfailover_vips[0] | ipaddr('address') }}" openshift_set_hostname: true openshift_set_node_ip: true -#Recommended to avoid: No package matching 'origin-docker-excluder-1.5.0*' found available -enable_excluders: false -enable_docker_excluder: false - -### Configuration -osm_use_cockpit: true -osm_cockpit_plugins: ['cockpit-kubernetes'] -osn_storage_plugin_deps: ['ceph', 'glusterfs'] -openshift_hosted_metrics_deploy: true -ppenshift_hosted_metrics_storage_kind: dynamic -#openshift_use_manageiq: true -openshift_install_examples: true - -# Required for IPFailover -openshift_clock_enabled: true +### Node configuration +openshift_schedulable: true +openshift_node_labels: "{{ ands_openshift_labels }}" +#openshift_hosted_infra_selector: "region=infra" # Fine tunning openshift_master_pod_eviction_timeout: 30s + ### Authentication 
openshift_master_identity_providers: [{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] openshift_master_htpasswd_users: {'pdv': '$apr1$ACvj6uUa$Nm1Vq8hZq3RzTtaYpAHv01', 'csa': '$apr1$IqEwdnzy$UAdd8ZSFnXommBbj29w3c0'} -# Network & DNS configuration -openshift_master_cluster_hostname: "{{ ands_openshift_lb }}" -#openshift_master_cluster_public_hostname: "{{ ands_openshift_lb }}" -openshift_master_default_subdomain: "{{ ands_openshift_subdomain }}" -openshift_master_ingress_ip_network_cidr: "{{ ands_openshift_ingress_network }}" -# Node configuration -openshift_schedulable: true +### Certificates & Security +openshift_ca_cert_expire_days: 3650 +openshift_hosted_registry_cert_expire_days: 3650 +openshift_node_cert_expire_days: 3650 +openshift_master_cert_expire_days: 3650 +etcd_ca_default_days: 3650 -openshift_ip: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" -openshift_hostname: "{{ ansible_hostname }}" -openshift_public_hostname: "{{ ansible_hostname }}.{{ ands_cluster_domain }}" +### Dynamic Storage +openshift_storage_glusterfs_image: chsa/gluster-centos +openshift_storage_glusterfs_version: "{{ glusterfs_version }}" -#openshift_public_ip: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" -#openshift_hostname: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" -#openshift_public_hostname: "{{ ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address') }}" -openshift_node_labels: "{{ ands_openshift_labels }}" +#openshift_storage_glusterfs_version: '3.12.5' # Latest 3.10.1 +#openshift_storage_glusterfs_is_native: True + +#openshift_storage_glusterfs_is_native: False +#openshift_storage_glusterfs_is_missing: False +#openshift_storage_glusterfs_heketi_is_native: True + +#openshift_storage_glusterfs_heketi_url: +#openshift_storage_glusterfs_heketi_is_missing: False +#openshift_storage_glusterfs_heketi_executor: 'ssh' +#openshift_storage_glusterfs_heketi_ssh_port: 22 +#openshift_storage_glusterfs_heketi_ssh_user: 'root' +#openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ omit }}" + +### Modules & Configuration +openshift_master_dynamic_provisioning_enabled: true +#openshift_metrics_install_metrics: false +openshift_metrics_install_metrics: true +openshift_metrics_storage_kind: dynamic +openshift_metrics_cassanda_pvc_storage_class_name: glusterfs-storage +#openshift_metrics_storage_volume_size: +#openshift_metrics_hawkular_hostname: https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics + +#Problematic and resource intensive +#openshift_logging_install_logging: true +#openshift_logging_storage_kind: dynamic +#openshift_logging_es_pvc_storage_class_name: glusterfs-storage # Does not work +#openshift_logging_es_pvc_size: 1Gi +#openshift_master_logging_public_url: + +#Catalog services +#openshift_enable_service_catalog: false +#ansible_service_broker_install: false +#openshift_hosted_etcd_storage_kind +#openshift_hosted_etcd_storage_volume_size + +openshift_install_examples: true + +# Required for IPFailover +openshift_clock_enabled: true + diff --git a/group_vars/all.yml b/group_vars/all.yml index f29206f..aef2251 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -1,3 +1,4 @@ ansible_ssh_user: root -glusterfs_version: 39 +ansible_ssh_private_key_file: /home/csa/.ssh/id_dsa +glusterfs_version: 312 diff --git a/group_vars/ands.yml b/group_vars/ands.yml index 58570c6..d3121c1 
100644 --- a/group_vars/ands.yml +++ b/group_vars/ands.yml @@ -2,4 +2,4 @@ #ands_master_id: "{{ ('masters' in group_names) | ternary(groups.masters.index(('masters' in group_names) | ternary(inventory_hostname, groups.masters[0])), -1) }}" ands_storage_hostname: "{{ ands_storage_network | default(false) | ternary(ands_storage_network | default('') | ipaddr(ands_host_id) | ipaddr('address'), ansible_fqdn) }}" - +ands_configure_heketi: false diff --git a/group_vars/glusterfs.yml b/group_vars/glusterfs.yml new file mode 100644 index 0000000..0593d31 --- /dev/null +++ b/group_vars/glusterfs.yml @@ -0,0 +1,6 @@ +# This should be here, the variables from the role are not propogated to hostvars + +glusterfs_ip: "{{ ands_storage_network | default(false) | ternary(ands_storage_network | default('') | ipaddr(ands_host_id) | ipaddr('address'), openshift_ip) }}" +#glusterfs_devices: "[ {{ ands_heketi_dev }} ]" + +glusterfs_devices: [ "/dev/mapper/ands-ands_heketi" ] diff --git a/group_vars/staging.yml b/group_vars/staging.yml index 72683ff..b35440a 100644 --- a/group_vars/staging.yml +++ b/group_vars/staging.yml @@ -5,7 +5,7 @@ ands_cluster_domain: ipe.kit.edu #ands_openshift_subdomain: openshift.ipe.kit.edu ands_openshift_lb: openshift.suren.me ands_openshift_subdomain: openshift.suren.me -ands_openshift_network: 192.168.212.0/24 +ands_openshift_network: 192.168.213.0/24 ands_openshift_public_network: 192.168.226.0/24 ands_openshift_ingress_network: 192.168.216.0/24 @@ -21,7 +21,5 @@ vagrant_cpu_cores: 4 vagrant_mem_size: 16 vagrant_disk_size: 240 - -#ands_disable_dynamic_provisioning: true #ands_provision_without_dns: true ands_prefer_docker: true diff --git a/group_vars/testing.yml b/group_vars/testing.yml index ac7ee89..f0e4770 100644 --- a/group_vars/testing.yml +++ b/group_vars/testing.yml @@ -5,7 +5,7 @@ ands_openshift_lb: katrin.suren.me #ands_openshift_subdomain: katrin.suren.me ands_openshift_subdomain: apps.suren.me #ands_openshift_network: 192.168.26.0/24 -ands_openshift_network: 192.168.12.0/24 +ands_openshift_network: 192.168.13.0/24 ands_openshift_public_network: 192.168.26.0/24 ands_openshift_ingress_network: 192.168.16.0/24 @@ -18,3 +18,5 @@ katrin_openvpn_network: "192.168.0.0/16" vagrant_hostname_template: ipekatrin +#ands_provision_without_dns: true +ands_prefer_docker: true diff --git a/inventories/staging.erb b/inventories/staging.erb index 5779304..dc3bcb2 100644 --- a/inventories/staging.erb +++ b/inventories/staging.erb @@ -4,20 +4,30 @@ [simple_storage_nodes] 192.168.226.[3:3] +[external_storage_servers] +#192.168.226.[4:4] + [simple_nodes] [staging:children] nodes +external_storage_servers vagrant [virtual:children] nodes +external_storage_servers + [OSEv3:children] masters nodes etcd +[glusterfs:children] +masters +simple_storage_nodes + [etcd:children] masters simple_storage_nodes @@ -31,11 +41,22 @@ masters simple_storage_nodes simple_nodes + +#[lb] +#master1.example.com +#[nfs] +#master1.example.com +#[glusterfs_registry] +#192.168.10.14 glusterfs_ip=192.168.10.14 glusterfs_devices='[ "/dev/xvdc", "/dev/xvdd" ]' + + [ands_servers:children] nodes +external_storage_servers [ands_storage_servers:children] storage_nodes +external_storage_servers [ands:children] ands_servers diff --git a/inventories/testing.erb b/inventories/testing.erb index 0f6e2c5..b8b5f48 100644 --- a/inventories/testing.erb +++ b/inventories/testing.erb @@ -5,22 +5,31 @@ ipekatrin[1:2].katrin.kit.edu ipekatrin[3:3].katrin.kit.edu #ipetest.katrin.kit.edu ands_host_id=5 +[external_storage_servers] 
+#ipekatrin[4:4].katrin.kit.edu + [simple_nodes] #ipekatrin[3:3].katrin.kit.edu #strnage_name.katrin.kit.edu ands_host_id=1 [testing:children] nodes +external_storage_servers vagrant [virtual:children] nodes +external_storage_servers [OSEv3:children] masters nodes etcd +[glusterfs:children] +masters +simple_storage_nodes + [etcd:children] masters simple_storage_nodes @@ -36,9 +45,11 @@ simple_nodes [ands_servers:children] nodes +external_storage_servers [ands_storage_servers:children] storage_nodes +external_storage_servers [ands:children] ands_servers @@ -19,6 +19,7 @@ Actions: local - configure local ssh client prepare - perform all required pre-configuration before setting openshift openshift - setup OpenShift cluster + gluster - configure gluster software and volumes configure - configures OpenShift cluster (Storage, Users, OpenVPN tunnels) projects - installs configuration files and OpenShift resources for KaaS and other configured projects @@ -39,7 +40,7 @@ Actions: Tasks: hostnames, users, ssh, storage, heketi Custom actions - playbook.yml - execute the specified playbook + playbook.yml - execute the specified playbook (after ands_facts) role - generates temporary playbook and executes the role END @@ -57,7 +58,22 @@ apply() { shift 1 if [ -f "$action" ]; then - playbook=$action + if [[ "$action" == anslib/* ]]; then + echo "Executing a library playbook '$action'" + cat <<END > playbooks/tmp_play.yml +- name: Common setup procedures + hosts: $group + remote_user: root + roles: + - ands_facts + +- import_playbook: ../$action +END + playbook="playbooks/tmp_play.yml" + clean="playbooks/tmp_play.*" + else + playbook=$action + fi elif [ -d "roles/$action" ]; then role=$action else @@ -80,7 +96,8 @@ apply() { END fi - ansible-playbook --vault-password-file .vault-pass -i $inventory $playbook $@ +# ansible-playbook -vvv --vault-password-file .vault-pass -i $inventory $playbook $@ + ansible-playbook --vault-password-file .vault-pass -i $inventory $playbook $@ if [ -n "$clean" ]; then rm -rf "$clean" diff --git a/playbooks/ands-gluster.yml b/playbooks/ands-gluster.yml new file mode 100644 index 0000000..8aa30fc --- /dev/null +++ b/playbooks/ands-gluster.yml @@ -0,0 +1,15 @@ +- name: Common setup procedures + hosts: ands + roles: + - role: ands_facts + + +- name: Configure GlusterFS cluster + hosts: ands_servers + roles: + - role: glusterfs + vars: + glusterfs_network: "{{ ands_storage_network }}" + glusterfs_servers: "{{ ands_storage_servers }}" + glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs" + glusterfs_domains: "{{ ands_storage_domains }}" diff --git a/playbooks/ands-prepare.yml b/playbooks/ands-prepare.yml index 9041563..b3a025b 100644 --- a/playbooks/ands-prepare.yml +++ b/playbooks/ands-prepare.yml @@ -36,12 +36,12 @@ vars: docker_exclude_vgs: "{{ [ ands_data_vg ] }}" -- name: Configure GlusterFS cluster - hosts: ands_servers - roles: - - role: glusterfs - vars: - glusterfs_network: "{{ ands_storage_network }}" - glusterfs_servers: "{{ ands_storage_servers }}" - glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs" - glusterfs_domains: "{{ ands_storage_domains }}" +#- name: Configure GlusterFS cluster +# hosts: ands_servers +# roles: +# - role: glusterfs +# vars: +# glusterfs_network: "{{ ands_storage_network }}" +# glusterfs_servers: "{{ ands_storage_servers }}" +# glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs" +# glusterfs_domains: "{{ ands_storage_domains }}" diff --git a/playbooks/ands-vm-conf.yml b/playbooks/ands-vm-conf.yml new file mode 100644 index 
0000000..2b1020b --- /dev/null +++ b/playbooks/ands-vm-conf.yml @@ -0,0 +1,5 @@ +- name: Common setup procedures + hosts: ands_servers + roles: + - role: ands_vagrant_vmconf + diff --git a/playbooks/ands-vm-setup.yml b/playbooks/ands-vm-setup.yml new file mode 100644 index 0000000..d97916d --- /dev/null +++ b/playbooks/ands-vm-setup.yml @@ -0,0 +1,5 @@ +- name: Common setup procedures + hosts: vagrant + roles: + - role: ands_vagrant_vm + diff --git a/playbooks/openshift-deploy-cluster.yml b/playbooks/openshift-deploy-cluster.yml new file mode 120000 index 0000000..2a18fca --- /dev/null +++ b/playbooks/openshift-deploy-cluster.yml @@ -0,0 +1 @@ +../anslib/openshift-ansible/playbooks/deploy_cluster.yml
\ No newline at end of file diff --git a/playbooks/openshift-install.yml b/playbooks/openshift-install.yml index edbd2d8..f3a81ea 100644 --- a/playbooks/openshift-install.yml +++ b/playbooks/openshift-install.yml @@ -12,8 +12,12 @@ vars: node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}" +- import_playbook: ../anslib/openshift-ansible/playbooks/prerequisites.yml -- include: ../anslib/openshift-ansible/playbooks/byo/config.yml +#- include: ../anslib/openshift-ansible/playbooks/byo/config.yml +#- include: ../anslib/openshift-ansible/playbooks/deploy_cluster.yml +- import_playbook: ../anslib/openshift-ansible/playbooks/deploy_cluster.yml +#- import_playbook: openshift-deploy-cluster.yml - name: Remove temporary entries in /etc/hosts hosts: nodes:!masters diff --git a/playbooks/openshift-redeploy-certificates.yml b/playbooks/openshift-redeploy-certificates.yml index ebc5150..f812372 120000 --- a/playbooks/openshift-redeploy-certificates.yml +++ b/playbooks/openshift-redeploy-certificates.yml @@ -1 +1 @@ -../anslib/openshift-ansible/playbooks/byo/openshift-cluster/redeploy-certificates.yml
\ No newline at end of file +../anslib/openshift-ansible/playbooks/redeploy-certificates.yml
\ No newline at end of file diff --git a/playbooks/openshift-setup-projects.yml b/playbooks/openshift-setup-projects.yml index 46d6767..a8af9c1 100644 --- a/playbooks/openshift-setup-projects.yml +++ b/playbooks/openshift-setup-projects.yml @@ -15,6 +15,7 @@ hosts: masters roles: - { role: ands_openshift, subrole: users } + - { role: ands_openshift, subrole: storage } - { role: ands_kaas } vars: kaas_projects: "{{ ands_openshift_projects.keys() }}" diff --git a/roles/ands_facts/defaults/main.yml b/roles/ands_facts/defaults/main.yml index 257685d..ac61876 100644 --- a/roles/ands_facts/defaults/main.yml +++ b/roles/ands_facts/defaults/main.yml @@ -1,11 +1,11 @@ +ands_configure_heketi: false ands_data_device_default_threshold: 10 -ands_empty_lv: { 'vg': '' } +ands_storage_servers: "{{ groups.ands_storage_servers | map('extract', hostvars, 'ands_storage_hostname') | list }}" +#openshift_storage_nodes: "{{ groups.storage_nodes | map('extract', hostvars, 'ands_storage_hostname') | list }}" +ands_empty_lv: { 'vg': '' } ands_data_lv: "ands_data" ands_data_vg: "{{ ( ansible_lvm['lvs'][ands_data_lv] | default(ands_empty_lv) )['vg'] }}" ands_heketi_lv: "ands_heketi" ands_heketi_vg: "{{ ( ansible_lvm['lvs'][ands_heketi_lv] | default(ands_empty_lv) )['vg'] }}" - -ands_storage_servers: "{{ groups.ands_storage_servers | map('extract', hostvars, 'ands_storage_hostname') | list }}" -#openshift_storage_nodes: "{{ groups.storage_nodes | map('extract', hostvars, 'ands_storage_hostname') | list }}" diff --git a/roles/ands_facts/tasks/detect_data_path.yml b/roles/ands_facts/tasks/detect_data_path.yml index 0837e12..c509d1f 100644 --- a/roles/ands_facts/tasks/detect_data_path.yml +++ b/roles/ands_facts/tasks/detect_data_path.yml @@ -7,4 +7,4 @@ - not ands_data_path is defined - ansible_lvm.lvs[ands_data_lv] is defined - ansible_lvm.lvs[ands_data_lv].size_g > ( ands_data_device_threshold | default(ands_data_device_default_threshold) ) - - item.device == "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}" + - item.device == ands_data_dev diff --git a/roles/ands_facts/tasks/main.yml b/roles/ands_facts/tasks/main.yml index 52cc5bc..cf995a0 100644 --- a/roles/ands_facts/tasks/main.yml +++ b/roles/ands_facts/tasks/main.yml @@ -1,11 +1,11 @@ - include_vars: dir="vars" -- include: detect_data_path.yml - when: not ands_data_path is defined - - name: Detect Heketi set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}" - when: ansible_lvm.lvs[ands_heketi_lv] is defined + when: + - ands_configure_heketi + - ands_heketi_domain is defined + - ansible_lvm.lvs[ands_heketi_lv] is defined - name: Set some facts set_fact: @@ -31,5 +31,29 @@ ands_heketi_lv: "{{ ands_heketi_lv }}" when: ands_heketi_lv != "" +- name: Set some facts + set_fact: + ands_data_dev: "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}" + when: + - ands_data_vg != "" + - ands_data_lv != "" + +- name: set some facts + set_fact: + ands_heketi_dev: "/dev/mapper/{{ands_heketi_vg}}-{{ands_heketi_lv}}" + when: + - ands_heketi_vg != "" + - ands_heketi_lv != "" + +- name: set some facts + set_fact: + glusterfs_devices: [ "{{ ands_heketi_dev }}" ] + when: + - ands_heketi_vg != "" + - ands_heketi_lv != "" + +- include_tasks: detect_data_path.yml + when: not ands_data_path is defined + #- command: yum-complete-transaction --cleanup-only diff --git a/roles/ands_kaas/tasks/do_project.yml b/roles/ands_kaas/tasks/do_project.yml new file mode 100644 index 0000000..61b91d2 --- /dev/null +++ b/roles/ands_kaas/tasks/do_project.yml @@ -0,0 +1,62 @@ 
+--- +- name: Ensure OpenShift template directory exists + file: path="{{ kaas_template_path }}" state="directory" mode=0755 owner=root group=root + +- name: Configure KaaS volumes + include_tasks: volume.yml + run_once: true +# delegate_to: "{{ groups.masters[0] }}" + with_dict: "{{ kaas_project_config.volumes | default(kaas_openshift_volumes) }}" + loop_control: + loop_var: osv + vars: + query: "[*].volumes.{{osv.value.volume}}.mount" + mntpath: "{{ (ands_storage_domains | json_query(query)) }}" + path: "{{ mntpath[0] ~ (osv.value.path | default('')) }}" + name: "{{osv.key}}" + volume: "{{osv.value}}" + when: ( mntpath | length ) > 0 + +- name: Check if static configuration exists + local_action: stat path="{{ kaas_project_path }}/files/" + register: result + +- name: Search static configuration + include_tasks: search.yml + when: result.stat.exists + +- name: Configure KaaS files + include_tasks: file.yml + run_once: true +# delegate_to: "{{ groups.masters[0] }}" + with_items: "{{ kaas_project_config.files | default(ands_openshift_files) }}" + loop_control: + loop_var: file + vars: + pvar: "kaas_{{ file.osv }}_path" + path: "{{ hostvars[inventory_hostname][pvar] }}/{{ file.path }}" + when: file.osv in ( kaas_project_config.volumes | default(kaas_openshift_volumes) ) + +- name: Load OpenSSL keys + include_tasks: keys.yml +# delegate_to: "{{ groups.masters[0] }}" + run_once: true + with_dict: "{{ kaas_project_config.pods }}" + loop_control: + loop_var: pod + +- name: "Run OC script" + include_tasks: ocscript.yml +# delegate_to: "{{ groups.masters[0] }}" + run_once: true + when: kaas_project_config.oc is defined + +- name: "Configure all templates" + include_tasks: templates.yml +# delegate_to: "{{ groups.masters[0] }}" + run_once: true + when: + - kaas_project_config.oc is undefined + - kaas_project_config.pods != {} + + diff --git a/roles/ands_kaas/tasks/main.yml b/roles/ands_kaas/tasks/main.yml index c9fb857..0931f80 100644 --- a/roles/ands_kaas/tasks/main.yml +++ b/roles/ands_kaas/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: Provision OpenShift resources & configurations -# include: only_templates.yml - include: project.yml +# include_tasks: only_templates.yml + include_tasks: project.yml run_once: true - delegate_to: "{{ groups.masters[0] }}" +# delegate_to: "{{ groups.masters[0] }}" with_items: "{{ kaas_projects }}" loop_control: loop_var: kaas_project diff --git a/roles/ands_kaas/tasks/ocitem.yml b/roles/ands_kaas/tasks/ocitem.yml index f21e8cd..addb249 100644 --- a/roles/ands_kaas/tasks/ocitem.yml +++ b/roles/ands_kaas/tasks/ocitem.yml @@ -1,13 +1,13 @@ --- - name: OpenShift templates - include: templates.yml + include_tasks: templates.yml run_once: true vars: kaas_template_glob: "{{ ocitem.template }}" when: ocitem.template is defined - name: OpenShift commands - include: oc.yml - delegate_to: "{{ groups.masters[0] }}" + include_tasks: oc.yml +# delegate_to: "{{ groups.masters[0] }}" run_once: true when: ocitem.oc is defined diff --git a/roles/ands_kaas/tasks/ocscript.yml b/roles/ands_kaas/tasks/ocscript.yml index 4927de4..6890f43 100644 --- a/roles/ands_kaas/tasks/ocscript.yml +++ b/roles/ands_kaas/tasks/ocscript.yml @@ -1,6 +1,6 @@ --- -- include: ocitem.yml - delegate_to: "{{ groups.masters[0] }}" +- include_tasks: ocitem.yml +# delegate_to: "{{ groups.masters[0] }}" run_once: true with_items: "{{ kaas_project_config.oc }}" loop_control: diff --git a/roles/ands_kaas/tasks/project.yml b/roles/ands_kaas/tasks/project.yml index 002596b..40b5180 100644 --- 
a/roles/ands_kaas/tasks/project.yml +++ b/roles/ands_kaas/tasks/project.yml @@ -4,73 +4,24 @@ when: "'{{kaas_project_path}}/vars/globals.yml' | is_file" - name: Load variables - include_vars: dir="{{kaas_project_path}}/vars" name="kaas_project_config" + include_vars: dir="{{kaas_project_path}}/vars" name="var_{{kaas_project}}_config" when: "'{{kaas_project_path}}/vars' | is_dir" -- name: Ensure OpenShift template directory exists - file: path="{{ kaas_template_path }}" state="directory" mode=0755 owner=root group=root - -- name: Configure KaaS volumes - include: volume.yml - run_once: true - delegate_to: "{{ groups.masters[0] }}" - with_dict: "{{ kaas_project_config.volumes | default(kaas_openshift_volumes) }}" - loop_control: - loop_var: osv - vars: - query: "[*].volumes.{{osv.value.volume}}.mount" - mntpath: "{{ (ands_storage_domains | json_query(query)) }}" - path: "{{ mntpath[0] ~ (osv.value.path | default('')) }}" - name: "{{osv.key}}" - volume: "{{osv.value}}" - when: ( mntpath | length ) > 0 - -- name: Copy static configuration - include: sync_all.yml - run_once: true - delegate_to: "{{ groups.masters[0] }}" - with_items: "{{ lookup('pipe', search).split('\n') }}" - loop_control: - loop_var: osv_path +- set_fact: "var_{{kaas_project}}_config={{var_empty}}" vars: - search: "find {{ kaas_project_path }}/files/ -type d -mindepth 1 -maxdepth 1" - osv: "{{ osv_path | basename }}" - pvar: "kaas_{{ osv }}_path" - local_path: "{{ osv_path }}" - remote_path: "{{ hostvars[inventory_hostname][pvar] }}" - when: - - osv in kaas_openshift_volumes - - hostvars[inventory_hostname][pvar] is defined - -- name: Configure KaaS files - include: file.yml - run_once: true - delegate_to: "{{ groups.masters[0] }}" - with_items: "{{ kaas_project_config.files | default(ands_openshift_files) }}" - loop_control: - loop_var: file + var_empty: + pods: {} + var_name: "var_{{kaas_project}}_config" + when: hostvars[inventory_hostname][var_name] is not defined + +#- debug: msg="{{kaas_project_path}}" +#- debug: +# msg="{{kaas_project_config}}" +# vars: +# var_name: "var_{{kaas_project}}_config" +# kaas_project_config: "{{hostvars[inventory_hostname][var_name]}}" + +- include_tasks: do_project.yml vars: - pvar: "kaas_{{ file.osv }}_path" - path: "{{ hostvars[inventory_hostname][pvar] }}/{{ file.path }}" - when: file.osv in ( kaas_project_config.volumes | default(kaas_openshift_volumes) ) - -- name: Load OpenSSL keys - include: keys.yml - delegate_to: "{{ groups.masters[0] }}" - run_once: true - with_dict: "{{ kaas_project_config.pods }}" - loop_control: - loop_var: pod - -- name: "Run OC script" - include: ocscript.yml - delegate_to: "{{ groups.masters[0] }}" - run_once: true - when: kaas_project_config.oc is defined - -- name: "Configure all templates" - include: templates.yml - delegate_to: "{{ groups.masters[0] }}" - run_once: true - when: kaas_project_config.oc is undefined - + var_name: "var_{{kaas_project}}_config" + kaas_project_config: "{{hostvars[inventory_hostname][var_name]}}" diff --git a/roles/ands_kaas/tasks/search.yml b/roles/ands_kaas/tasks/search.yml new file mode 100644 index 0000000..e54c42b --- /dev/null +++ b/roles/ands_kaas/tasks/search.yml @@ -0,0 +1,16 @@ +- name: Copy static configuration + include_tasks: sync_all.yml + run_once: true +# delegate_to: "{{ groups.masters[0] }}" + with_items: "{{ lookup('pipe', search).split('\n') }}" + loop_control: + loop_var: osv_path + vars: + search: "find {{ kaas_project_path }}/files/ -type d -mindepth 1 -maxdepth 1" + osv: "{{ osv_path | basename }}" + 
pvar: "kaas_{{ osv }}_path" + local_path: "{{ osv_path }}" + remote_path: "{{ hostvars[inventory_hostname][pvar] }}" + when: + - osv in kaas_openshift_volumes + - hostvars[inventory_hostname][pvar] is defined diff --git a/roles/ands_kaas/tasks/sync.yml b/roles/ands_kaas/tasks/sync.yml index 399cb66..07764ca 100644 --- a/roles/ands_kaas/tasks/sync.yml +++ b/roles/ands_kaas/tasks/sync.yml @@ -4,5 +4,5 @@ register: result - name: "Sync '{{ item_name }}'" - synchronize: src="{{ item_src }}" dest="{{ remote_path }}/" archive=yes + local_action: synchronize src="{{ item_src }}" dest="{{ remote_path }}/" archive=yes when: (result.stat.exists == False) or (kaas_resync | default(false)) diff --git a/roles/ands_kaas/tasks/sync_all.yml b/roles/ands_kaas/tasks/sync_all.yml index 58a1710..2c7bae1 100644 --- a/roles/ands_kaas/tasks/sync_all.yml +++ b/roles/ands_kaas/tasks/sync_all.yml @@ -2,7 +2,7 @@ - name: "Analyze '{{ local_path | basename }}'" # debug: msg="{{ local_path }} - {{ item_name }} - {{ item }}" - include: sync.yml + include_tasks: sync.yml run_once: true with_items: "{{ lookup('pipe', filesearch).split('\n') }}" vars: diff --git a/roles/ands_kaas/tasks/templates.yml b/roles/ands_kaas/tasks/templates.yml index 75d43f3..e1612bc 100644 --- a/roles/ands_kaas/tasks/templates.yml +++ b/roles/ands_kaas/tasks/templates.yml @@ -8,13 +8,15 @@ - "{{ role_path }}/templates/{{ kaas_template_glob | default('*') }}.j2" - "{{ kaas_project_path }}/templates/{{ kaas_template_glob | default('*') }}.j2" +#- debug: msg="{{ results }}" + - name: "Sort and execute KaaS templates" - include: "template.yml" - delegate_to: "{{ groups.masters[0] }}" + include_tasks: "template.yml" +# delegate_to: "{{ groups.masters[0] }}" run_once: true with_items: "{{ sorted_tmpl }}" vars: - sorted_tmpl: "{{ results | json_query('results[*].stdout_lines') | sum(start=[]) | map('basename') | sort | unique }}" + sorted_tmpl: "{{ (results.results[0] is defined) | ternary (results | json_query('results[*].stdout_lines') | sum(start=[]) | map('basename') | sort | unique, []) }}" loop_control: loop_var: tmpl_name diff --git a/roles/ands_openshift/defaults/main.yml b/roles/ands_openshift/defaults/main.yml index 857c389..e473b98 100644 --- a/roles/ands_openshift/defaults/main.yml +++ b/roles/ands_openshift/defaults/main.yml @@ -1,8 +1,10 @@ -openshift_all_subroles: "{{ [ 'hostnames', 'users', 'ssh', 'storage', 'heketi' ] }}" +openshift_common_subroles: "{{ [ 'hostnames', 'users', 'storage' ] }}" +openshift_heketi_subroles: "{{ [ 'ssh', 'heketi' ] }}" +openshift_all_subroles: "{{ ands_configure_heketi | default(False) | ternary(openshift_common_subroles + openshift_heketi_subroles, openshift_common_subroles) }}" + openshift_subroles: "{{ ( subrole is defined ) | ternary( [ subrole ], openshift_all_subroles ) }}" openshift_namespace: "default" -ands_disable_dynamic_provisioning: false ssh_template_path: "{{ ands_paths.provision }}/ssh/" storage_template_path: "{{ ands_paths.provision }}/gfs/" diff --git a/roles/ands_openshift/tasks/heketi.yml b/roles/ands_openshift/tasks/heketi.yml index 149f85d..7d2c4ed 100644 --- a/roles/ands_openshift/tasks/heketi.yml +++ b/roles/ands_openshift/tasks/heketi.yml @@ -5,7 +5,7 @@ with_items: - heketi-client - - include: heketi_resources.yml + - include_tasks: heketi_resources.yml run_once: true delegate_to: "{{ groups.masters[0] }}" when: ansible_lvm.lvs.{{ ands_heketi_lv }} is defined diff --git a/roles/ands_openshift/tasks/heketi_resources.yml b/roles/ands_openshift/tasks/heketi_resources.yml index 
06ae6b3..4abb411 100644 --- a/roles/ands_openshift/tasks/heketi_resources.yml +++ b/roles/ands_openshift/tasks/heketi_resources.yml @@ -37,7 +37,7 @@ changed_when: (result | failed) or ((result.stdout | int) < 1) - name: Fix GlusterFS volume permissions - include: heketi_perms.yml + include_tasks: heketi_perms.yml run_once: true delegate_to: "{{ groups.masters[0] }}" when: (result | changed) diff --git a/roles/ands_openshift/tasks/main.yml b/roles/ands_openshift/tasks/main.yml index f72123f..cd62a10 100644 --- a/roles/ands_openshift/tasks/main.yml +++ b/roles/ands_openshift/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: "Configuring OpenShift" - include: "{{ current_subrole }}.yml" + include_tasks: "{{ current_subrole }}.yml" with_items: "{{ openshift_subroles }}" loop_control: loop_var: current_subrole diff --git a/roles/ands_openshift/tasks/ssh.yml b/roles/ands_openshift/tasks/ssh.yml index 7d8d99d..e8cb8d1 100644 --- a/roles/ands_openshift/tasks/ssh.yml +++ b/roles/ands_openshift/tasks/ssh.yml @@ -7,7 +7,7 @@ changed_when: (result | failed) failed_when: false -- include: ssh_keygen.yml +- include_tasks: ssh_keygen.yml run_once: true delegate_to: "{{ groups.masters[0] }}" when: (result | changed) diff --git a/roles/ands_openshift/tasks/storage.yml b/roles/ands_openshift/tasks/storage.yml index be2583a..66f0855 100644 --- a/roles/ands_openshift/tasks/storage.yml +++ b/roles/ands_openshift/tasks/storage.yml @@ -1,4 +1,4 @@ --- -- include: storage_resources.yml +- include_tasks: storage_resources.yml run_once: true delegate_to: "{{ groups.masters[0] }}" diff --git a/roles/ands_openshift/tasks/users.yml b/roles/ands_openshift/tasks/users.yml index c816203..a692a24 100644 --- a/roles/ands_openshift/tasks/users.yml +++ b/roles/ands_openshift/tasks/users.yml @@ -3,6 +3,6 @@ copy: src="users/htpasswd" dest="/etc/origin/master/htpasswd" mode=0644 owner=root group=root force=yes backup=no when: "'masters' in group_names" -- include: users_resources.yml +- include_tasks: users_resources.yml run_once: true delegate_to: "{{ groups.masters[0] }}" diff --git a/roles/ands_storage/tasks/main.yml b/roles/ands_storage/tasks/main.yml index a86babe..9318f88 100644 --- a/roles/ands_storage/tasks/main.yml +++ b/roles/ands_storage/tasks/main.yml @@ -5,7 +5,7 @@ ands_data_path: "{{ ands_data_path }}" - name: Analyze storage devices - include: detect_device.yml + include_tasks: detect_device.yml when: not ands_data_device is defined - name: Create Ands VG @@ -30,6 +30,8 @@ - name: Add Heketi to Storage Domains set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}" when: + - ands_configure_heketi + - ands_heketi_domain is defined - (ansible_lvm.lvs[ands_heketi_lv] is defined) or (ands_heketi_volume_size is defined) - heketi_stat_result.stat.exists == False diff --git a/roles/ands_vagrant_vm/README b/roles/ands_vagrant_vm/README index ca4f0d5..1c271b4 100644 --- a/roles/ands_vagrant_vm/README +++ b/roles/ands_vagrant_vm/README @@ -7,7 +7,7 @@ Parameters: - vagrant_project - The vagrant project name, just specifies a subdirectory with virtual machines to allow parallel execution of VMs for testing and staging setups - vagrant_projects_dir - Location of all vagrant projects - vagrant_project_dir - Location of this specific vagrant project, normally is vagrant_projects_dir/vagrant_project - - vagrant_hosts - Number of VMs to generate, defaults to number of configured ands_hosts (i.e. 
OpenShift nodes currently) + - vagrant_hosts - Number of VMs to generate, defaults to number of configured ands_servers (i.e. OpenShift nodes currently) - vagrant_cpu_cores - Number of CPU cores for each VM - vagrant_mem_size - Memory per VM in GB - vagrant_disk_size - Data disk size per VM in GB @@ -17,3 +17,8 @@ Facts: Actions: - Creates and starts VMs + + +ToDo: + - We need to install a couple of vagrant plugins (requires vbox 5.2) + vagrant plugin install vagrant-disksize diff --git a/roles/ands_vagrant_vm/defaults/main.yml b/roles/ands_vagrant_vm/defaults/main.yml index 93d92b6..8387ac2 100644 --- a/roles/ands_vagrant_vm/defaults/main.yml +++ b/roles/ands_vagrant_vm/defaults/main.yml @@ -5,4 +5,4 @@ vagrant_project_dir: "/home/vagrant/projects/{{vagrant_project}}" vagrant_disk_size: 60200 vagrant_mem_size: 16384 vagrant_cpu_cores: 4 -vagrant_hosts: {{ groups.ands_hosts | length }} +vagrant_hosts: "{{ groups.ands_servers | length }}" diff --git a/roles/ands_vagrant_vm/templates/Vagrantfile.j2 b/roles/ands_vagrant_vm/templates/Vagrantfile.j2 index 54128d4..b044e2e 100644 --- a/roles/ands_vagrant_vm/templates/Vagrantfile.j2 +++ b/roles/ands_vagrant_vm/templates/Vagrantfile.j2 @@ -2,25 +2,32 @@ # vi: set ft=ruby : {% set net = ands_openshift_network | ipaddr('network') | ipaddr(0) | regex_replace('\.\d+$', '') %} {% set storage_net = ands_storage_network | ipaddr('network') | ipaddr(0) | regex_replace('\.\d+$', '') %} +{% set public_net = ands_openshift_public_network | ipaddr('network') | ipaddr(0) | regex_replace('\.\d+$', '') %} {% set netid = ( net | regex_replace('^.*\.', '') ) %} {% set storage_netid = ( storage_net | regex_replace('^.*\.', '') ) %} +{% set public_netid = ( public_net | regex_replace('^.*\.', '') ) %} {% set macid = ( (netid | length) > 2 ) | ternary(netid, "0" ~ netid) %} Vagrant.configure("2") do |config| (1..{{ vagrant_hosts }}).each do |i| config.vm.define "{{ vagrant_hostname_template }}#{i}" do |node| - node.vm.network "public_network", bridge: "br0", mac: "080027{{ macid }}02#{i}", ip: "{{ net }}.#{i}" - node.vm.network "private_network", mac: "080027{{ macid }}12#{i}", ip: "{{ storage_net }}.#{i}", name: "vboxnet0" + node.vm.network "public_network", nm_controlled: "yes", bridge: "br0", mac: "080027{{ macid }}02#{i}", ip: "{{ public_net }}.#{i}", type: "dhcp" + node.vm.network "private_network", nm_controlled: "yes", mac: "080027{{ macid }}12#{i}", ip: "{{ storage_net }}.#{i}", name: "vboxnet0", type: "static" node.vm.box = "centos/7" + node.disksize.size = "80 GB" node.vm.hostname = "{{ vagrant_hostname_template }}#{i}.ipe.kit.edu" # node.vm.synced_folder "../data", "/root/data" # Configuring DHCP in 'vm.network' causes 2 DHCP clients (dhclinet & nm) running in parallel and getting 2 IPs. 
- node.vm.provision "shell", run: "always", inline: "( ip addr show | grep -v 141.52.64.15 | grep -v 141.52.64.17 | grep -v 141.52.64.28 | grep 141.52 ) || dhclient -cf /var/lib/NetworkManager/dhclient-eth0.conf eth1" - node.vm.provision "shell", run: "always", inline: "( ip addr show | grep {{ netid }}.#{i} ) || ip addr add 192.168.{{ netid }}.#{i}/24 dev eth1" - node.vm.provision "shell", run: "always", inline: "( ip addr show | grep {{ storage_netid }}.#{i} ) || ifcfg eth2 192.168.{{ storage_netid }}.#{i}" +# node.vm.provision "shell", run: "always", inline: "( ip addr show dev eth1 | grep -v 141.52.64.15 | grep -v 141.52.64.17 | grep -v 141.52.64.28 | grep 141.52 ) || dhclient -cf /var/lib/NetworkManager/dhclient-eth0.conf eth1" + node.vm.provision "shell", run: "always", inline: "( ip addr show dev eth1 | grep {{ public_netid }}.#{i} ) || ip addr add 192.168.{{ public_netid }}.#{i}/24 dev eth1" + + node.vm.provision "shell", run: "always", inline: "( ip addr show dev eth2 | grep {{ storage_netid }}.#{i} ) || ip addr add 192.168.{{ storage_netid }}.#{i}/24 dev eth2" + node.vm.provision "shell", run: "always", inline: "( ip addr show dev eth2 | grep {{ netid }}.#{i} ) || ip addr add 192.168.{{ netid }}.#{i}/24 dev eth2" node.vm.provision "shell", run: "always", inline: "chmod +r /etc/sysconfig/network-scripts/ifcfg-eth*" node.vm.provision "shell", run: "always", inline: "chcon --reference /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth*" + + node.vm.provision "shell", run: "always", inline: "ip route del default dev eth0" node.vm.provision "shell" do |s| ssh_pub_key = File.readlines("authorized_keys").first.strip @@ -36,15 +43,20 @@ Vagrant.configure("2") do |config| #vb.gui = true vb.customize [ "modifyvm", :id, + "--natnet1", "192.168.23#{i}/24", # "--ostype", "Linux_64", "--audio", "none", ] - vb.customize [ + unless File.exist?("../disks/#{i}.vdi") + vb.customize [ 'createhd', '--filename', "../disks/#{i}", '--format', 'VDI', '--size', {{ 1024 * (vagrant_disk_size | int) }} - ] - vb.customize [ + ] + vb.customize [ 'storageattach', :id, '--storagectl', 'IDE Controller', '--port', 1, '--device', 0,'--type', 'hdd', '--medium', "../disks/#{i}.vdi" - ] +# Since VirtualBox 5.1 +# 'storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0,'--type', 'hdd', '--medium', "../disks/#{i}.vdi" + ] + end end end end diff --git a/roles/ands_vagrant_vmconf/tasks/main.yml b/roles/ands_vagrant_vmconf/tasks/main.yml new file mode 100644 index 0000000..f52a52d --- /dev/null +++ b/roles/ands_vagrant_vmconf/tasks/main.yml @@ -0,0 +1,28 @@ + - name: Get partition info + parted: device="/dev/sda" unit="MiB" + register: partinfo + + - set_fact: parts="{{partinfo.partitions | length}}" + - set_fact: end="{{partinfo.partitions[(parts | int) - 1].end | round | int}}" + + - name: Add partition to the first disk + parted: device="/dev/sda" number=4 part_start="{{ end }}MiB" flags="lvm" state="present" + + - name: Resize Root VG + lvg: vg="VolGroup00" pvs="/dev/sda3,/dev/sda4" + + - name: Resize Root LV + lvol: vg="VolGroup00" lv="LogVol00" size="+100%FREE" + + - name: Resize FS + filesystem: dev="/dev/mapper/VolGroup00-LogVol00" fstype="xfs" resizefs="yes" + + - name: Create required directories + file: path="/etc/origin/node/" state="directory" + +# We just need networkmanager running +# - name: Bypass absent NM +# copy: remote_src="yes" src="/etc/resolv.conf" dest="/etc/origin/node/resolv.conf" + + - name: Update CentOS + yum: name=* state=latest update_cache=yes 
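The new ands_vagrant_vmconf role above grows the VM root filesystem onto a fourth /dev/sda partition. A rough interactive equivalent of those tasks is sketched below (assumptions: this is only an illustration of what the role does — the role itself uses the parted, lvg, lvol and filesystem Ansible modules and detects the end of the last partition at run time; the device and VG/LV names are taken from the role):

    # grow the Vagrant VM root filesystem onto a new /dev/sda4 (sketch, not the literal playbook)
    END_MIB=$(parted -sm /dev/sda unit MiB print | tail -1 | cut -d: -f3 | tr -d 'MiB')  # end of last partition
    parted -s /dev/sda mkpart primary "${END_MIB}MiB" 100%
    parted -s /dev/sda set 4 lvm on
    partprobe /dev/sda
    pvcreate /dev/sda4
    vgextend VolGroup00 /dev/sda4                      # role: lvg vg=VolGroup00 pvs=/dev/sda3,/dev/sda4
    lvextend -l +100%FREE /dev/VolGroup00/LogVol00     # role: lvol size=+100%FREE
    xfs_growfs /dev/mapper/VolGroup00-LogVol00         # role: filesystem fstype=xfs resizefs=yes
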
diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml index 3f49a39..7f6922b 100644 --- a/roles/common/tasks/main.yml +++ b/roles/common/tasks/main.yml @@ -4,6 +4,9 @@ - epel-release - centos-release-openshift-origin +- name: Ensure GlusterFS repositories are present + yum: name="centos-release-gluster{{ glusterfs_version }}" state=present + # Seems we need iptables-services at least temporary... - name: Ensure all required packages are installed package: name={{item}} state=present @@ -16,7 +19,11 @@ - python-passlib - python2-ruamel-yaml - python2-jmespath + - python-ipaddress - iptables-services + - PyYAML + - python-rhsm-certificates + - glusterfs-fuse - name: Ensure all extra packages are installed package: name={{item}} state=present diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 1263cd2..e424e01 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -20,7 +20,7 @@ - debug: msg="{{ loop_device_check.stderr }}" when: loop_device_check.stderr -- include: storage.yml +- import_tasks: storage.yml when: loop_device_check.rc == 0 - name: extend the vg diff --git a/roles/glusterfs/files/gluster-link.service b/roles/glusterfs/files/gluster-link.service new file mode 100644 index 0000000..ddcea10 --- /dev/null +++ b/roles/glusterfs/files/gluster-link.service @@ -0,0 +1,8 @@ +[Unit] +After=origin-node.service + +[Service] +ExecStart=/usr/bin/ln -sf /run/glusterd/glusterd.socket /run/glusterd.socket + +[Install] +WantedBy=multi-user.target diff --git a/roles/glusterfs/tasks/common.yml b/roles/glusterfs/tasks/common.yml index 7675cb9..5e8e3b6 100644 --- a/roles/glusterfs/tasks/common.yml +++ b/roles/glusterfs/tasks/common.yml @@ -7,9 +7,7 @@ with_items: - glusterfs-cli - glusterfs-fuse - - glusterfs-libs - glusterfs-rdma - - glusterfs - libsemanage-python - name: Allow fuse in SELinux configuration diff --git a/roles/glusterfs/tasks/create_domain.yml b/roles/glusterfs/tasks/create_domain.yml index b3fc89e..8f8042b 100644 --- a/roles/glusterfs/tasks/create_domain.yml +++ b/roles/glusterfs/tasks/create_domain.yml @@ -1,6 +1,6 @@ --- - name: Configure volumes - include: create_volume.yml + include_tasks: create_volume.yml with_dict: "{{ domain.volumes }}" vars: domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}" diff --git a/roles/glusterfs/tasks/create_volume.yml b/roles/glusterfs/tasks/create_volume.yml index 9b955b0..ca4f39a 100644 --- a/roles/glusterfs/tasks/create_volume.yml +++ b/roles/glusterfs/tasks/create_volume.yml @@ -1,4 +1,4 @@ --- -- include: "{{ volume.value.type }}/vols{{((domain_servers | length) < 4) | ternary((domain_servers | length), 3) }}.yml" +- include_tasks: "{{ volume.value.type }}/vols{{((domain_servers | length) < 4) | ternary((domain_servers | length), 3) }}.yml" vars: name: "{{ volume.key }}" diff --git a/roles/glusterfs/tasks/main.yml b/roles/glusterfs/tasks/main.yml index dbd1aad..d7ee766 100644 --- a/roles/glusterfs/tasks/main.yml +++ b/roles/glusterfs/tasks/main.yml @@ -1,13 +1,34 @@ --- -- include: common.yml +- name: Install GlusterFS Common Software + include_tasks: common.yml when: - "'software' in glusterfs_subroles" -- include: server.yml +- name: Install GlusterFS client + include_tasks: setup-client.yml + when: + - "'software' in glusterfs_subroles" + - "'ands_storage_servers' not in group_names" + +- name: Install GlusterFS OpenShift Server + include_tasks: setup-openshift-server.yml + when: + - "'software' in glusterfs_subroles" + - 
"'ands_storage_servers' in group_names" + - "'glusterfs' in group_names" + +- name: Install GlusterFS External Server + include_tasks: setup-external-server.yml when: - "'software' in glusterfs_subroles" - "'ands_storage_servers' in group_names" + - "'glusterfs' not in group_names" + +- name: Configure gluster peers (on first host) + shell: gluster peer probe {{item}} + run_once: true + with_items: "{{ glusterfs_servers }}" -- include: volumes.yml +- include_tasks: volumes.yml when: - "'volumes' in glusterfs_subroles" diff --git a/roles/glusterfs/tasks/mount_domain.yml b/roles/glusterfs/tasks/mount_domain.yml index 94b6677..355ed29 100644 --- a/roles/glusterfs/tasks/mount_domain.yml +++ b/roles/glusterfs/tasks/mount_domain.yml @@ -1,6 +1,6 @@ --- - name: Mount volumes - include: mount_volume.yml + include_tasks: mount_volume.yml with_dict: "{{ domain.volumes }}" vars: name: "{{ volume.key }}" diff --git a/roles/glusterfs/tasks/setup-client.yml b/roles/glusterfs/tasks/setup-client.yml new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/roles/glusterfs/tasks/setup-client.yml diff --git a/roles/glusterfs/tasks/server.yml b/roles/glusterfs/tasks/setup-external-server.yml index 328a8c5..cc0b2f1 100644 --- a/roles/glusterfs/tasks/server.yml +++ b/roles/glusterfs/tasks/setup-external-server.yml @@ -4,6 +4,7 @@ with_items: - glusterfs-server - glusterfs-rdma + - glusterfs - name: Ensure GlusterFS service is running service: name=glusterd state=started enabled=yes @@ -25,7 +26,3 @@ - name: Create folder for GlusterFS bricks file: dest="{{glusterfs_bricks_path}}" owner="root" group="root" mode="0755" state="directory" -- name: Configure gluster peers (on first host) - shell: gluster peer probe {{item}} - run_once: true - with_items: "{{ glusterfs_servers }}" diff --git a/roles/glusterfs/tasks/setup-openshift-server.yml b/roles/glusterfs/tasks/setup-openshift-server.yml new file mode 100644 index 0000000..20ebbf8 --- /dev/null +++ b/roles/glusterfs/tasks/setup-openshift-server.yml @@ -0,0 +1,9 @@ +--- +- name: Link control socket + file: src="/run/glusterd/glusterd.socket" dest="/run/glusterd.socket" state="link" + +- name: Copy systemd unit to recreate link on re-start + copy: src="gluster-link.service" dest="/etc/systemd/system/gluster-link.service" owner="root" group="root" mode="0644" + +- name: Enable systemd unit + systemd: enabled=true name=gluster-link daemon_reload=yes
\ No newline at end of file diff --git a/roles/glusterfs/tasks/volumes.yml b/roles/glusterfs/tasks/volumes.yml index e393c08..c4d49ac 100644 --- a/roles/glusterfs/tasks/volumes.yml +++ b/roles/glusterfs/tasks/volumes.yml @@ -1,5 +1,5 @@ - name: Configure volume domains - include: create_domain.yml + include_tasks: create_domain.yml run_once: true delegate_to: "{{ groups[domain.servers][0] }}" with_items: "{{ glusterfs_domains }}" @@ -7,7 +7,7 @@ loop_var: domain - name: Mount volume domains - include: mount_domain.yml + include_tasks: mount_domain.yml when: ( domain.clients | default("---") ) in group_names with_items: "{{ glusterfs_domains }}" loop_control: diff --git a/roles/openshift_resource/tasks/main.yml b/roles/openshift_resource/tasks/main.yml index 698efea..d44d2e0 100644 --- a/roles/openshift_resource/tasks/main.yml +++ b/roles/openshift_resource/tasks/main.yml @@ -13,10 +13,10 @@ set_fact: tmpl="{{ results.stdout | from_yaml }}" when: template.find(".json") == -1 - - include: template.yml + - include_tasks: template.yml when: tmpl.kind == "Template" - - include: resource.yml + - include_tasks: resource.yml when: tmpl.kind != "Template" run_once: true diff --git a/roles/openvpn/tasks/main.yml b/roles/openvpn/tasks/main.yml index df49976..11e9b94 100644 --- a/roles/openvpn/tasks/main.yml +++ b/roles/openvpn/tasks/main.yml @@ -19,14 +19,14 @@ register: result - name: setup openvpn keys - include: keys.yml + include_tasks: keys.yml when: result.stat.exists == False - name: Ensure CA key is removed file: path="{{openvpn_keydir}}/ca.key" state=absent - name: setup openvpn configuration - include: config.yml + include_tasks: config.yml - name: Ensure OpenVPN service is enabled service: name="{{openvpn_service}}" enabled=yes @@ -5,16 +5,20 @@ case "${1}" in all) - ./setup.sh prepare - ./setup.sh openshift - ./setup.sh configure - ./setup.sh projects + ./setup.sh -i $inventory prepare || exit 1 + ./setup.sh -i $inventory openshift || exit 1 + ./setup.sh -i $inventory gluster || exit 1 + ./setup.sh -i $inventory configure || exit 1 + ./setup.sh -i $inventory projects || exit 1 ;; local) apply playbooks/local.yml || exit 1 ;; vm) - apply playbooks/ands-setup-vm.yml || exit 1 + apply playbooks/ands-vm-setup.yml || exit 1 + ;; + vmconf) + apply playbooks/ands-vm-conf.yml || exit 1 ;; prepare) apply playbooks/ands-prepare.yml || exit 1 @@ -22,6 +26,9 @@ case "${1}" in openshift) apply playbooks/openshift-install.yml || exit 1 ;; + gluster) + apply playbooks/ands-gluster.yml || exit 1 + ;; configure) apply playbooks/openshift-setup.yml || exit 1 ;; @@ -34,6 +41,7 @@ case "${1}" in nodes) ./setup.sh prepare || exit 1 ./setup.sh openshift-nodes || exit 1 + ./setup.sh gluster || exit 1 ./setup.sh configure || exit 1 ;; users) diff --git a/setup/configs/openshift.yml b/setup/configs/openshift.yml index 5b23837..6b9995c 100644 --- a/setup/configs/openshift.yml +++ b/setup/configs/openshift.yml @@ -1,6 +1,8 @@ --- ands_openshift_projects: katrin: KArlsruhe TRItium Neutrino + adei: ADEI + ands_openshift_users: pdv: IPE Administation Account @@ -8,5 +10,5 @@ ands_openshift_users: csa: Suren A. 
Chilingaryan <csa@suren.me> ands_openshift_roles: - cluster-admin: csa, pdv, katrin + cluster-admin: csa, pdv katrin/admin: katrin diff --git a/setup/configs/volumes.yml b/setup/configs/volumes.yml index 134a887..d0ba063 100644 --- a/setup/configs/volumes.yml +++ b/setup/configs/volumes.yml @@ -11,7 +11,7 @@ ands_heketi_domain: ands_storage_domains: - servers: "ands_storage_servers" - clients: "ands_servers" + clients: "masters" volumes: provision: { type: "cfg", mount: "{{ ands_paths.provision }}" } - servers: "storage_nodes" |
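Once deploy_cluster.yml and the new gluster action have run, the glusterfs-storage storage class that the metrics and service-broker settings above point at can be sanity-checked with a throwaway claim. This is a sketch under assumptions: the claim name gluster-test is arbitrary, and oc is assumed to be logged in with cluster-admin rights.

    # verify dynamic provisioning through the glusterfs-storage class (hypothetical test PVC)
    oc get storageclass glusterfs-storage
    cat <<EOF | oc create -f -
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: gluster-test
    spec:
      storageClassName: glusterfs-storage
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
    EOF
    oc get pvc gluster-test    # should reach status Bound via heketi
    oc delete pvc gluster-test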