41 files changed, 387 insertions, 81 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index ceb11a9e9..231ecd118 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.116-1 ./
+3.6.119-1 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 0b9531269..a275199cf 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
 %global __requires_exclude ^/usr/bin/ansible-playbook$
 
 Name: openshift-ansible
-Version: 3.6.116
+Version: 3.6.119
 Release: 1%{?dist}
 Summary: Openshift and Atomic Enterprise Ansible
 License: ASL 2.0
@@ -280,6 +280,25 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.119-1
+- Temporarilly only migrate jobs as we were before (sdodson@redhat.com)
+- Disable TLS verification in skopeo inspect (rhcarvalho@gmail.com)
+- Preserve etcd3 storage if it's already in use (sdodson@redhat.com)
+- GlusterFS: Generate better secret keys (jarrpa@redhat.com)
+- GlusterFS: Fix error when groups.glusterfs_registry is undefined.
+  (jarrpa@redhat.com)
+- GlusterFS: Use proper identity in heketi secret (jarrpa@redhat.com)
+- GlusterFS: Allow configuration of heketi port (jarrpa@redhat.com)
+- GlusterFS: Fix variable typo (jarrpa@redhat.com)
+- GlusterFS: Minor template fixes (jarrpa@redhat.com)
+- registry: mount GlusterFS storage volume from correct host
+  (jarrpa@redhat.com)
+
+* Mon Jun 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.117-1
+- Run storage upgrade pre and post master upgrade (rteague@redhat.com)
+- Introduce etcd migrate role (jchaloup@redhat.com)
+- Add support for rhel, aci, vxlan (srampal@cisco.com)
+
 * Sun Jun 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.116-1
 - PAPR: define openshift_image_tag via command line (rhcarvalho@gmail.com)
 - Ensure only one ES pod per PV (peter.portante@redhat.com)
diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml
index 91948c72e..eec6c23a7 100644
--- a/playbooks/adhoc/contiv/delete_contiv.yml
+++ b/playbooks/adhoc/contiv/delete_contiv.yml
@@ -1,5 +1,5 @@
 ---
-- name: delete contiv
+- name: Uninstall contiv
   hosts: all
   gather_facts: False
   tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c18c49d7b..6738ce11f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
   - name: Upgrade job storage
     command: >
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      migrate storage --confirm
+      migrate storage --include=jobs --confirm
 
 # If facts cache were for some reason deleted, this fact may not be set, and if not set
 # it will always default to true. This causes problems for the etcd data dir fact detection
@@ -149,7 +149,7 @@
   - name: Upgrade job storage
     command: >
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      migrate storage --confirm
+      migrate storage --include=jobs --confirm
 
 ##############################################################################
 # Gate on master update complete
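Both hunks above make the same change: the pre- and post-upgrade storage migrations are narrowed from all API resources to jobs only, matching the "Temporarilly only migrate jobs as we were before" changelog entry. A minimal sketch of the resulting invocation, assuming the default admin kubeconfig path on a master:

    # Migrate stored job objects to the current storage version.
    # --include limits the migration to the jobs resource; --confirm
    # applies the rewrite instead of doing a dry run.
    oc adm --config=/etc/origin/master/admin.kubeconfig \
        migrate storage --include=jobs --confirm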
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 429460b2c..70108fb7a 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -27,7 +27,17 @@
 
   - name: Set clean install fact
     set_fact:
-      l_clean_install: "{{ not master_config_stat.stat.exists }}"
+      l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+  - name: Determine if etcd3 storage is in use
+    command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+    register: etcd3_grep
+    failed_when: false
+    changed_when: false
+
+  - name: Set etcd3 fact
+    set_fact:
+      l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
 
   - set_fact:
       openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
@@ -131,7 +141,8 @@
     etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
     etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
     etcd_cert_prefix: "master.etcd-"
-    r_openshift_master_clean_install: hostvars[groups.oo_first_master.0].l_clean_install
+    r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
+    r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
   - role: nuage_master
     when: openshift.common.use_nuage | bool
   - role: calico_master
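The detection task above has to match a key and its list entry on consecutive lines, hence grep -Pzo: -z treats the input as NUL-delimited so the pattern may span newlines, and -P enables the \n escape. A standalone sketch of what it probes for, assuming a 3.6-style master-config.yaml:

    # master-config.yaml fragment that indicates etcd3 storage:
    #   kubernetesMasterConfig:
    #     apiServerArguments:
    #       storage-backend:
    #       - etcd3
    grep -Pzo "storage-backend:\n.*etcd3" -q /etc/origin/master/master-config.yaml \
        && echo "etcd3 storage already in use" \
        || echo "legacy etcd2 storage"

The exit code drives l_etcd3_enabled, which feeds r_openshift_master_etcd3_storage so an upgraded master that already runs etcd3 keeps that backend.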
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index 1ccae61f2..8c4d19537 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,12 +1,12 @@
 ---
 # The version of Contiv binaries to use
-contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC
+contiv_version: 1.0.1
 
 # The version of cni binaries
 cni_version: v0.4.0
 
-contiv_default_subnet: "20.1.1.1/24"
-contiv_default_gw: "20.1.1.254"
+contiv_default_subnet: "10.128.0.0/16"
+contiv_default_gw: "10.128.254.254"
 
 # TCP port that Netmaster listens for network connections
 netmaster_port: 9999
@@ -69,6 +69,9 @@ netplugin_fwd_mode: bridge
 # Contiv fabric mode aci|default
 contiv_fabric_mode: default
 
+# Global VLAN range
+contiv_vlan_range: "2900-3000"
+
 # Encapsulation type vlan|vxlan to use for instantiating container networks
 contiv_encap_mode: vlan
 
@@ -78,8 +81,8 @@ netplugin_driver: ovs
 # Create a default Contiv network for use by pods
 contiv_default_network: true
 
-# VLAN/ VXLAN tag value to be used for the default network
-contiv_default_network_tag: 1
+# Statically configured tag for default network (if needed)
+contiv_default_network_tag: ""
 
 #SRFIXME (use the openshift variables)
 https_proxy: ""
@@ -95,6 +98,9 @@ apic_leaf_nodes: ""
 apic_phys_dom: ""
 apic_contracts_unrestricted_mode: no
 apic_epg_bridge_domain: not_specified
+apic_configure_default_policy: false
+apic_default_external_contract: "uni/tn-common/brc-default"
+apic_default_app_profile: "contiv-infra-app-profile"
 is_atomic: False
 kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
 master_name: "{{ groups['masters'][0] }}"
@@ -104,3 +110,12 @@ kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
 kube_key: "{{ kube_cert_dir }}/admin.key"
 kube_cert: "{{ kube_cert_dir }}/admin.crt"
 kube_master_api_port: 8443
+
+# contivh1 default subnet and gateway
+#contiv_h1_subnet_default: "132.1.1.0/24"
+#contiv_h1_gw_default: "132.1.1.1"
+contiv_h1_subnet_default: "10.129.0.0/16"
+contiv_h1_gw_default: "10.129.0.1"
+
+# contiv default private subnet for ext access
+contiv_private_ext_subnet: "10.130.0.0/16"
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index 3223afb6e..da6409f1e 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -26,3 +26,5 @@ dependencies:
     etcd_url_scheme: http
     etcd_peer_url_scheme: http
   when: contiv_role == "netmaster"
+- role: contiv_auth_proxy
+  when: (contiv_role == "netmaster") and (contiv_enable_auth_proxy == true)
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index 9cf98bb80..f679443e0 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -6,10 +6,53 @@
   retries: 9
   delay: 10
 
+- name: Contiv | Set globals
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+
+- name: Contiv | Set arp mode to flood if ACI
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
+  when: contiv_fabric_mode == "aci"
+
 - name: Contiv | Check if default-net exists
   command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
   register: net_result
 
 - name: Contiv | Create default-net
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway={{ contiv_default_gw }} default-net'
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
   when: net_result.stdout.find("default-net") == -1
+
+- name: Contiv | Create host access infra network for VxLan routing case
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
+  when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+
+#- name: Contiv | Create an allow-all policy for the default-group
+#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
+#  when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Set up aci external contract to consume default external contract
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
+  when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+- name: Contiv | Set up aci external contract to provide default external contract
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
+  when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+- name: Contiv | Create aci default-group
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
+  when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Add external contracts to the default-group
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
+  when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+#- name: Contiv | Add policy rule 1 for allow-all policy
+#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
+#  when: contiv_fabric_mode == "aci"
+
+#- name: Contiv | Add policy rule 2 for allow-all policy
+#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
+#  when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Create default aci app profile
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
+  when: contiv_fabric_mode == "aci"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index 5057767b8..acaf7386e 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -23,7 +23,7 @@
     line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
     state: present
   when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
-  with_items: groups['masters']
+  with_items: "{{ groups['masters'] }}"
 
 - name: Netmaster | Create netmaster symlinks
   file:
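The new tasks push the fabric mode, VLAN range, forwarding mode, and private subnet as netmaster globals before any networks are created. The result can be spot-checked from any master; a sketch assuming netctl is on the PATH and the netmaster name resolves (global info being, to the best of my knowledge, the read-side counterpart of global set in the upstream netctl CLI):

    netctl --netmaster "http://netmaster:9999" global info   # fabric mode, vlan range, fwd mode
    netctl --netmaster "http://netmaster:9999" net ls        # default-net and contivh1, once created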
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 8c348ac67..184c595c5 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -23,7 +23,36 @@
   notify: Save iptables rules
 
 - name: Netplugin IPtables | Open vxlan port with iptables
-  command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "vxlan"
+  command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472"
+  when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1
+  notify: Save iptables rules
 
 - name: Netplugin IPtables | Open vxlan port with iptables
-  command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "vxlan"
+  command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789"
+  when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow from contivh0
+  command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input"
+  when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow to contivh0
+  command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output"
+  when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow from contivh1
+  command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input"
+  when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow to contivh1
+  command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output"
+  when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow dns
+  command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns"
+  when: iptablesrules.stdout.find("contiv dns") == -1
+  notify: Save iptables rules
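Every rule now carries a unique -m comment marker and is guarded by a find() against the iptables -L output registered earlier in this file as iptablesrules, so repeated playbook runs no longer stack duplicate rules. The same guard pattern reduced to plain shell, using one of the comments above:

    # Insert the ACCEPT rule only if no rule with the marker comment exists yet.
    iptables -L INPUT -n | grep -q "netplugin vxlan 8472" || \
        iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT \
            -m comment --comment "netplugin vxlan 8472"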
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index 2eff1b85f..e0d48e643 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -4,9 +4,10 @@
     did_install: false
 
 - include: pkgMgrInstallers/centos-install.yml
-  when: ansible_distribution == "CentOS" and not is_atomic
+  when: (ansible_os_family == "RedHat") and
+        not is_atomic
 
 - name: Package Manager | Set fact saying we did CentOS package install
   set_fact:
     did_install: true
-  when: ansible_distribution == "CentOS"
+  when: (ansible_os_family == "RedHat")
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 51c3d35ac..91e6aadf3 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -1,13 +1,13 @@
 ---
-- name: PkgMgr CentOS | Install net-tools pkg for route
+- name: PkgMgr RHEL/CentOS | Install net-tools pkg for route
   yum: pkg=net-tools state=latest
 
-- name: PkgMgr CentOS | Get openstack kilo rpm
+- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
   get_url:
-    url: https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-2.noarch.rpm
-    dest: /tmp/rdo-release-kilo-2.noarch.rpm
+    url: https://repos.fedorapeople.org/repos/openstack/openstack-ocata/rdo-release-ocata-2.noarch.rpm
+    dest: /tmp/rdo-release-ocata-2.noarch.rpm
     validate_certs: False
   environment:
     http_proxy: "{{ http_proxy|default('') }}"
@@ -16,15 +16,15 @@
   tags:
     - ovs_install
 
-- name: PkgMgr CentOS | Install openstack kilo rpm
-  yum: name=/tmp/rdo-release-kilo-2.noarch.rpm state=present
+- name: PkgMgr RHEL/CentOS | Install openstack ocata rpm
+  yum: name=/tmp/rdo-release-ocata-2.noarch.rpm state=present
   tags:
     - ovs_install
 
-- name: PkgMgr CentOS | Install ovs
+- name: PkgMgr RHEL/CentOS | Install ovs
   yum:
-    pkg=openvswitch
-    state=latest
+    pkg=openvswitch-2.5.0-2.el7.x86_64
+    state=present
   environment:
     http_proxy: "{{ http_proxy|default('') }}"
     https_proxy: "{{ https_proxy|default('') }}"
diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2
index f3d26c037..a4928cc3d 100644
--- a/roles/contiv/templates/netplugin.j2
+++ b/roles/contiv/templates/netplugin.j2
@@ -1,9 +1,7 @@
 {% if contiv_encap_mode == "vlan" %}
 NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
 {% endif %}
-{# Note: Commenting out vxlan encap mode support until it is fully supported
 {% if contiv_encap_mode == "vxlan" %}
-NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -e {{contiv_encap_mode}} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
 {% endif %}
-#}
diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md
new file mode 100644
index 000000000..287b6c148
--- /dev/null
+++ b/roles/contiv_auth_proxy/README.md
@@ -0,0 +1,29 @@
+Role Name
+=========
+
+Role to install Contiv API Proxy and UI
+
+Requirements
+------------
+
+Docker needs to be installed to run the auth proxy container.
+
+Role Variables
+--------------
+
+auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container.
+auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates.
+auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address.
+
+Dependencies
+------------
+
+docker
+
+Example Playbook
+----------------
+
+- hosts: netplugin-node
+  become: true
+  roles:
+    - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 }
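As a usage sketch for the variables the README lists, a hypothetical group_vars entry enabling the proxy through the contiv role (contiv_enable_auth_proxy is consumed by roles/contiv/meta/main.yml above; all values shown are illustrative, not requirements):

    contiv_enable_auth_proxy: true
    auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
    auth_proxy_port: 10000
    auth_proxy_cert: "/var/contiv/certs/auth_proxy_cert.pem"
    auth_proxy_key: "/var/contiv/certs/auth_proxy_key.pem"
    auth_proxy_datastore: "etcd://netmaster:22379"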
diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml
new file mode 100644
index 000000000..4e637a947
--- /dev/null
+++ b/roles/contiv_auth_proxy/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
+auth_proxy_port: 10000
+contiv_certs: "/var/contiv/certs"
+cluster_store: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
+auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
+auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
+auth_proxy_datastore: "{{ cluster_store }}"
+auth_proxy_binaries: "/var/contiv_cache"
+auth_proxy_local_install: False
+auth_proxy_rule_comment: "Contiv auth proxy service"
diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service
new file mode 100644
index 000000000..7cd2edff1
--- /dev/null
+++ b/roles/contiv_auth_proxy/files/auth-proxy.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Contiv Proxy and UI
+After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+
+[Service]
+ExecStart=/usr/bin/auth_proxy.sh start
+ExecStop=/usr/bin/auth_proxy.sh stop
+KillMode=control-group
+Restart=on-failure
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml
new file mode 100644
index 000000000..9cb9bea49
--- /dev/null
+++ b/roles/contiv_auth_proxy/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for auth_proxy
diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml
new file mode 100644
index 000000000..a29659cc9
--- /dev/null
+++ b/roles/contiv_auth_proxy/tasks/cleanup.yml
@@ -0,0 +1,10 @@
+---
+
+- name: stop auth-proxy container
+  service: name=auth-proxy state=stopped
+
+- name: cleanup iptables for auth proxy
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ auth_proxy_port }}"
diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml
new file mode 100644
index 000000000..74e7bf794
--- /dev/null
+++ b/roles/contiv_auth_proxy/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+# tasks file for auth_proxy
+- name: setup iptables for auth proxy
+  shell: >
+    ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ auth_proxy_port }}"
+
+# Load the auth-proxy-image from local tar. Ignore any errors to handle the
+# case where the image is not built in
+- name: copy auth-proxy image
+  copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar
+  when: auth_proxy_local_install == True
+
+- name: load auth-proxy image
+  shell: docker load -i /tmp/auth-proxy-image.tar
+  when: auth_proxy_local_install == True
+
+- name: create cert folder for proxy
+  file: path=/var/contiv/certs state=directory
+
+- name: copy shell script for starting auth-proxy
+  template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx
+
+- name: copy cert for starting auth-proxy
+  copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r
+
+- name: copy key for starting auth-proxy
+  copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r
+
+- name: copy systemd units for auth-proxy
+  copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service
+
+- name: start auth-proxy container
+  systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes
diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2
new file mode 100644
index 000000000..e82e5b4ab
--- /dev/null
+++ b/roles/contiv_auth_proxy/templates/auth_proxy.j2
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+usage="$0 start/stop"
+if [ $# -ne 1 ]; then
+    echo USAGE: $usage
+    exit 1
+fi
+
+case $1 in
+start)
+    set -e
+
+    /usr/bin/docker run --rm \
+      -p 10000:{{ auth_proxy_port }} \
+      --net=host --name=auth-proxy \
+      -e NO_NETMASTER_STARTUP_CHECK=1 \
+      -v /var/contiv:/var/contiv \
+      {{ auth_proxy_image }} \
+      --tls-key-file={{ auth_proxy_key }} \
+      --tls-certificate={{ auth_proxy_cert }} \
+      --data-store-address={{ auth_proxy_datastore }} \
+      --netmaster-address={{ service_vip }}:9999 \
+      --listen-address=:10000
+    ;;
+
+stop)
+    # don't stop on error
+    /usr/bin/docker stop auth-proxy
+    /usr/bin/docker rm -f -v auth-proxy
+    ;;
+
+*)
+    echo USAGE: $usage
+    exit 1
+    ;;
+esac
diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory
new file mode 100644
index 000000000..d18580b3c
--- /dev/null
+++ b/roles/contiv_auth_proxy/tests/inventory
@@ -0,0 +1 @@
+localhost
\ No newline at end of file
diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml
new file mode 100644
index 000000000..2af3250cd
--- /dev/null
+++ b/roles/contiv_auth_proxy/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - auth_proxy
diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml
new file mode 100644
index 000000000..9032766c4
--- /dev/null
+++ b/roles/contiv_auth_proxy/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for auth_proxy
diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml
index a6c08fa63..7b8150954 100644
--- a/roles/contiv_facts/defaults/main.yaml
+++ b/roles/contiv_facts/defaults/main.yaml
@@ -8,3 +8,6 @@ bin_dir: /usr/bin
 ansible_temp_dir: /tmp/.ansible/files
 
 source_type: packageManager
+
+# Whether or not to also install and enable the Contiv auth_proxy
+contiv_enable_auth_proxy: false
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 60aacf715..26bf4c09b 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -169,7 +169,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
             registries = [registry]
 
         for registry in registries:
-            args = {"_raw_params": "skopeo inspect docker://{}/{}".format(registry, image)}
+            args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
             result = self.execute_module("command", args, task_vars=task_vars)
             if result.get("rc", 0) == 0 and not result.get("failed"):
                 return True
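The health check now passes --tls-verify=false so registries with self-signed certificates (or plain HTTP endpoints) no longer fail the image-availability check on TLS grounds alone. The equivalent manual probe, with a placeholder registry and image name:

    # Exit code 0 means the image manifest is reachable; certificate validation is skipped.
    skopeo inspect --tls-verify=false docker://registry.example.com/openshift3/ose-pod:v3.6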
when: - - r_openshift_master_clean_install - - openshift.common.version_gte_3_6 - notify: - - restart master - - restart master api - - restart master controllers - - include: set_loopback_context.yml when: openshift.common.version_gte_3_2_or_1_2 diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 1935d9592..6c26e5092 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -139,6 +139,12 @@ kubernetesMasterConfig: - v1 {% endif %} apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }} +{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %} + storage-backend: + - etcd3 + storage-media-type: + - application/vnd.kubernetes.protobuf +{% endif %} controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }} masterIP: {{ openshift.common.ip }} diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index 62fc35299..da4e348b4 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -90,7 +90,8 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin | openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. 
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 62fc35299..da4e348b4 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -90,7 +90,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
 | openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
 | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
 | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
 
 Each role variable also has a corresponding variable to optionally configure a
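A usage sketch for the external-heketi case those two new rows describe (the hostname is illustrative; the is_native toggle comes from the same variable table in this README):

    [OSEv3:vars]
    openshift_storage_glusterfs_heketi_is_native=False
    openshift_storage_glusterfs_heketi_url=heketi.example.com
    openshift_storage_glusterfs_heketi_port=8080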
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 468877e57..4ff56af9e 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -13,11 +13,12 @@ openshift_storage_glusterfs_heketi_is_missing: True
 openshift_storage_glusterfs_heketi_deploy_is_missing: True
 openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
 openshift_storage_glusterfs_heketi_version: 'latest'
-openshift_storage_glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
-openshift_storage_glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ omit }}"
 openshift_storage_glusterfs_heketi_topology_load: True
 openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
 openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+openshift_storage_glusterfs_heketi_port: 8080
 
 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
 openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
@@ -33,8 +34,9 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl
 openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
 openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
 openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
-openshift_storage_glusterfs_registry_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
-openshift_storage_glusterfs_registry_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ omit }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ omit }}"
 openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
 openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
 openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
+openshift_storage_glusterfs_registry_heketi_port: 8080
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 81b4fa5dc..4434f750c 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -29,7 +29,7 @@ objects:
 - kind: Route
   apiVersion: v1
   metadata:
-    name: deploy-heketi-${CLUSTER_NAME}
+    name: ${HEKETI_ROUTE}
     labels:
       glusterfs: deploy-heketi-${CLUSTER_NAME}-route
       deploy-heketi: support
@@ -115,14 +115,19 @@ parameters:
   displayName: Namespace
   description: Set the namespace where the GlusterFS pods reside
   value: default
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
 - name: IMAGE_NAME
-  displayName: heketi container name
+  displayName: heketi container image name
   required: True
 - name: IMAGE_VERSION
-  displayName: heketi container versiona
+  displayName: heketi container image version
   required: True
 - name: CLUSTER_NAME
   displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
   value: glusterfs
 - name: TOPOLOGY_PATH
   displayName: heketi topology file location
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index dc3d2250a..8c5e1ded3 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -125,11 +125,12 @@ parameters:
   description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
   value: '{ "glusterfs": "storage-host" }'
 - name: IMAGE_NAME
-  displayName: GlusterFS container name
+  displayName: GlusterFS container image name
   required: True
 - name: IMAGE_VERSION
-  displayName: GlusterFS container versiona
+  displayName: GlusterFS container image version
   required: True
 - name: CLUSTER_NAME
   displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
   value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index 1d8f1abdf..e3fa0a9fb 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -27,7 +27,7 @@ objects:
 - kind: Route
   apiVersion: v1
   metadata:
-    name: heketi-${CLUSTER_NAME}
+    name: ${HEKETI_ROUTE}
     labels:
       glusterfs: heketi-${CLUSTER_NAME}-route
   spec:
@@ -109,12 +109,17 @@ parameters:
   displayName: Namespace
   description: Set the namespace where the GlusterFS pods reside
   value: default
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
 - name: IMAGE_NAME
-  displayName: heketi container name
+  displayName: heketi container image name
   required: True
 - name: IMAGE_VERSION
-  displayName: heketi container versiona
+  displayName: heketi container image version
   required: True
 - name: CLUSTER_NAME
   displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
   value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 829c1f51b..4406ef28b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -123,21 +123,32 @@
   when:
   - glusterfs_heketi_topology_load
 
-- include: heketi_deploy_part1.yml
+- name: Generate heketi admin key
+  set_fact:
+    glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
   when:
   - glusterfs_heketi_is_native
-  - glusterfs_heketi_deploy_is_missing
-  - glusterfs_heketi_is_missing
+  - glusterfs_heketi_admin_key is undefined
 
-- name: Set heketi URL
+- name: Generate heketi user key
   set_fact:
-    glusterfs_heketi_url: "localhost:8080"
+    glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+  until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"
+  delay: 1
+  retries: 10
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_user_key is undefined
+
+- include: heketi_deploy_part1.yml
   when:
   - glusterfs_heketi_is_native
+  - glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing
 
 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
 
 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
@@ -155,21 +166,43 @@
   - glusterfs_heketi_is_native
   - glusterfs_heketi_is_missing
 
-- name: Create heketi user secret
+- name: Create heketi secret
   oc_secret:
     namespace: "{{ glusterfs_namespace }}"
     state: present
-    name: "heketi-{{ glusterfs_name }}-user-secret"
+    name: "heketi-{{ glusterfs_name }}-secret"
     type: "kubernetes.io/glusterfs"
     force: True
     contents:
     - path: key
-      data: "{{ glusterfs_heketi_user_key }}"
+      data: "{{ glusterfs_heketi_admin_key }}"
+  when:
+  - glusterfs_storageclass
+
+- name: Get heketi route
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: route
+    state: list
+    name: "heketi-{{ glusterfs_name }}"
+  register: heketi_route
+  when:
+  - glusterfs_storageclass
+  - glusterfs_heketi_is_native
+
+- name: Determine StorageClass heketi URL
+  set_fact:
+    glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+  when:
+  - glusterfs_storageclass
+  - glusterfs_heketi_is_native
 
 - name: Generate GlusterFS StorageClass file
   template:
     src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
     dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+  when:
+  - glusterfs_storageclass
 
 - name: Create GlusterFS StorageClass
   oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index aa303d126..dbfe126a4 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -19,6 +19,7 @@
     glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
     glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
     glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+    glusterfs_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
    glusterfs_nodes: "{{ groups.glusterfs }}"
 
 - include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 4c6891eeb..0849f2a2e 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -19,12 +19,13 @@
     glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
     glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
     glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
-    glusterfs_nodes: "{{ groups.glusterfs_registry }}"
+    glusterfs_heketi_port: "{{ openshift_storage_glusterfs_registry_heketi_port }}"
+    glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
 
 - include: glusterfs_common.yml
   when:
-  - groups.glusterfs_registry | default([]) | count > 0
-  - "'glusterfs' not in groups or groups.glusterfs_registry != groups.glusterfs"
+  - glusterfs_nodes | default([]) | count > 0
+  - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
 
 - name: Delete pre-existing GlusterFS registry resources
   oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 318d34b5d..ea9b1fe1f 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -33,6 +33,7 @@
     params:
       IMAGE_NAME: "{{ glusterfs_heketi_image }}"
       IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+      HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
       HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
       HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
       HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 3a9619d9d..26343b909 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -103,6 +103,7 @@
     params:
       IMAGE_NAME: "{{ glusterfs_heketi_image }}"
      IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+      HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
       HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
       HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
       HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
@@ -124,7 +125,7 @@
 
 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+    glusterfs_heketi_client: "oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
 
 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index c9bfdd1cd..d2d8c6c10 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -11,7 +11,7 @@
 
 - include: glusterfs_registry.yml
   when:
-  - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+  - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
 
 - name: Delete temp directory
   file:
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 9b8fae310..5ea801e60 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -5,6 +5,7 @@ metadata:
   name: glusterfs-{{ glusterfs_name }}
 provisioner: kubernetes.io/glusterfs
 parameters:
-  resturl: "http://{{ glusterfs_heketi_url }}:8081"
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
   secretNamespace: "{{ glusterfs_namespace }}"
-  secretName: "heketi-{{ glusterfs_name }}-user-secret"
+  secretName: "heketi-{{ glusterfs_name }}-secret"
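Rendered in native mode, the template now points provisioning at the heketi route and authenticates as admin with the generated secret. An illustrative result, with the header lines assumed from the unshown top of the template and the route hostname and names depending on glusterfs_name and the cluster router:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi-storage.apps.example.com"
      restuser: "admin"
      secretNamespace: "default"
      secretName: "heketi-storage-secret"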