Diffstat (limited to 'roles')
634 files changed, 22355 insertions, 4569 deletions
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
index ec4aafb79..65b736500 100644
--- a/roles/ansible_service_broker/meta/main.yml
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
+- role: lib_utils
 - role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 4ca47d074..f869b5fae 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -4,7 +4,7 @@
 - name: Set default image variables based on deployment type
   include_vars: "{{ item }}"
   with_first_found:
-    - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+    - "{{ openshift_deployment_type }}.yml"
     - "default_images.yml"
 
 - name: set ansible_service_broker facts
@@ -72,6 +72,15 @@
     - apiGroups: ["image.openshift.io", ""]
       resources: ["images"]
      verbs: ["get", "list"]
+    - apiGroups: ["network.openshift.io"]
+      resources: ["clusternetworks", "netnamespaces"]
+      verbs: ["get"]
+    - apiGroups: ["network.openshift.io"]
+      resources: ["netnamespaces"]
+      verbs: ["update"]
+    - apiGroups: ["networking.k8s.io"]
+      resources: ["networkpolicies"]
+      verbs: ["create", "delete"]
 
 - name: Create asb-access cluster role
   oc_clusterrole:
@@ -366,6 +375,11 @@
         secret:
           secretName: etcd-auth-secret
 
+- name: set auth name and type facts if needed
+  set_fact:
+    ansible_service_broker_registry_auth_type: "secret"
+    ansible_service_broker_registry_auth_name: "asb-registry-auth"
+  when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
 
 # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
 - name: Create config map for ansible-service-broker
@@ -393,6 +407,8 @@
             org: {{ ansible_service_broker_registry_organization }}
             tag: {{ ansible_service_broker_registry_tag }}
             white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }}
+            auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}"
+            auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}"
           - type: local_openshift
             name: localregistry
             namespaces: ['openshift']
@@ -438,6 +454,7 @@
       data: "{{ ansible_service_broker_registry_user }}"
     - path: password
       data: "{{ ansible_service_broker_registry_password }}"
+  when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
 
 - name: Create the Broker resource in the catalog
   oc_obj:
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 248e0363d..0ed1d9674 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -1,6 +1,6 @@
 ---
-__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/origin-
 __ansible_service_broker_image_tag: latest
 
 __ansible_service_broker_etcd_image_prefix: quay.io/coreos/
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 9cc0604a3..ed484c0dd 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -9,6 +9,6 @@
     name: "{{ openshift_docker_service_name }}"
     state: restarted
   register: l_docker_restart_docker_in_calico_result
-  until: not l_docker_restart_docker_in_calico_result | failed
+  until: not (l_docker_restart_docker_in_calico_result is failed)
   retries: 3
   delay: 30
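A pattern that recurs throughout this commit, starting with the calico handler above: Ansible deprecated the filter-style result checks (`result | failed`, `result | succeeded`, `result | changed`, `result | skipped`) in favor of the equivalent Jinja2 tests, and this commit converts them wholesale. A minimal sketch of the conversion; the task body and register name are illustrative, not from this commit:

    - command: /bin/true
      register: r_example
      # old, deprecated filter syntax:  until: not r_example | failed
      until: not (r_example is failed)
      retries: 3
      delay: 30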
diff --git a/roles/calico/meta/main.yml b/roles/calico/meta/main.yml
index 816c81369..e3997911b 100644
--- a/roles/calico/meta/main.yml
+++ b/roles/calico/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
   - cloud
   - system
 dependencies:
+- role: lib_utils
 - role: openshift_facts
 - role: openshift_master_facts
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 0e3863304..556953a71 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -7,14 +7,13 @@
   - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
 
 - name: Calico Node | Generate OpenShift-etcd certs
-  include_role:
+  import_role:
     name: etcd
     tasks_from: client_certificates
   when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
   vars:
     etcd_cert_prefix: calico.etcd-
     etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
     etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
     etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
diff --git a/roles/calico_master/meta/main.yml b/roles/calico_master/meta/main.yml
index 4d70c79cf..73c94db4e 100644
--- a/roles/calico_master/meta/main.yml
+++ b/roles/calico_master/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
   - cloud
   - system
 dependencies:
+- role: lib_utils
 - role: calico
 - role: openshift_facts
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 16d960d8b..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -19,11 +19,11 @@
 
 - name: Calico Master | Launch Calico Policy Controller
   command: >
-    {{ openshift.common.client_binary }} create
+    {{ openshift_client_binary }} create
     -f {{ mktemp.stdout }}/calico-policy-controller.yml
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
   register: calico_create_output
-  failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+  failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
   changed_when: ('created' in calico_create_output.stdout)
 
 - name: Calico Master | Delete temp directory
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
index 4d619fff6..372c29c28 100644
--- a/roles/cockpit-ui/meta/main.yml
+++ b/roles/cockpit-ui/meta/main.yml
@@ -12,4 +12,6 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
+- role: lib_utils
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index f60912033..d4174d879 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -39,7 +39,7 @@
 
 - name: Deploy registry-console
   command: >
-    {{ openshift.common.client_binary }} new-app --template=registry-console
+    {{ openshift_client_binary }} new-app --template=registry-console
     {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
     {% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
     {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
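Several of these roles also move from `include_role` to `import_role`, as the container_runtime README change below spells out. The distinction is binding time: `import_role` is static and resolved at playbook parse time (so tags and `--list-tasks` see the role's tasks), while `include_role` is dynamic and evaluated per host at run time. A minimal sketch under assumed role and variable names:

    - hosts: servers
      tasks:
      # static: resolved when the playbook is parsed
      - import_role:
          name: container_runtime
          tasks_from: package_docker.yml
      # dynamic: resolved at run time, so the name may be a variable
      - include_role:
          name: "{{ my_runtime_role }}"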
diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml
index 8c0ed3cb8..07e466f04 100644
--- a/roles/cockpit/meta/main.yml
+++ b/roles/cockpit/meta/main.yml
@@ -12,4 +12,4 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index f63b3e49b..577cd7daf 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -10,13 +10,13 @@
     - cockpit-bridge
     - cockpit-docker
     - "{{ cockpit_plugins }}"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Enable cockpit-ws
   systemd:
     name: cockpit.socket
     enabled: true
     state: started
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index 51f469aaf..665b1b012 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -5,7 +5,7 @@ Ensures docker package or system container is installed, and optionally raises t
 
 container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
 
-This role is designed to be used with include_role and tasks_from.
+This role is designed to be used with import_role and tasks_from.
 
 Entry points
 ------------
@@ -30,7 +30,7 @@ Example Playbook
 
     - hosts: servers
       tasks:
-      - include_role: container_runtime
+      - import_role: container_runtime
         tasks_from: package_docker.yml
 
 License
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index d7eb8663f..7397e2bec 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -2,8 +2,6 @@
 docker_cli_auth_config_path: '/root/.docker'
 openshift_docker_signature_verification: False
 
-repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
 openshift_docker_alternative_creds: False
 
 # oreg_url is defined by user input.
@@ -13,7 +11,7 @@ oreg_auth_credentials_replace: False
 openshift_docker_use_system_container: False
 openshift_docker_disable_push_dockerhub: False  # bool
 openshift_docker_selinux_enabled: True
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
 
 openshift_docker_hosted_registry_insecure: False  # bool
 
@@ -55,11 +53,25 @@ openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['o
 docker_alt_storage_path: /var/lib/containers/docker
 docker_default_storage_path: /var/lib/docker
 
+docker_storage_path: "{{ docker_default_storage_path }}"
+docker_storage_size: 40G
+docker_storage_setup_options:
+  vg: docker_vg
+  data_size: 99%VG
+  storage_driver: overlay2
+  root_lv_name: docker-root-lv
+  root_lv_size: 100%FREE
+  root_lv_mount_path: "{{ docker_storage_path }}"
+docker_storage_extra_options:
+- "--storage-opt overlay2.override_kernel_check=true"
+- "{{ '--storage-opt overlay2.size=' ~ docker_storage_size if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' else '' }}"
+- "--graph={{ docker_storage_path}}"
+
 # Set local versions of facts that must be in json format for container-daemon.json
 # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
 l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
-l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | lib_utils_oo_list_to_dict | to_json }}"
 l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
 l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
 l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
@@ -89,45 +101,34 @@ l_crt_crio_image_tag_dict:
   openshift-enterprise: "{{ l_openshift_image_tag }}"
   origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
 
-l_crt_crio_image_prepend_dict:
-  openshift-enterprise: "registry.access.redhat.com/openshift3"
-  origin: "docker.io/gscrivano"
-
 l_crt_crio_image_dict:
-  Fedora:
-    crio_image_name: "cri-o-fedora"
-    crio_image_tag: "latest"
-  CentOS:
-    crio_image_name: "cri-o-centos"
-    crio_image_tag: "latest"
-  RedHat:
-    crio_image_name: "cri-o"
-    crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
-
-l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
-l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
-l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
-
-l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+  Fedora: "registry.fedoraproject.org/latest/cri-o"
+  CentOS: "registry.centos.org/projectatomic/cri-o"
+  RedHat: "registry.access.redhat.com/openshift3/cri-o"
+
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_tag_dict[openshift_deployment_type] }}"
+
+l_crio_image_default: "{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
 l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
 
 # ----------------------- #
 # systemcontainers_docker #
 # ----------------------- #
-l_crt_docker_image_prepend_dict:
-  Fedora: "registry.fedoraproject.org/f25"
-  Centos: "docker.io/gscrivano"
-  RedHat: "registry.access.redhat.com/openshift3"
+l_crt_docker_image_dict:
+  Fedora: "registry.fedoraproject.org/latest/docker"
+  CentOS: "registry.centos.org/projectatomic/docker"
+  RedHat: "registry.access.redhat.com/openshift3/container-engine"
 
 openshift_docker_image_tag_default: "latest"
 l_crt_docker_image_tag_dict:
   openshift-enterprise: "{{ l_openshift_image_tag }}"
   origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
 
-l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_prepend: "{{ l_crt_docker_image_dict[ansible_distribution] }}"
 l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
 
-l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image_default: "{{ l_docker_image_prepend }}:{{ l_docker_image_tag }}"
 l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
 
 l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
diff --git a/roles/container_runtime/handlers/main.yml b/roles/container_runtime/handlers/main.yml
index 67cd6d782..87c5de0e9 100644
--- a/roles/container_runtime/handlers/main.yml
+++ b/roles/container_runtime/handlers/main.yml
@@ -6,7 +6,7 @@
     state: restarted
     daemon_reload: yes
   register: r_docker_restart_docker_result
-  until: not r_docker_restart_docker_result | failed
+  until: not (r_docker_restart_docker_result is failed)
   retries: 3
   delay: 30
   when: not docker_service_status_changed | default(false) | bool
diff --git a/roles/container_runtime/meta/main.yml b/roles/container_runtime/meta/main.yml
index 02fceb745..3bc2607fb 100644
--- a/roles/container_runtime/meta/main.yml
+++ b/roles/container_runtime/meta/main.yml
@@ -11,5 +11,5 @@ galaxy_info:
   - 7
 dependencies:
 - role: lib_openshift
-- role: lib_os_firewall
 - role: lib_utils
+- role: openshift_facts
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
index d790eb2c0..23fd8528a 100644
--- a/roles/container_runtime/tasks/common/post.yml
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -11,7 +11,7 @@
 - meta: flush_handlers
 
 # This needs to run after docker is restarted to account for proxy settings.
-# registry_auth is called directly with include_role in some places, so we
+# registry_auth is called directly with import_role in some places, so we
 # have to put it in the root of the tasks/ directory.
 - include_tasks: ../registry_auth.yml
 
@@ -22,5 +22,5 @@
 
 - include_tasks: setup_docker_symlink.yml
   when:
-  - openshift_use_crio
+  - openshift_use_crio | bool
   - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
index 715ed492d..d429047e6 100644
--- a/roles/container_runtime/tasks/common/syscontainer_packages.yml
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -4,18 +4,18 @@
   package:
     name: container-selinux
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 # Used to pull and install the system container
 - name: Ensure atomic is installed
   package:
     name: atomic
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 # At the time of writing the atomic command requires runc for it's own use. This
 # task is here in the even that the atomic package ever removes the dependency.
@@ -23,6 +23,6 @@
   package:
     name: runc
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
index e62cf5505..bc4da1cce 100644
--- a/roles/container_runtime/tasks/docker_sanity.yml
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -5,23 +5,38 @@
 - name: Error out if Docker pre-installed but too old
   fail:
     msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
-  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+  when:
+  - not (curr_docker_version is skipped)
+  - curr_docker_version.stdout != ''
+  - curr_docker_version.stdout is version_compare('1.9.1', '<')
+  - not (docker_version is defined)
 
 - name: Error out if requested Docker is too old
   fail:
     msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
-  when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+  when:
+  - docker_version is defined
+  - docker_version is version_compare('1.9.1', '<')
 
 # If a docker_version was requested, sanity check that we can install or upgrade to it, and
 # no downgrade is required.
 - name: Fail if Docker version requested but downgrade is required
   fail:
     msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
-  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+  when:
+  - not (curr_docker_version is skipped)
+  - curr_docker_version.stdout != ''
+  - docker_version is defined
+  - curr_docker_version.stdout is version_compare(docker_version, '>')
 
 # This involves an extremely slow migration process, users should instead run the
 # Docker 1.10 upgrade playbook to accomplish this.
 - name: Error out if attempting to upgrade Docker across the 1.10 boundary
   fail:
     msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
-  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+  when:
+  - not (curr_docker_version is skipped)
+  - curr_docker_version.stdout != ''
+  - curr_docker_version.stdout is version_compare('1.10', '<')
+  - docker_version is defined
+  - docker_version is version_compare('1.10', '>=')
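Another repeated cleanup, concentrated in docker_sanity.yml above: long and-chained `when:` expressions become YAML lists (Ansible ANDs the items together), and the deprecated `version_compare` filter becomes the `version_compare` test. A standalone sketch of the same construct, with made-up variable values:

    - debug:
        msg: "upgrade required"
      when:
      - curr_version is defined
      - curr_version is version_compare('1.12', '<')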
diff --git a/roles/container_runtime/tasks/docker_storage_setup_overlay.yml b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml
new file mode 100644
index 000000000..782c002e3
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml
@@ -0,0 +1,10 @@
+---
+- name: Setup the docker-storage for overlay
+  template:
+    src: docker_storage_setup.j2
+    dest: /etc/sysconfig/docker-storage-setup
+    owner: root
+    group: root
+    mode: 0664
+  when:
+  - container_runtime_docker_storage_type == 'overlay2'
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
new file mode 100644
index 000000000..8dd916e79
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -0,0 +1,81 @@
+---
+
+# This snippet determines if a Docker upgrade is required by checking the inventory
+# variables, the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+    docker_upgrade: True
+  when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+  command: rpm -q docker
+  args:
+    warn: no
+  register: pkg_check
+  failed_when: pkg_check.rc > 1
+  changed_when: no
+
+- name: Get current version of Docker
+  command: "{{ repoquery_installed }} --qf '%{version}' docker"
+  register: curr_docker_version
+  retries: 4
+  until: curr_docker_version is succeeded
+  changed_when: false
+  when: not openshift_is_atomic | bool
+
+- name: Get latest available version of Docker
+  command: >
+    {{ repoquery_cmd }} --qf '%{version}' "docker"
+  register: avail_docker_version
+  retries: 4
+  until: avail_docker_version is succeeded
+  # Don't expect docker rpm to be available on hosts that don't already have it installed:
+  when:
+  - not openshift_is_atomic | bool
+  - pkg_check.rc == 0
+  failed_when: false
+  changed_when: false
+
+- fail:
+    msg: This playbook requires access to Docker 1.12 or later
+  # Disable the 1.12 requirement if the user set a specific Docker version
+  when:
+  - not openshift_is_atomic | bool
+  - docker_version is not defined
+  - docker_upgrade is not defined or docker_upgrade | bool == True
+  - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+    l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+    docker_version: "{{ avail_docker_version.stdout }}"
+  when:
+  - not openshift_is_atomic | bool
+  - pkg_check.rc == 0 and docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+  set_fact:
+    l_docker_upgrade: True
+  when:
+  - not openshift_is_atomic | bool
+  - pkg_check.rc == 0
+  - curr_docker_version.stdout is version_compare(docker_version,'<')
+
+# Additional checks for Atomic hosts:
+- name: Determine available Docker
+  shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+  register: g_atomic_docker_version_result
+  when: openshift_is_atomic | bool
+
+- set_fact:
+    l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+  when: openshift_is_atomic | bool
+
+- fail:
+    msg: This playbook requires access to Docker 1.12 or later
+  when:
+  - openshift_is_atomic | bool
+  - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 96d8606c6..07da831c4 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,2 +1,2 @@
 ---
-# This role is meant to be used with include_role and tasks_from.
+# This role is meant to be used with import_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 89899c9cf..ed9a2709b 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,12 +1,23 @@
 ---
 - include_tasks: common/pre.yml
 
+# In some cases, some services may be run as containers and docker may still
+# be installed via rpm.
+- include_tasks: common/atomic_proxy.yml
+  when:
+  - >
+    (openshift_use_system_containers | default(False)) | bool
+    or (openshift_use_etcd_system_container | default(False)) | bool
+    or (openshift_use_openvswitch_system_container | default(False)) | bool
+    or (openshift_use_node_system_container | default(False)) | bool
+    or (openshift_use_master_system_container | default(False)) | bool
+
 - name: Get current installed Docker version
   command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: curr_docker_version
   retries: 4
-  until: curr_docker_version | succeeded
+  until: curr_docker_version is succeeded
   changed_when: false
 
 # Some basic checks to ensure the role will complete
@@ -19,9 +30,12 @@
   package:
     name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
     state: present
-  when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
+  when:
+  - not (openshift_is_atomic | bool)
+  - not (curr_docker_version is skipped)
+  - not (curr_docker_version.stdout != '')
   register: result
-  until: result | success
+  until: result is succeeded
 
 - block:
   # Extend the default Docker service unit file when using iptables-services
@@ -45,7 +59,7 @@
     lineinfile:
       dest: /etc/sysconfig/docker
       regexp: '^{{ item.reg_conf_var }}=.*$'
-      line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+      line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | lib_utils_oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
     when:
     - item.reg_fact_val != []
     - docker_check.stat.isreg is defined
@@ -98,7 +112,7 @@
       line: "OPTIONS='\
 {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
 {% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \
-{% if l2_docker_log_options != [] %} {{ l2_docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+{% if l2_docker_log_options != [] %} {{ l2_docker_log_options | lib_utils_oo_split() | lib_utils_oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
 {% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \
 {% if docker_options is defined %} {{ docker_options }}{% endif %} \
 {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \
@@ -137,11 +151,11 @@
     state: started
     daemon_reload: yes
   register: r_docker_package_docker_start_result
-  until: not r_docker_package_docker_start_result | failed
+  until: not (r_docker_package_docker_start_result is failed)
   retries: 3
   delay: 30
 
 - set_fact:
-    docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
+    docker_service_status_changed: "{{ (r_docker_package_docker_start_result is changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
 
 - include_tasks: common/post.yml
diff --git a/roles/container_runtime/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml
index 2c7bc5711..4f1abd59a 100644
--- a/roles/container_runtime/tasks/registry_auth.yml
+++ b/roles/container_runtime/tasks/registry_auth.yml
@@ -15,6 +15,7 @@
   - not openshift_docker_alternative_creds | bool
   - oreg_auth_user is defined
   - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  no_log: True
 
 # docker_creds is a custom module from lib_utils
 # 'docker login' requires a docker.service running on the local host, this is an
@@ -30,3 +31,4 @@
   - openshift_docker_alternative_creds | bool
   - oreg_auth_user is defined
   - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  no_log: True
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 61f122f3c..d588f2618 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -3,7 +3,7 @@
 - name: Check we are not using node as a Docker container with CRI-O
   fail: msg='Cannot use CRI-O with node configured as a Docker container'
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_is_node_system_container | bool
 
 - include_tasks: common/pre.yml
@@ -81,6 +81,17 @@
     dest: /etc/cni/net.d/openshift-sdn.conf
     src: 80-openshift-sdn.conf.j2
 
+- name: Create /etc/sysconfig/crio-storage
+  copy:
+    content: ""
+    dest: /etc/sysconfig/crio-storage
+    force: no
+
+- name: Create /etc/sysconfig/crio-network
+  template:
+    dest: /etc/sysconfig/crio-network
+    src: crio-network.j2
+
 - name: Start the CRI-O service
   systemd:
     name: "cri-o"
@@ -93,4 +104,4 @@
 # 'docker login'
 - include_tasks: common/post.yml
   vars:
-    openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
+    openshift_docker_alternative_creds: "{{ openshift_use_crio_only | bool }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 10570fe34..5f715cd21 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -18,9 +18,9 @@
 # Make sure Docker is installed so we are able to use the client
 - name: Install Docker so we can use the client
   package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 # Make sure docker is disabled. Errors are ignored.
 - name: Disable Docker
@@ -31,7 +31,7 @@
     daemon_reload: yes
   ignore_errors: True
   register: r_docker_systemcontainer_docker_stop_result
-  until: not r_docker_systemcontainer_docker_stop_result | failed
+  until: not (r_docker_systemcontainer_docker_stop_result is failed)
   retries: 3
   delay: 30
@@ -42,6 +42,12 @@
 - debug:
     var: l_docker_image
 
+# Do the authentication before pulling the container engine system container
+# as the pull might be from an authenticated registry.
+- include_tasks: registry_auth.yml
+  vars:
+    openshift_docker_alternative_creds: True
+
 # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
 - name: Pre-pull Container Engine System Container image
   command: "atomic pull --storage ostree {{ l_docker_image }}"
@@ -87,12 +93,12 @@
     state: started
     daemon_reload: yes
   register: r_docker_systemcontainer_docker_start_result
-  until: not r_docker_systemcontainer_docker_start_result | failed
+  until: not (r_docker_systemcontainer_docker_start_result is failed)
   retries: 3
   delay: 30
 
 - set_fact:
-    docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+    docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result is changed }}"
 
 # Since docker is running as a system container, docker login will fail to create
 # credentials. Use alternate method if requiring authenticated registries.
diff --git a/roles/container_runtime/templates/crio-network.j2 b/roles/container_runtime/templates/crio-network.j2
new file mode 100644
index 000000000..763be97d7
--- /dev/null
+++ b/roles/container_runtime/templates/crio-network.j2
@@ -0,0 +1,9 @@
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY={{ openshift.common.http_proxy }}
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY={{ openshift.common.https_proxy }}
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY={{ openshift.common.no_proxy }}
+{% endif %}
diff --git a/roles/container_runtime/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2
index 3f066a17f..0a1ff2e0a 100644
--- a/roles/container_runtime/templates/crio.conf.j2
+++ b/roles/container_runtime/templates/crio.conf.j2
@@ -27,7 +27,7 @@ storage_option = [
 [crio.api]
 
 # listen is the path to the AF_LOCAL socket on which crio will listen.
-listen = "/var/run/crio.sock"
+listen = "/var/run/crio/crio.sock"
 
 # stream_address is the IP address on which the stream server will listen
 stream_address = ""
diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2
new file mode 100644
index 000000000..ec540ea44
--- /dev/null
+++ b/roles/container_runtime/templates/docker_storage_setup.j2
@@ -0,0 +1,16 @@
+# Edit this file to override any configuration options specified in
+# /usr/lib/docker-storage-setup/docker-storage-setup.
+#
+# For more details refer to "man docker-storage-setup"
+{% if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' %}
+DEVS={{ container_runtime_docker_storage_setup_device }}
+VG={{ docker_storage_setup_options.vg }}
+DATA_SIZE={{ docker_storage_setup_options.data_size }}
+STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
+CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}"
+CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}"
+CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}"
+{% else %}
+STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
+{% endif %}
+EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}"
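For reference, with the new container_runtime defaults above (overlay2 driver, 40G size, /var/lib/docker path) and an illustrative `container_runtime_docker_storage_setup_device=/dev/vdb`, docker_storage_setup.j2 renders roughly this /etc/sysconfig/docker-storage-setup:

    # Edit this file to override any configuration options specified in
    # /usr/lib/docker-storage-setup/docker-storage-setup.
    #
    # For more details refer to "man docker-storage-setup"
    DEVS=/dev/vdb
    VG=docker_vg
    DATA_SIZE=99%VG
    STORAGE_DRIVER="overlay2"
    CONTAINER_ROOT_LV_NAME="docker-root-lv"
    CONTAINER_ROOT_LV_SIZE="100%FREE"
    CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
    EXTRA_STORAGE_OPTIONS="--storage-opt overlay2.override_kernel_check=true --storage-opt overlay2.size=40G --graph=/var/lib/docker"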
diff --git a/roles/contiv/README.md b/roles/contiv/README.md
index fa36039d9..ce414f9fb 100644
--- a/roles/contiv/README.md
+++ b/roles/contiv/README.md
@@ -19,8 +19,8 @@ Install Contiv components (netmaster, netplugin, contiv_etcd) on Master and Mini
 * ``openshift_use_contiv=True``
 * ``openshift_use_openshift_sdn=False``
 * ``os_sdn_network_plugin_name='cni'``
-* ``netmaster_interface=eth0``
-* ``netplugin_interface=eth1``
+* ``contiv_netmaster_interface=eth0``
+* ``contiv_netplugin_interface=eth1``
 * ref. Openshift docs Contiv section for more details
 
 ## Example bare metal deployment of Openshift + Contiv
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index aa976d921..4869abc61 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,51 +1,63 @@
 ---
 # The version of Contiv binaries to use
-contiv_version: 1.1.1
+contiv_version: 1.2.0
 
 # The version of cni binaries
-cni_version: v0.4.0
+contiv_cni_version: v0.4.0
+
+# If the node we are deploying to is to be a contiv master.
+contiv_master: false
 
 contiv_default_subnet: "10.128.0.0/16"
 contiv_default_gw: "10.128.254.254"
 
-# TCP port that Netmaster listens for network connections
-netmaster_port: 9999
-# Default for contiv_role
-contiv_role: netmaster
+# Ports netmaster listens on
+contiv_netmaster_port: 9999
+contiv_netmaster_port_proto: tcp
+contiv_ofnet_master_port: 9001
+contiv_ofnet_master_port_proto: tcp
 
+# Ports netplugin listens on
+contiv_netplugin_port: 6640
+contiv_netplugin_port_proto: tcp
+contiv_ofnet_vxlan_port: 9002
+contiv_ofnet_vxlan_port_proto: tcp
+contiv_ovs_port: 9003
+contiv_ovs_port_proto: tcp
 
-# TCP port that Netplugin listens for network connections
-netplugin_port: 6640
-contiv_rpc_port1: 9001
-contiv_rpc_port2: 9002
-contiv_rpc_port3: 9003
+contiv_vxlan_port: 4789
+contiv_vxlan_port_proto: udp
 
 # Interface used by Netplugin for inter-host traffic when encap_mode is vlan.
 # The interface must support 802.1Q trunking.
-netplugin_interface: "eno16780032"
+contiv_netplugin_interface: "eno16780032"
 
 # IP address of the interface used for control communication within the cluster
 # It needs to be reachable from all nodes in the cluster.
-netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
 
 # IP used to terminate vxlan tunnels
-netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
 
 # Interface used to bind Netmaster service
-netmaster_interface: "{{ netplugin_interface }}"
+contiv_netmaster_interface: "{{ contiv_netplugin_interface }}"
+
+# IP address of the interface used for control communication within the cluster
+# It needs to be reachable from all nodes in the cluster.
+contiv_netmaster_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
 
 # Path to the contiv binaries
-bin_dir: /usr/bin
+contiv_bin_dir: /usr/bin
 
 # Path to the contivk8s cni binary
-cni_bin_dir: /opt/cni/bin
+contiv_cni_bin_dir: /opt/cni/bin
 
 # Path to cni archive download directory
-cni_download_dir: /tmp
+contiv_cni_download_dir: /tmp
 
 # URL for cni binaries
-cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
-cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+contiv_cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+contiv_cni_bin_url: "{{ contiv_cni_bin_url_base }}/{{ contiv_cni_version }}/cni-{{ contiv_cni_version }}.tbz2"
 
 # Contiv config directory
@@ -60,11 +72,11 @@ contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download
 contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2"
 
 # This is where kubelet looks for plugin files
-kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
+contiv_kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
 
 # Specifies routed mode vs bridged mode for networking (bridge | routing)
 # if you are using an external router for all routing, you should select bridge here
-netplugin_fwd_mode: bridge
+contiv_netplugin_fwd_mode: routing
 
 # Contiv fabric mode aci|default
 contiv_fabric_mode: default
@@ -73,10 +85,10 @@ contiv_fabric_mode: default
 contiv_vlan_range: "2900-3000"
 
 # Encapsulation type vlan|vxlan to use for instantiating container networks
-contiv_encap_mode: vlan
+contiv_encap_mode: vxlan
 
 # Backend used by Netplugin for instantiating container networks
-netplugin_driver: ovs
+contiv_netplugin_driver: ovs
 
 # Create a default Contiv network for use by pods
 contiv_default_network: true
@@ -85,39 +97,80 @@ contiv_default_network: true
 contiv_default_network_tag: ""
 
 #SRFIXME (use the openshift variables)
-https_proxy: ""
-http_proxy: ""
-no_proxy: ""
+contiv_https_proxy: ""
+contiv_http_proxy: ""
+contiv_no_proxy: ""
 
 # The following are aci specific parameters when contiv_fabric_mode: aci is set.
 # Otherwise, you can ignore these.
-apic_url: ""
-apic_username: ""
-apic_password: ""
-apic_leaf_nodes: ""
-apic_phys_dom: ""
-apic_contracts_unrestricted_mode: no
-apic_epg_bridge_domain: not_specified
+contiv_apic_url: ""
+contiv_apic_username: ""
+contiv_apic_password: ""
+contiv_apic_leaf_nodes: ""
+contiv_apic_phys_dom: ""
+contiv_apic_contracts_unrestricted_mode: no
+contiv_apic_epg_bridge_domain: not_specified
 apic_configure_default_policy: false
-apic_default_external_contract: "uni/tn-common/brc-default"
-apic_default_app_profile: "contiv-infra-app-profile"
-is_atomic: False
-kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
-master_name: "{{ groups['masters'][0] }}"
-contiv_etcd_port: 22379
-etcd_url: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ contiv_etcd_port }}"
-kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
-kube_key: "{{ kube_cert_dir }}/admin.key"
-kube_cert: "{{ kube_cert_dir }}/admin.crt"
-kube_master_api_port: 8443
+contiv_apic_default_external_contract: "uni/tn-common/brc-default"
+contiv_apic_default_app_profile: "contiv-infra-app-profile"
+contiv_kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
+contiv_kube_ca_cert: "{{ contiv_kube_cert_dir }}/ca.crt"
+contiv_kube_key: "{{ contiv_kube_cert_dir }}/admin.key"
+contiv_kube_cert: "{{ contiv_kube_cert_dir }}/admin.crt"
+contiv_kube_master_api_port: 8443
+contiv_kube_master_api_port_proto: tcp
 
 # contivh1 default subnet and gateway
-#contiv_h1_subnet_default: "132.1.1.0/24"
-#contiv_h1_gw_default: "132.1.1.1"
 contiv_h1_subnet_default: "10.129.0.0/16"
 contiv_h1_gw_default: "10.129.0.1"
 
 # contiv default private subnet for ext access
 contiv_private_ext_subnet: "10.130.0.0/16"
 
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+contiv_openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
+
+contiv_api_proxy_port: 10000
+contiv_api_proxy_port_proto: tcp
+contiv_api_proxy_image_repo: contiv/auth_proxy
+contiv_api_proxy_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
+
+contiv_etcd_system_user: contivetcd
+contiv_etcd_system_uid: 823
+contiv_etcd_system_group: contivetcd
+contiv_etcd_system_gid: 823
+contiv_etcd_port: 22379
+contiv_etcd_port_proto: tcp
+contiv_etcd_peer_port: 22380
+contiv_etcd_peer_port_proto: tcp
+contiv_etcd_url: "http://127.0.0.1:{{ contiv_etcd_port }}"
+contiv_etcd_init_image_repo: ferest/etcd-initer
+contiv_etcd_init_image_tag: latest
+contiv_etcd_image_repo: quay.io/coreos/etcd
+contiv_etcd_image_tag: v3.2.4
+contiv_etcd_conf_dir: /etc/contiv-etcd
+contiv_etcd_data_dir: /var/lib/contiv-etcd
+contiv_etcd_peers: |-
+  {% for host in groups.oo_masters_to_config -%}
+    {{ host }}=http://{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
+  {%- endfor %}
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netplugin runs on, from all host IPs in the cluster.
+contiv_netplugin_internal: [ "{{ contiv_ofnet_vxlan_port }}/{{ contiv_ofnet_vxlan_port_proto }}",
+                             "{{ contiv_ovs_port }}/{{ contiv_ovs_port_proto }}",
+                             "{{ contiv_vxlan_port }}/{{ contiv_vxlan_port_proto }}" ]
+# Allow all forwarded traffic in and out of these interfaces.
+contiv_netplugin_forward_interfaces: [ contivh0, contivh1 ]
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from all host IPs in the cluster. Note that every host
+# that runs netmaster also runs netplugin, so the above netplugin rules will
+# apply as well.
+contiv_netmaster_internal: [ "{{ contiv_ofnet_master_port }}/{{ contiv_ofnet_master_port_proto }}",
+                             "{{ contiv_netmaster_port }}/{{ contiv_netmaster_port_proto }}",
+                             "{{ contiv_etcd_port }}/{{ contiv_etcd_port_proto }}",
+                             "{{ contiv_etcd_peer_port }}/{{ contiv_etcd_peer_port_proto }}",
+                             "{{ contiv_kube_master_api_port }}/{{ contiv_kube_master_api_port_proto }}" ]
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from any host anywhere.
+contiv_netmaster_external: [ "{{ contiv_api_proxy_port }}/{{ contiv_api_proxy_port_proto }}" ]
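The new `contiv_etcd_peers` default above assembles the etcd peer list from the masters group. For an assumed inventory with two masters named master1 (10.0.0.1) and master2 (10.0.0.2), the Jinja block renders to:

    master1=http://10.0.0.1:22380,master2=http://10.0.0.2:22380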
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index a2c2f98a7..e8607cc90 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -13,18 +13,5 @@ galaxy_info:
   - cloud
   - system
 dependencies:
+- role: lib_utils
 - role: contiv_facts
-- role: etcd
-  etcd_service: contiv-etcd
-  etcd_is_thirdparty: True
-  etcd_peer_port: 22380
-  etcd_client_port: 22379
-  etcd_conf_dir: /etc/contiv-etcd/
-  etcd_data_dir: /var/lib/contiv-etcd/
-  etcd_ca_host: "{{ inventory_hostname }}"
-  etcd_cert_config_dir: /etc/contiv-etcd/
-  etcd_url_scheme: http
-  etcd_peer_url_scheme: http
-  when: contiv_role == "netmaster"
-- role: contiv_auth_proxy
-  when: contiv_role == "netmaster"
diff --git a/roles/contiv/tasks/aci.yml b/roles/contiv/tasks/aci.yml
index 30d2eb339..8a56b3590 100644
--- a/roles/contiv/tasks/aci.yml
+++ b/roles/contiv/tasks/aci.yml
@@ -11,7 +11,7 @@
 - name: ACI | Copy shell script used by aci-gw service
   template:
     src: aci_gw.j2
-    dest: "{{ bin_dir }}/aci_gw.sh"
+    dest: "{{ contiv_bin_dir }}/aci_gw.sh"
     mode: u=rwx,g=rx,o=rx
 
 - name: ACI | Copy systemd units for aci-gw
diff --git a/roles/contiv/tasks/api_proxy.yml b/roles/contiv/tasks/api_proxy.yml
new file mode 100644
index 000000000..8b524dd6e
--- /dev/null
+++ b/roles/contiv/tasks/api_proxy.yml
@@ -0,0 +1,120 @@
+---
+- name: API proxy | Create contiv-api-proxy openshift user
+  oc_serviceaccount:
+    state: present
+    name: contiv-api-proxy
+    namespace: kube-system
+  run_once: true
+
+- name: API proxy | Set contiv-api-proxy openshift user permissions
+  oc_adm_policy_user:
+    user: system:serviceaccount:kube-system:contiv-api-proxy
+    resource_kind: scc
+    resource_name: hostnetwork
+    state: present
+  run_once: true
+
+- name: API proxy | Create temp directory for doing work
+  command: mktemp -d /tmp/openshift-contiv-XXXXXX
+  register: mktemp
+  changed_when: False
+  # For things that pass temp files between steps, we want to make sure they
+  # run on the same node.
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Check for existing api proxy secret volume
+  oc_obj:
+    namespace: kube-system
+    kind: secret
+    state: list
+    selector: "name=contiv-api-proxy-secret"
+  register: existing_secret_volume
+  run_once: true
+
+- name: API proxy | Generate a self signed certificate for api proxy
+  command: openssl req -new -nodes -x509 -subj "/C=US/ST=/L=/O=/CN=localhost" -days 3650 -keyout "{{ mktemp.stdout }}/key.pem" -out "{{ mktemp.stdout }}/cert.pem" -extensions v3_ca
+  when: (contiv_api_proxy_cert is not defined or contiv_api_proxy_key is not defined)
+        and not existing_secret_volume.results.results[0]['items']
+  register: created_self_signed_cert
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Read self signed certificate file
+  command: cat "{{ mktemp.stdout }}/cert.pem"
+  register: generated_cert
+  when: created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Read self signed key file
+  command: cat "{{ mktemp.stdout }}/key.pem"
+  register: generated_key
+  when: created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using generated cert
+  template:
+    src: api-proxy-secrets.yml.j2
+    dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+  vars:
+    key: "{{ generated_key.stdout }}"
+    cert: "{{ generated_cert.stdout }}"
+  when: created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using user defined cert
+  template:
+    src: api-proxy-secrets.yml.j2
+    dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+  vars:
+    key: "{{ lookup('file', contiv_api_proxy_key) }}"
+    cert: "{{ lookup('file', contiv_api_proxy_cert) }}"
+  when: contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create secret certificate volume
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: secret
+    name: contiv-api-proxy-secret
+    files:
+    - "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+  when: (contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined)
+        or created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create api-proxy-daemonset.yml from template
+  template:
+    src: api-proxy-daemonset.yml.j2
+    dest: "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+  vars:
+    etcd_host: "etcd://{{ groups.oo_etcd_to_config.0 }}:{{ contiv_etcd_port }}"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+# Always "import" this file, k8s won't do anything if it matches exactly what
+# is already in the cluster.
+- name: API proxy | Add API proxy daemonset
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: daemonset
+    name: contiv-api-proxy
+    files:
+    - "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
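A note on the shape of api_proxy.yml above: as its comments say, tasks that hand temp files to each other are pinned to a single host with `delegate_to` plus `run_once`, so every step sees the same /tmp contents. A minimal sketch of that pattern, with hypothetical task bodies:

    - name: Create a scratch file on one designated master
      command: mktemp /tmp/scratch-XXXXXX
      register: scratch
      delegate_to: "{{ groups.oo_masters_to_config.0 }}"
      run_once: true

    - name: Consume it on the same host
      command: cat "{{ scratch.stdout }}"
      delegate_to: "{{ groups.oo_masters_to_config.0 }}"
      run_once: true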
inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1' + when: (contiv_encap_mode == "vxlan") and (contiv_netplugin_fwd_mode == "routing") + run_once: true -#- name: Contiv | Create an allow-all policy for the default-group -# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy' +#- name: Default network | Create an allow-all policy for the default-group +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy create ose-allow-all-policy' # when: contiv_fabric_mode == "aci" +# run_once: true -- name: Contiv | Set up aci external contract to consume default external contract - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume' +- name: Default network | Set up aci external contract to consume default external contract + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -c -a {{ contiv_apic_default_external_contract }} oseExtToConsume' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + run_once: true -- name: Contiv | Set up aci external contract to provide default external contract - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide' +- name: Default network | Set up aci external contract to provide default external contract + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -p -a {{ contiv_apic_default_external_contract }} oseExtToProvide' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + run_once: true -- name: Contiv | Create aci default-group - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group' +- name: Default network | Create aci default-group + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create default-net default-group' when: contiv_fabric_mode == "aci" + run_once: true -- name: Contiv | Add external contracts to the default-group - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group' +- name: Default network | Add external contracts to the default-group + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + run_once: true -#- name: Contiv | Add policy rule 1 for allow-all policy -# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1' +#- name: Default network | Add policy rule 1 for allow-all policy +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1' # when: contiv_fabric_mode == "aci" +# run_once: true -#- name: Contiv | Add policy rule 2 for allow-all policy -# command: 'netctl --netmaster "http://{{ 
inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2' +#- name: Default network | Add policy rule 2 for allow-all policy +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2' # when: contiv_fabric_mode == "aci" +# run_once: true -- name: Contiv | Create default aci app profile - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}' +- name: Default network | Create default aci app profile + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" app-profile create -g default-group {{ contiv_apic_default_app_profile }}' when: contiv_fabric_mode == "aci" + run_once: true diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml index 741c1d1da..47d74da9c 100644 --- a/roles/contiv/tasks/download_bins.yml +++ b/roles/contiv/tasks/download_bins.yml @@ -4,12 +4,12 @@ path: "{{ contiv_current_release_directory }}" state: directory -- name: Install bzip2 +- name: Download Bins | Install bzip2 yum: name: bzip2 state: installed register: result - until: result | success + until: result is succeeded - name: Download Bins | Download Contiv tar file get_url: @@ -18,9 +18,9 @@ mode: 0755 validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" - name: Download Bins | Extract Contiv tar file unarchive: @@ -30,19 +30,19 @@ - name: Download Bins | Download cni tar file get_url: - url: "{{ cni_bin_url }}" - dest: "{{ cni_download_dir }}" + url: "{{ contiv_cni_bin_url }}" + dest: "{{ contiv_cni_download_dir }}" mode: 0755 validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" register: download_file - name: Download Bins | Extract cni tar file unarchive: src: "{{ download_file.dest }}" - dest: "{{ cni_download_dir }}" + dest: "{{ contiv_cni_download_dir }}" copy: no when: download_file.changed diff --git a/roles/contiv/tasks/etcd.yml b/roles/contiv/tasks/etcd.yml new file mode 100644 index 000000000..b08ead982 --- /dev/null +++ b/roles/contiv/tasks/etcd.yml @@ -0,0 +1,114 @@ +--- +# To run contiv-etcd in a container as non-root, we need to match the uid/gid +# with the filesystem permissions on the host. 
+- name: Contiv etcd | Create local unix group + group: + name: "{{ contiv_etcd_system_group }}" + gid: "{{ contiv_etcd_system_gid }}" + system: yes + +- name: Contiv etcd | Create local unix user + user: + name: "{{ contiv_etcd_system_user }}" + createhome: no + uid: "{{ contiv_etcd_system_uid }}" + group: "{{ contiv_etcd_system_group }}" + home: "{{ contiv_etcd_data_dir }}" + shell: /bin/false + system: yes + +- name: Contiv etcd | Create directories + file: + path: "{{ item }}" + state: directory + mode: g-rwx,o-rwx + owner: "{{ contiv_etcd_system_user }}" + group: "{{ contiv_etcd_system_group }}" + setype: svirt_sandbox_file_t + seuser: system_u + serole: object_r + selevel: s0 + recurse: yes + with_items: + - "{{ contiv_etcd_data_dir }}" + - "{{ contiv_etcd_conf_dir }}" + +- name: Contiv etcd | Create contiv-etcd openshift user + oc_serviceaccount: + state: present + name: contiv-etcd + namespace: kube-system + run_once: true + +- name: Contiv etcd | Create temp directory for doing work + command: mktemp -d /tmp/openshift-contiv-XXXXXX + register: mktemp + changed_when: False + # For things that pass temp files between steps, we want to make sure they + # run on the same node. + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Create etcd-scc.yml from template + template: + src: etcd-scc.yml.j2 + dest: "{{ mktemp.stdout }}/etcd-scc.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Create etcd.yml from template + template: + src: etcd-daemonset.yml.j2 + dest: "{{ mktemp.stdout }}/etcd-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Create etcd-proxy.yml from template + template: + src: etcd-proxy-daemonset.yml.j2 + dest: "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Add etcd scc + oc_obj: + state: present + namespace: "kube-system" + kind: SecurityContextConstraints + name: contiv-etcd + files: + - "{{ mktemp.stdout }}/etcd-scc.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +# Always "import" this file, k8s won't do anything if it matches exactly what +# is already in the cluster. 
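+# (Re-running the play is therefore safe: oc_obj re-submits the rendered
+# daemonset, and an object identical to what is already in the cluster
+# results in no change and no pod restarts.)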
+- name: Contiv etcd | Add etcd daemonset + oc_obj: + state: present + namespace: "kube-system" + kind: daemonset + name: contiv-etcd + files: + - "{{ mktemp.stdout }}/etcd-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Add etcd-proxy daemonset + oc_obj: + state: present + namespace: "kube-system" + kind: daemonset + name: contiv-etcd-proxy + files: + - "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml index cb9196a71..4d530ae90 100644 --- a/roles/contiv/tasks/main.yml +++ b/roles/contiv/tasks/main.yml @@ -1,14 +1,15 @@ --- -- name: Ensure bin_dir exists +- include_tasks: old_version_cleanup.yml + +- name: Ensure contiv_bin_dir exists file: - path: "{{ bin_dir }}" + path: "{{ contiv_bin_dir }}" recurse: yes state: directory - include_tasks: download_bins.yml - include_tasks: netmaster.yml - when: contiv_role == "netmaster" + when: contiv_master - include_tasks: netplugin.yml - when: contiv_role == "netplugin" diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml index 6f15af8c2..bb22fb801 100644 --- a/roles/contiv/tasks/netmaster.yml +++ b/roles/contiv/tasks/netmaster.yml @@ -1,34 +1,16 @@ --- - include_tasks: netmaster_firewalld.yml - when: has_firewalld + when: contiv_has_firewalld - include_tasks: netmaster_iptables.yml - when: not has_firewalld and has_iptables + when: not contiv_has_firewalld and contiv_has_iptables -- name: Netmaster | Check is /etc/hosts file exists - stat: - path: /etc/hosts - register: hosts - -- name: Netmaster | Create hosts file if it is not present - file: - path: /etc/hosts - state: touch - when: not hosts.stat.exists - -- name: Netmaster | Build hosts file - lineinfile: - dest: /etc/hosts - regexp: .*netmaster$ - line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster" - state: present - when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined - with_items: "{{ groups['masters'] }}" +- include_tasks: etcd.yml - name: Netmaster | Create netmaster symlinks file: src: "{{ contiv_current_release_directory }}/{{ item }}" - dest: "{{ bin_dir }}/{{ item }}" + dest: "{{ contiv_bin_dir }}/{{ item }}" state: link with_items: - netmaster @@ -36,7 +18,7 @@ - name: Netmaster | Copy environment file for netmaster template: - src: netmaster.env.j2 + src: netmaster.j2 dest: /etc/default/netmaster mode: 0644 notify: restart netmaster @@ -75,3 +57,5 @@ - include_tasks: default_network.yml when: contiv_default_network == true + +- include_tasks: api_proxy.yml diff --git a/roles/contiv/tasks/netmaster_firewalld.yml b/roles/contiv/tasks/netmaster_firewalld.yml index 2975351ac..0d52f821d 100644 --- a/roles/contiv/tasks/netmaster_firewalld.yml +++ b/roles/contiv/tasks/netmaster_firewalld.yml @@ -1,16 +1,17 @@ --- -- name: Netmaster Firewalld | Open Netmaster port +- name: Netmaster Firewalld | Add internal rules firewalld: - port: "{{ netmaster_port }}/tcp" - permanent: false - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes + immediate: true + permanent: true + port: "{{ item[0] }}" + source: "{{ item[1] }}" + with_nested: + - "{{ contiv_netmaster_internal }}" + - "{{ 
groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" -- name: Netmaster Firewalld | Save Netmaster port +- name: Netmaster Firewalld | Add external rules firewalld: - port: "{{ netmaster_port }}/tcp" + immediate: true permanent: true - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes + port: "{{ item }}" + with_items: "{{ contiv_netmaster_external }}" diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml index 07bb16ea7..3b68ea0c3 100644 --- a/roles/contiv/tasks/netmaster_iptables.yml +++ b/roles/contiv/tasks/netmaster_iptables.yml @@ -1,21 +1,32 @@ --- -- name: Netmaster IPtables | Get iptables rules - command: iptables -L --wait - register: iptablesrules - check_mode: no - -- name: Netmaster IPtables | Enable iptables at boot - service: - name: iptables - enabled: yes - state: started +- name: Netmaster IPtables | Add internal rules + iptables: + action: insert + chain: INPUT + # Parsed from the contiv_netmaster_internal list, this will be tcp or udp. + protocol: "{{ item[0].split('/')[1] }}" + match: "{{ item[0].split('/')[1] }}" + # Parsed from the contiv_netmaster_internal list, this will be a port number. + destination_port: "{{ item[0].split('/')[0] }}" + # This is an IP address from a node in the cluster. + source: "{{ item[1] }}" + jump: ACCEPT + comment: contiv + with_nested: + - "{{ contiv_netmaster_internal }}" + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" + notify: Save iptables rules -- name: Netmaster IPtables | Open Netmaster with iptables - command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv" - with_items: - - "{{ netmaster_port }}" - - "{{ contiv_rpc_port1 }}" - - "{{ contiv_rpc_port2 }}" - - "{{ contiv_rpc_port3 }}" - when: iptablesrules.stdout.find("contiv") == -1 +- name: Netmaster IPtables | Add external rules + iptables: + action: insert + chain: INPUT + # Parsed from the contiv_netmaster_external list, this will be tcp or udp. + protocol: "{{ item.split('/')[1] }}" + match: "{{ item.split('/')[1] }}" + # Parsed from the contiv_netmaster_external list, this will be a port number. 
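+    # (For example, assuming an entry of the form "9999/tcp",
+    # item.split('/')[0] yields the port "9999" and item.split('/')[1]
+    # the protocol "tcp".)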
+ destination_port: "{{ item.split('/')[0] }}" + jump: ACCEPT + comment: contiv + with_items: "{{ contiv_netmaster_external }}" notify: Save iptables rules diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index cf92a8cc0..60f432202 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -1,9 +1,9 @@ --- - include_tasks: netplugin_firewalld.yml - when: has_firewalld + when: contiv_has_firewalld - include_tasks: netplugin_iptables.yml - when: has_iptables + when: not contiv_has_firewalld and contiv_has_iptables - name: Netplugin | Ensure localhost entry correct in /etc/hosts lineinfile: @@ -20,41 +20,40 @@ state: absent - include_tasks: ovs.yml - when: netplugin_driver == "ovs" + when: contiv_netplugin_driver == "ovs" - name: Netplugin | Create Netplugin bin symlink file: src: "{{ contiv_current_release_directory }}/netplugin" - dest: "{{ bin_dir }}/netplugin" + dest: "{{ contiv_bin_dir }}/netplugin" state: link - -- name: Netplugin | Ensure cni_bin_dir exists +- name: Netplugin | Ensure contiv_cni_bin_dir exists file: - path: "{{ cni_bin_dir }}" + path: "{{ contiv_cni_bin_dir }}" recurse: yes state: directory - name: Netplugin | Create CNI bin symlink file: src: "{{ contiv_current_release_directory }}/contivk8s" - dest: "{{ cni_bin_dir }}/contivk8s" + dest: "{{ contiv_cni_bin_dir }}/contivk8s" state: link - name: Netplugin | Copy CNI loopback bin copy: - src: "{{ cni_download_dir }}/loopback" - dest: "{{ cni_bin_dir }}/loopback" + src: "{{ contiv_cni_download_dir }}/loopback" + dest: "{{ contiv_cni_bin_dir }}/loopback" remote_src: True mode: 0755 -- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist +- name: Netplugin | Ensure contiv_kube_plugin_dir and cni/net.d directories exist file: path: "{{ item }}" recurse: yes state: directory with_items: - - "{{ kube_plugin_dir }}" + - "{{ contiv_kube_plugin_dir }}" - "/etc/cni/net.d" - name: Netplugin | Ensure contiv_config_dir exists @@ -68,7 +67,7 @@ src: contiv_cni.conf dest: "{{ item }}" with_items: - - "{{ kube_plugin_dir }}/contiv_cni.conf" + - "{{ contiv_kube_plugin_dir }}/contiv_cni.conf" - "/etc/cni/net.d" # notify: restart kubelet @@ -85,11 +84,11 @@ mode: 0644 notify: restart netplugin -- name: Docker | Make sure proxy setting exists +- name: Netplugin | Make sure docker proxy setting exists lineinfile: dest: /etc/sysconfig/docker-network regexp: '^https_proxy.*' - line: 'https_proxy={{ https_proxy }}' + line: 'https_proxy={{ contiv_https_proxy }}' state: present register: docker_updated @@ -101,15 +100,15 @@ - name: systemd reload command: systemctl daemon-reload - when: docker_updated|changed + when: docker_updated is changed -- name: Docker | Restart docker +- name: Netplugin | Restart docker service: - name: "{{ openshift_docker_service_name }}" + name: "{{ contiv_openshift_docker_service_name }}" state: restarted - when: docker_updated|changed + when: docker_updated is changed register: l_docker_restart_docker_in_contiv_result - until: not l_docker_restart_docker_in_contiv_result | failed + until: not (l_docker_restart_docker_in_contiv_result is failed) retries: 3 delay: 30 diff --git a/roles/contiv/tasks/netplugin_firewalld.yml b/roles/contiv/tasks/netplugin_firewalld.yml index 3aeffae56..5ac531ec6 100644 --- a/roles/contiv/tasks/netplugin_firewalld.yml +++ b/roles/contiv/tasks/netplugin_firewalld.yml @@ -1,34 +1,17 @@ --- -- name: Netplugin Firewalld | Open Netplugin port +- name: Netplugin Firewalld | Add internal rules firewalld: - port: "{{ 
netplugin_port }}/tcp" - permanent: false - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - -- name: Netplugin Firewalld | Save Netplugin port - firewalld: - port: "{{ netplugin_port }}/tcp" + immediate: true permanent: true - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - -- name: Netplugin Firewalld | Open vxlan port - firewalld: - port: "8472/udp" - permanent: false - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - when: contiv_encap_mode == "vxlan" + port: "{{ item[0] }}" + source: "{{ item[1] }}" + with_nested: + - "{{ contiv_netplugin_internal }}" + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" -- name: Netplugin Firewalld | Save firewalld vxlan port for flanneld +- name: Netplugin Firewalld | Add dns rule firewalld: - port: "8472/udp" + immediate: true permanent: true - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - when: contiv_encap_mode == "vxlan" + port: "53/udp" + interface: contivh0 diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml index 3ea34645d..9d376f4e5 100644 --- a/roles/contiv/tasks/netplugin_iptables.yml +++ b/roles/contiv/tasks/netplugin_iptables.yml @@ -1,58 +1,52 @@ --- -- name: Netplugin IPtables | Get iptables rules - command: iptables -L --wait - register: iptablesrules - check_mode: no +- name: Netplugin IPtables | Add internal rules + iptables: + action: insert + chain: INPUT + protocol: "{{ item[0].split('/')[1] }}" + match: "{{ item[0].split('/')[1] }}" + destination_port: "{{ item[0].split('/')[0] }}" + source: "{{ item[1] }}" + jump: ACCEPT + comment: contiv + with_nested: + - "{{ contiv_netplugin_internal }}" + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" + notify: Save iptables rules + +- name: Netplugin IPtables | Add [in] forward rules + iptables: + action: insert + chain: FORWARD + in_interface: "{{ item }}" + jump: ACCEPT + comment: contiv + with_items: "{{ contiv_netplugin_forward_interfaces }}" + notify: Save iptables rules + +- name: Netplugin IPtables | Add [out] forward rules + iptables: + action: insert + chain: FORWARD + out_interface: "{{ item }}" + jump: ACCEPT + comment: contiv + with_items: "{{ contiv_netplugin_forward_interfaces }}" + notify: Save iptables rules + +- name: Netplugin IPtables | Add dns rule + iptables: + action: insert + chain: INPUT + protocol: udp + match: udp + destination_port: 53 + in_interface: contivh0 + jump: ACCEPT + comment: contiv + notify: Save iptables rules - name: Netplugin IPtables | Enable iptables at boot service: name: iptables enabled: yes - state: started - -- name: Netplugin IPtables | Open Netmaster with iptables - command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv" - with_items: - - "{{ netmaster_port }}" - - "{{ contiv_rpc_port1 }}" - - "{{ contiv_rpc_port2 }}" - - "{{ contiv_rpc_port3 }}" - - "{{ contiv_etcd_port }}" - - "{{ kube_master_api_port }}" - when: iptablesrules.stdout.find("contiv") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Open vxlan port with iptables - command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472" - when: 
iptablesrules.stdout.find("netplugin vxlan 8472") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Open vxlan port with iptables - command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789" - when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow from contivh0 - command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input" - when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow to contivh0 - command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output" - when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow from contivh1 - command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input" - when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow to contivh1 - command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output" - when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow dns - command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns" - when: iptablesrules.stdout.find("contiv dns") == -1 - notify: Save iptables rules diff --git a/roles/contiv/tasks/old_version_cleanup.yml b/roles/contiv/tasks/old_version_cleanup.yml new file mode 100644 index 000000000..8b3d88096 --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup.yml @@ -0,0 +1,43 @@ +--- +- name: Old version cleanup | Check if old auth proxy service exists + stat: + path: /etc/systemd/system/auth-proxy.service + register: auth_proxy_stat + +- name: Old version cleanup | Stop old auth proxy + service: + name: auth-proxy + enabled: no + state: stopped + when: auth_proxy_stat.stat.exists + +# Note(NB): The new containerized contiv-etcd service uses the same data +# directory on the host, so etcd data is not lost. 
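+# Only the old unit files, certs and helper script are removed below; the
+# etcd data directory is deliberately absent from the "Delete old files"
+# list, so the new containerized service starts against the existing data.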
+- name: Old version cleanup | Check if old contiv-etcd service exists + stat: + path: /etc/systemd/system/contiv-etcd.service + register: contiv_etcd_stat + +- name: Old version cleanup | Stop old contiv-etcd + service: + name: contiv-etcd + enabled: no + state: stopped + when: contiv_etcd_stat.stat.exists + +- name: Old version cleanup | Delete old files + file: + state: absent + path: "{{ item }}" + with_items: + - /etc/systemd/system/auth-proxy.service + - /var/contiv/certs + - /usr/bin/auth_proxy.sh + - /etc/systemd/system/contiv-etcd.service + - /etc/systemd/system/contiv-etcd.service.d + +- include_tasks: old_version_cleanup_iptables.yml + when: not contiv_has_firewalld and contiv_has_iptables + +- include_tasks: old_version_cleanup_firewalld.yml + when: contiv_has_firewalld diff --git a/roles/contiv/tasks/old_version_cleanup_firewalld.yml b/roles/contiv/tasks/old_version_cleanup_firewalld.yml new file mode 100644 index 000000000..675a6358a --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup_firewalld.yml @@ -0,0 +1,11 @@ +--- +- name: Old version cleanup | Delete old firewalld rules + firewalld: + state: absent + immediate: true + permanent: true + port: "{{ item }}" + with_items: + - "9999/tcp" + - "6640/tcp" + - "8472/udp" diff --git a/roles/contiv/tasks/old_version_cleanup_iptables.yml b/roles/contiv/tasks/old_version_cleanup_iptables.yml new file mode 100644 index 000000000..513357606 --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup_iptables.yml @@ -0,0 +1,44 @@ +--- +- name: Old version cleanup | Delete old forward [in] iptables rules + iptables: + state: absent + chain: FORWARD + in_interface: "{{ item }}" + jump: ACCEPT + comment: "{{ item }} FORWARD input" + with_items: + - contivh0 + - contivh1 + notify: Save iptables rules + +- name: Old version cleanup | Delete old forward [out] iptables rules + iptables: + state: absent + chain: FORWARD + out_interface: "{{ item }}" + jump: ACCEPT + comment: "{{ item }} FORWARD output" + with_items: + - contivh0 + - contivh1 + notify: Save iptables rules + +- name: Old version cleanup | Delete old input iptables rules + iptables: + state: absent + chain: INPUT + protocol: "{{ item.split('/')[1] }}" + match: "{{ item.split('/')[1] }}" + destination_port: "{{ item.split('/')[0] }}" + comment: "{{ item.split('/')[2] }}" + jump: ACCEPT + with_items: + - "53/udp/contiv dns" + - "4789/udp/netplugin vxlan 4789" + - "8472/udp/netplugin vxlan 8472" + - "9003/tcp/contiv" + - "9002/tcp/contiv" + - "9001/tcp/contiv" + - "9999/tcp/contiv" + - "10000/tcp/Contiv auth proxy service (10000)" + notify: Save iptables rules diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml index 5c92e90e9..21ba6ead4 100644 --- a/roles/contiv/tasks/ovs.yml +++ b/roles/contiv/tasks/ovs.yml @@ -1,6 +1,6 @@ --- - include_tasks: packageManagerInstall.yml - when: source_type == "packageManager" + when: contiv_source_type == "packageManager" tags: - binary-update diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml index d5726476c..8c8e7a7bd 100644 --- a/roles/contiv/tasks/packageManagerInstall.yml +++ b/roles/contiv/tasks/packageManagerInstall.yml @@ -4,10 +4,9 @@ did_install: false - include_tasks: pkgMgrInstallers/centos-install.yml - when: (ansible_os_family == "RedHat") and - not is_atomic + when: ansible_os_family == "RedHat" and not openshift_is_atomic | bool - name: Package Manager | Set fact saying we did CentOS package install set_fact: did_install: true - when: (ansible_os_family == 
"RedHat") + when: ansible_os_family == "RedHat" diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml index 62b4716a3..2c82973d6 100644 --- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml +++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml @@ -4,7 +4,7 @@ pkg=net-tools state=latest register: result - until: result | success + until: result is succeeded - name: PkgMgr RHEL/CentOS | Get openstack ocata rpm get_url: @@ -12,9 +12,9 @@ dest: /tmp/rdo-release-ocata-2.noarch.rpm validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" tags: - ovs_install @@ -23,17 +23,17 @@ tags: - ovs_install register: result - until: result | success + until: result is succeeded - name: PkgMgr RHEL/CentOS | Install ovs yum: - pkg=openvswitch-2.5.0-2.el7.x86_64 + pkg=openvswitch state=present environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" tags: - ovs_install register: result - until: result | success + until: result is succeeded diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 90bb98001..e2813c99d 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -1,10 +1,13 @@ [Unit] Description=Contiv ACI gw -After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service +After=auditd.service systemd-user-sessions.service time-sync.target {{ contiv_openshift_docker_service_name }}.service [Service] -ExecStart={{ bin_dir }}/aci_gw.sh start -ExecStop={{ bin_dir }}/aci_gw.sh stop +ExecStart={{ contiv_bin_dir }}/aci_gw.sh start +ExecStop={{ contiv_bin_dir }}/aci_gw.sh stop KillMode=control-group -Restart=on-failure +Restart=always RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv/templates/aci_gw.j2 b/roles/contiv/templates/aci_gw.j2 index ab4ad46a6..5ff349945 100644 --- a/roles/contiv/templates/aci_gw.j2 +++ b/roles/contiv/templates/aci_gw.j2 @@ -11,13 +11,13 @@ start) set -e docker run --net=host \ - -e "APIC_URL={{ apic_url }}" \ - -e "APIC_USERNAME={{ apic_username }}" \ - -e "APIC_PASSWORD={{ apic_password }}" \ - -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \ - -e "APIC_PHYS_DOMAIN={{ apic_phys_dom }}" \ - -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \ - -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \ + -e "APIC_URL={{ contiv_apic_url }}" \ + -e "APIC_USERNAME={{ contiv_apic_username }}" \ + -e "APIC_PASSWORD={{ contiv_apic_password }}" \ + -e "APIC_LEAF_NODE={{ contiv_apic_leaf_nodes }}" \ + -e "APIC_PHYS_DOMAIN={{ contiv_apic_phys_dom }}" \ + -e "APIC_EPG_BRIDGE_DOMAIN={{ contiv_apic_epg_bridge_domain }}" \ + -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ contiv_apic_contracts_unrestricted_mode }}" \ --name=contiv-aci-gw \ contiv/aci-gw ;; diff --git a/roles/contiv/templates/api-proxy-daemonset.yml.j2 b/roles/contiv/templates/api-proxy-daemonset.yml.j2 new file mode 100644 index 000000000..a15073580 --- /dev/null +++ 
b/roles/contiv/templates/api-proxy-daemonset.yml.j2 @@ -0,0 +1,57 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-api-proxy + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-api-proxy + template: + metadata: + namespace: kube-system + labels: + name: contiv-api-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-api-proxy + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: contiv-api-proxy + image: "{{ contiv_api_proxy_image_repo }}:{{ contiv_version }}" + args: + - "--listen-address=0.0.0.0:{{ contiv_api_proxy_port }}" + - --tls-key-file=/var/contiv/api_proxy_key.pem + - --tls-certificate=/var/contiv/api_proxy_cert.pem + - "--data-store-address={{ etcd_host }}" + - --data-store-driver=etcd + - "--netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}" + ports: + - containerPort: "{{ contiv_api_proxy_port }}" + hostPort: "{{ contiv_api_proxy_port }}" + volumeMounts: + - name: secret-volume + mountPath: /var/contiv + readOnly: true + volumes: + - name: secret-volume + secret: + secretName: contiv-api-proxy-secret diff --git a/roles/contiv/templates/api-proxy-secrets.yml.j2 b/roles/contiv/templates/api-proxy-secrets.yml.j2 new file mode 100644 index 000000000..cd800c97d --- /dev/null +++ b/roles/contiv/templates/api-proxy-secrets.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: contiv-api-proxy-secret + namespace: kube-system + labels: + name: contiv-api-proxy-secret +# Use data+b64encode, because stringData doesn't preserve newlines. 
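+# (b64encode is the Jinja2 filter, mapping e.g. the string "abc" to "YWJj";
+# Kubernetes decodes the values again when the secret volume is mounted.)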
+data: + api_proxy_key.pem: "{{ key | b64encode }}" + api_proxy_cert.pem: "{{ cert | b64encode }}" diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2 index f0e99c556..1dce9fcc2 100644 --- a/roles/contiv/templates/contiv.cfg.j2 +++ b/roles/contiv/templates/contiv.cfg.j2 @@ -1,5 +1,5 @@ { - "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key", "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt", diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2 index fac8e3c4c..ca29b8001 100644 --- a/roles/contiv/templates/contiv.cfg.master.j2 +++ b/roles/contiv/templates/contiv.cfg.master.j2 @@ -1,5 +1,5 @@ { - "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key", "K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt", diff --git a/roles/contiv/templates/etcd-daemonset.yml.j2 b/roles/contiv/templates/etcd-daemonset.yml.j2 new file mode 100644 index 000000000..76937e670 --- /dev/null +++ b/roles/contiv/templates/etcd-daemonset.yml.j2 @@ -0,0 +1,83 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-etcd + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-etcd + template: + metadata: + namespace: kube-system + labels: + name: contiv-etcd + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-etcd + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + initContainers: + - name: contiv-etcd-init + image: "{{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}" + env: + - name: ETCD_INIT_ARGSFILE + value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args" + - name: ETCD_INIT_LISTEN_PORT + value: "{{ contiv_etcd_port }}" + - name: ETCD_INIT_PEER_PORT + value: "{{ contiv_etcd_peer_port }}" + - name: ETCD_INIT_CLUSTER + value: "{{ contiv_etcd_peers }}" + - name: ETCD_INIT_DATA_DIR + value: "{{ contiv_etcd_data_dir }}" + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: "{{ contiv_etcd_conf_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + containers: + - name: contiv-etcd + image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}" + command: + - sh + - -c + - 'exec etcd $(cat "$ETCD_INIT_ARGSFILE")' + env: + - name: 
ETCD_INIT_ARGSFILE + value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args" + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: "{{ contiv_etcd_conf_dir }}" + - name: contiv-etcd-data-dir + mountPath: "{{ contiv_etcd_data_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + volumes: + - name: contiv-etcd-data-dir + hostPath: + type: DirectoryOrCreate + path: "{{ contiv_etcd_data_dir }}" + - name: contiv-etcd-conf-dir + hostPath: + type: DirectoryOrCreate + path: "{{ contiv_etcd_conf_dir }}" diff --git a/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 new file mode 100644 index 000000000..4ec6cfd76 --- /dev/null +++ b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 @@ -0,0 +1,55 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-etcd-proxy + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-etcd-proxy + template: + metadata: + namespace: kube-system + labels: + name: contiv-etcd-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-etcd + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: contiv-etcd-proxy + image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}" + command: + - etcd + - "--proxy=on" + - "--listen-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}" + - "--advertise-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}" + - "--initial-cluster={{ contiv_etcd_peers }}" + - "--data-dir={{ contiv_etcd_data_dir }}" + volumeMounts: + - name: contiv-etcd-data-dir + mountPath: "{{ contiv_etcd_data_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + volumes: + - name: contiv-etcd-data-dir + emptyDir: {} diff --git a/roles/contiv/templates/etcd-scc.yml.j2 b/roles/contiv/templates/etcd-scc.yml.j2 new file mode 100644 index 000000000..6c4bb1d1e --- /dev/null +++ b/roles/contiv/templates/etcd-scc.yml.j2 @@ -0,0 +1,42 @@ +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: [] +allowedFlexVolumes: [] +apiVersion: v1 +defaultAddCapabilities: [] +fsGroup: + ranges: + - max: "{{ contiv_etcd_system_gid }}" + min: "{{ contiv_etcd_system_gid }}" + type: MustRunAs +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'For contiv-etcd only.' 
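+# The MustRunAs uid/gid constraints below mirror contiv_etcd_system_uid and
+# contiv_etcd_system_gid, so pods admitted under this SCC run only as the
+# unix identity created in tasks/etcd.yml.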
+ creationTimestamp: null + name: contiv-etcd +priority: null +readOnlyRootFilesystem: true +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAs + uid: "{{ contiv_etcd_system_uid }}" +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: MustRunAs +users: +- system:serviceaccount:kube-system:contiv-etcd +volumes: +- emptyDir +- hostPath +- secret diff --git a/roles/contiv/templates/netmaster.env.j2 b/roles/contiv/templates/netmaster.env.j2 deleted file mode 100644 index 5b5c84a2e..000000000 --- a/roles/contiv/templates/netmaster.env.j2 +++ /dev/null @@ -1,2 +0,0 @@ -NETMASTER_ARGS='--cluster-store etcd://{{ etcd_url }} --cluster-mode=kubernetes' - diff --git a/roles/contiv/templates/netmaster.j2 b/roles/contiv/templates/netmaster.j2 new file mode 100644 index 000000000..c9db122b5 --- /dev/null +++ b/roles/contiv/templates/netmaster.j2 @@ -0,0 +1 @@ +NETMASTER_ARGS='--etcd={{ contiv_etcd_url }} --listen-url=127.0.0.1:{{ contiv_netmaster_port }} --fwdmode={{ contiv_netplugin_fwd_mode }} --infra={{ contiv_fabric_mode }} --control-url={{ contiv_netmaster_ctrl_ip }}:{{ contiv_netmaster_port }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service index a602c955e..b7289bc38 100644 --- a/roles/contiv/templates/netmaster.service +++ b/roles/contiv/templates/netmaster.service @@ -4,7 +4,10 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service [Service] EnvironmentFile=/etc/default/netmaster -ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS +ExecStart={{ contiv_bin_dir }}/netmaster $NETMASTER_ARGS KillMode=control-group -Restart=on-failure +Restart=always RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2 index a4928cc3d..0fd727401 100644 --- a/roles/contiv/templates/netplugin.j2 +++ b/roles/contiv/templates/netplugin.j2 @@ -1,7 +1,6 @@ {% if contiv_encap_mode == "vlan" %} -NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='--vlan-if={{ contiv_netplugin_interface }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' {% endif %} {% if contiv_encap_mode == "vxlan" %} -NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='--vtep-ip={{ contiv_netplugin_ctrl_ip }} --vxlan-port={{ contiv_vxlan_port }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' {% endif %} - diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service index dc7b95bb5..2e1ca1bdf 100644 --- a/roles/contiv/templates/netplugin.service +++ b/roles/contiv/templates/netplugin.service @@ -4,5 +4,10 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service [Service] EnvironmentFile=/etc/default/netplugin -ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS +ExecStart={{ contiv_bin_dir }}/netplugin $NETPLUGIN_ARGS KillMode=control-group +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv_auth_proxy/README.md 
b/roles/contiv_auth_proxy/README.md deleted file mode 100644 index 287b6c148..000000000 --- a/roles/contiv_auth_proxy/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Role Name -========= - -Role to install Contiv API Proxy and UI - -Requirements ------------- - -Docker needs to be installed to run the auth proxy container. - -Role Variables --------------- - -auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container. -auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates. -auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address. - -Dependencies ------------- - -docker - -Example Playbook ----------------- - -- hosts: netplugin-node - become: true - roles: - - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 } diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml deleted file mode 100644 index e1d904c6a..000000000 --- a/roles/contiv_auth_proxy/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -auth_proxy_image: "contiv/auth_proxy:1.1.1" -auth_proxy_port: 10000 -contiv_certs: "/var/contiv/certs" -cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379" -auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem" -auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem" -auth_proxy_datastore: "{{ cluster_store }}" -auth_proxy_binaries: "/var/contiv_cache" -auth_proxy_local_install: False -auth_proxy_rule_comment: "Contiv auth proxy service" -service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}" diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service deleted file mode 100644 index 7cd2edff1..000000000 --- a/roles/contiv_auth_proxy/files/auth-proxy.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Contiv Proxy and UI -After=auditd.service systemd-user-sessions.service time-sync.target docker.service - -[Service] -ExecStart=/usr/bin/auth_proxy.sh start -ExecStop=/usr/bin/auth_proxy.sh stop -KillMode=control-group -Restart=on-failure -RestartSec=10 - -[Install] -WantedBy=multi-user.target diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem deleted file mode 100644 index 63df4603f..000000000 --- a/roles/contiv_auth_proxy/files/cert.pem +++ /dev/null @@ -1,33 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM -BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j -YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL -MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG -A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0 -aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl -MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p -7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06 -grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl -yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L -DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje -XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4 -dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1 
-hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N -wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq -FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV -HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot// -iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN -BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2 -nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R -/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW -SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB -PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It -X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ -yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf -0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv -DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM -XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX -jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA= ------END CERTIFICATE----- diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem deleted file mode 100644 index 7224e569c..000000000 --- a/roles/contiv_auth_proxy/files/key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d -5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj -v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8 -Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3 -3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn -2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md -qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL -+J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI -M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74 -Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh -41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA -AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP -2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk -PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5 -9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB -UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O -m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj -RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H -0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk -evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk -MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l -kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8 -nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68 -4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29 -5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh -YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf -M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR -wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh -rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn -yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo -20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc -RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1 -IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3 -7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3 
-FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou -qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb -58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN -JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI -ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg -0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g -UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm -zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA -RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg -hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h -+YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd -1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm -qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS -QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt -d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft ------END RSA PRIVATE KEY----- diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml deleted file mode 100644 index 9cb9bea49..000000000 --- a/roles/contiv_auth_proxy/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for auth_proxy diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml deleted file mode 100644 index a29659cc9..000000000 --- a/roles/contiv_auth_proxy/tasks/cleanup.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: stop auth-proxy container - service: name=auth-proxy state=stopped - -- name: cleanup iptables for auth proxy - shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" - become: true - with_items: - - "{{ auth_proxy_port }}" diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml deleted file mode 100644 index 74e7bf794..000000000 --- a/roles/contiv_auth_proxy/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# tasks file for auth_proxy -- name: setup iptables for auth proxy - shell: > - ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \ - iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" - become: true - with_items: - - "{{ auth_proxy_port }}" - -# Load the auth-proxy-image from local tar. 
Ignore any errors to handle the -# case where the image is not built in -- name: copy auth-proxy image - copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar - when: auth_proxy_local_install == True - -- name: load auth-proxy image - shell: docker load -i /tmp/auth-proxy-image.tar - when: auth_proxy_local_install == True - -- name: create cert folder for proxy - file: path=/var/contiv/certs state=directory - -- name: copy shell script for starting auth-proxy - template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx - -- name: copy cert for starting auth-proxy - copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r - -- name: copy key for starting auth-proxy - copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r - -- name: copy systemd units for auth-proxy - copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service - -- name: start auth-proxy container - systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2 deleted file mode 100644 index 0ab8c831b..000000000 --- a/roles/contiv_auth_proxy/templates/auth_proxy.j2 +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -usage="$0 start/stop" -if [ $# -ne 1 ]; then - echo USAGE: $usage - exit 1 -fi - -case $1 in -start) - set -e - - /usr/bin/docker run --rm \ - -p 10000:{{ auth_proxy_port }} \ - --net=host --name=auth-proxy \ - -e NO_NETMASTER_STARTUP_CHECK=1 \ - -v /var/contiv:/var/contiv:z \ - {{ auth_proxy_image }} \ - --tls-key-file={{ auth_proxy_key }} \ - --tls-certificate={{ auth_proxy_cert }} \ - --data-store-address={{ auth_proxy_datastore }} \ - --netmaster-address={{ service_vip }}:9999 \ - --listen-address=:10000 - ;; - -stop) - # don't stop on error - /usr/bin/docker stop auth-proxy - /usr/bin/docker rm -f -v auth-proxy - ;; - -*) - echo USAGE: $usage - exit 1 - ;; -esac diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory deleted file mode 100644 index d18580b3c..000000000 --- a/roles/contiv_auth_proxy/tests/inventory +++ /dev/null @@ -1 +0,0 @@ -localhost
\ No newline at end of file diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml deleted file mode 100644 index 2af3250cd..000000000 --- a/roles/contiv_auth_proxy/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - auth_proxy diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml deleted file mode 100644 index 9032766c4..000000000 --- a/roles/contiv_auth_proxy/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for auth_proxy diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml index 7b8150954..c1622c56a 100644 --- a/roles/contiv_facts/defaults/main.yaml +++ b/roles/contiv_facts/defaults/main.yaml @@ -1,13 +1,10 @@ --- # The directory where binaries are stored on Ansible # managed systems. -bin_dir: /usr/bin +contiv_bin_dir: /usr/bin # The directory used by Ansible to temporarily store # files on Ansible managed systems. -ansible_temp_dir: /tmp/.ansible/files +contiv_ansible_temp_dir: /tmp/.ansible/files -source_type: packageManager - -# Whether or not to also install and enable the Contiv auth_proxy -contiv_enable_auth_proxy: false +contiv_source_type: packageManager diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml index a57f6eb19..b8239a636 100644 --- a/roles/contiv_facts/tasks/fedora-install.yml +++ b/roles/contiv_facts/tasks/fedora-install.yml @@ -4,16 +4,16 @@ name: dnf state: installed register: result - until: result | success + until: result is succeeded - name: Update repo cache command: dnf update -y retries: 5 delay: 10 environment: - https_proxy: "{{ https_proxy }}" - http_proxy: "{{ http_proxy }}" - no_proxy: "{{ no_proxy }}" + https_proxy: "{{ contiv_https_proxy }}" + http_proxy: "{{ contiv_http_proxy }}" + no_proxy: "{{ contiv_no_proxy }}" - name: Install libselinux-python command: dnf install {{ item }} -y @@ -21,6 +21,6 @@ - python-dnf - libselinux-python environment: - https_proxy: "{{ https_proxy }}" - http_proxy: "{{ http_proxy }}" - no_proxy: "{{ no_proxy }}" + https_proxy: "{{ contiv_https_proxy }}" + http_proxy: "{{ contiv_http_proxy }}" + no_proxy: "{{ contiv_no_proxy }}" diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index 3267a4ab0..11f1e1369 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -1,60 +1,31 @@ --- -- name: Determine if Atomic - stat: path=/run/ostree-booted - register: s - changed_when: false - check_mode: no - -- name: Init the is_atomic fact - set_fact: - is_atomic: false - -- name: Set the is_atomic fact - set_fact: - is_atomic: true - when: s.stat.exists - - name: Determine if CoreOS raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'" register: distro check_mode: no -- name: Init the is_coreos fact +- name: Init the contiv_is_coreos fact set_fact: - is_coreos: false + contiv_is_coreos: false -- name: Set the is_coreos fact +- name: Set the contiv_is_coreos fact set_fact: - is_coreos: true + contiv_is_coreos: true when: "'CoreOS' in distro.stdout" -- name: Set docker config file directory - set_fact: - docker_config_dir: "/etc/sysconfig" - -- name: Override docker config file directory for Debian - set_fact: - docker_config_dir: "/etc/default" - when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu" - -- name: Create config file directory - file: - path: "{{ docker_config_dir }}" - state: directory - - name: Set the bin 
directory path for CoreOS set_fact: - bin_dir: "/opt/bin" - when: is_coreos + contiv_bin_dir: "/opt/bin" + when: contiv_is_coreos - name: Create the directory used to store binaries file: - path: "{{ bin_dir }}" + path: "{{ contiv_bin_dir }}" state: directory - name: Create Ansible temp directory file: - path: "{{ ansible_temp_dir }}" + path: "{{ contiv_ansible_temp_dir }}" state: directory - name: Determine if has rpm @@ -63,26 +34,26 @@ changed_when: false check_mode: no -- name: Init the has_rpm fact +- name: Init the contiv_has_rpm fact set_fact: - has_rpm: false + contiv_has_rpm: false -- name: Set the has_rpm fact +- name: Set the contiv_has_rpm fact set_fact: - has_rpm: true + contiv_has_rpm: true when: s.stat.exists -- name: Init the has_firewalld fact +- name: Init the contiv_has_firewalld fact set_fact: - has_firewalld: false + contiv_has_firewalld: false -- name: Init the has_iptables fact +- name: Init the contiv_has_iptables fact set_fact: - has_iptables: false + contiv_has_iptables: false # collect information about what packages are installed - include_tasks: rpm.yml - when: has_rpm + when: contiv_has_rpm - include_tasks: fedora-install.yml - when: not is_atomic and ansible_distribution == "Fedora" + when: not openshift_is_atomic and ansible_distribution == "Fedora" diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml index 07401a6dd..dc6c5d3b7 100644 --- a/roles/contiv_facts/tasks/rpm.yml +++ b/roles/contiv_facts/tasks/rpm.yml @@ -6,10 +6,17 @@ failed_when: false check_mode: no -- name: Set the has_firewalld fact +- name: RPM | Determine if firewalld enabled + command: "systemctl status firewalld.service" + register: ss + changed_when: false + failed_when: false + check_mode: no + +- name: Set the contiv_has_firewalld fact set_fact: - has_firewalld: true - when: s.rc == 0 + contiv_has_firewalld: true + when: s.rc == 0 and ss.rc == 0 - name: Determine if iptables-services installed command: "rpm -q iptables-services" @@ -18,7 +25,7 @@ failed_when: false check_mode: no -- name: Set the has_iptables fact +- name: Set the contiv_has_iptables fact set_fact: - has_iptables: true + contiv_has_iptables: true when: s.rc == 0 diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 3038ed9f6..87e249642 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -5,12 +5,12 @@ r_etcd_common_backup_sufix_name: '' l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" # runc, docker, host -r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}" +r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}" r_etcd_common_embedded_etcd: false osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd' etcd_image_dict: - origin: "registry.fedoraproject.org/f26/etcd" + origin: "registry.fedoraproject.org/latest/etcd" openshift-enterprise: "{{ osm_etcd_image }}" etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}" @@ -98,4 +98,4 @@ r_etcd_os_firewall_allow: # set the backend quota to 4GB by default etcd_quota_backend_bytes: 4294967296 -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | 
bool) else 'docker' }}" diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 879ca4f4e..af58eff62 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -17,5 +17,5 @@ galaxy_info: - system dependencies: - role: lib_openshift -- role: lib_os_firewall - role: lib_utils +- role: openshift_facts diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml index 603f2531f..cab835e20 100644 --- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml +++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml @@ -1,9 +1,9 @@ --- - name: Install etcd for etcdctl package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded - name: Configure etcd profile.d aliases template: diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml index 9da023dbd..acd1bb0bc 100644 --- a/roles/etcd/tasks/backup/backup.yml +++ b/roles/etcd/tasks/backup/backup.yml @@ -44,7 +44,7 @@ - r_etcd_common_embedded_etcd | bool - not l_ostree_booted.stat.exists | bool register: result - until: result | success + until: result is succeeded - name: Check selinux label of '{{ etcd_data_dir }}' command: > diff --git a/roles/etcd/tasks/certificates/deploy_ca.yml b/roles/etcd/tasks/certificates/deploy_ca.yml index bd4dafafd..ebaff353b 100644 --- a/roles/etcd/tasks/certificates/deploy_ca.yml +++ b/roles/etcd/tasks/certificates/deploy_ca.yml @@ -7,7 +7,7 @@ delegate_to: "{{ etcd_ca_host }}" run_once: true register: result - until: result | success + until: result is succeeded - file: path: "{{ item }}" diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 119071a72..ce295d2f5 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -28,7 +28,7 @@ etcd_client_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool else (False in (g_external_etcd_cert_stat_result.results | default({}) - | oo_collect(attribute='stat.exists') + | lib_utils_oo_collect(attribute='stat.exists') | list)) }}" - name: Ensure generated_certs directory present @@ -57,6 +57,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. 
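+# (Each signing command is delegated to etcd_ca_host and runs one host at a
+# time; signing in parallel would race on the CA's openssl serial/index
+# files. The same constraint applies to the server and peer certs below.)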
+# delegated_serial_command is a custom module in lib_utils - name: Sign and create the client crt delegated_serial_command: command: > @@ -79,13 +80,6 @@ when: etcd_client_certs_missing | bool delegate_to: "{{ etcd_ca_host }}" -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX - register: g_etcd_client_mktemp - changed_when: False - when: etcd_client_certs_missing | bool - become: no - - name: Create a tarball of the etcd certs command: > tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz @@ -101,8 +95,7 @@ - name: Retrieve the etcd cert tarballs fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ g_etcd_client_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_client_certs_missing | bool @@ -116,10 +109,15 @@ - name: Unarchive etcd cert tarballs unarchive: - src: "{{ g_etcd_client_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" dest: "{{ etcd_cert_config_dir }}" when: etcd_client_certs_missing | bool +- name: Delete temporary directory + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent + changed_when: False + when: etcd_client_certs_missing | bool + - file: path: "{{ etcd_cert_config_dir }}/{{ item }}" owner: root @@ -130,9 +128,3 @@ - "{{ etcd_cert_prefix }}client.key" - "{{ etcd_cert_prefix }}ca.crt" when: etcd_client_certs_missing | bool - -- name: Delete temporary directory - local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent - changed_when: False - when: etcd_client_certs_missing | bool - become: no diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index f4726940a..7c8b87d99 100644 --- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -5,7 +5,7 @@ state: present when: not etcd_is_containerized | bool register: result - until: result | success + until: result is succeeded - name: Check status of etcd certificates stat: @@ -21,7 +21,7 @@ etcd_server_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool else (False in (g_etcd_server_cert_stat_result.results | default({}) - | oo_collect(attribute='stat.exists') + | lib_utils_oo_collect(attribute='stat.exists') | list)) }}" - name: Ensure generated_certs directory present @@ -50,6 +50,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the server crt delegated_serial_command: command: > @@ -83,6 +84,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. 
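
The dropped mktemp tasks above lean on `fetch` semantics: without `flat: yes`, fetched files land under `dest/<inventory_hostname>/<remote path>`, so a fixed `/tmp` destination already gives each host its own staging directory. A condensed sketch of the resulting round trip (the concrete paths are illustrative):

- name: Pull the cert tarball back to the control machine
  fetch:
    src: /etc/etcd/generated_certs/etcd-client.tgz
    dest: /tmp
    fail_on_missing: yes
    validate_checksum: yes

- name: Copy and unpack the tarball on the target host
  unarchive:
    src: "/tmp/{{ inventory_hostname }}/etc/etcd/generated_certs/etcd-client.tgz"
    dest: /etc/etcd

- name: Remove the per-host staging directory on the control machine
  local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
  changed_when: false
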
+# delegated_serial_command is a custom module in lib_utils - name: Sign and create the peer crt delegated_serial_command: command: > @@ -105,13 +107,6 @@ when: etcd_server_certs_missing | bool delegate_to: "{{ etcd_ca_host }}" -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX - become: no - register: g_etcd_server_mktemp - changed_when: False - when: etcd_server_certs_missing | bool - - name: Create a tarball of the etcd certs command: > tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz @@ -127,8 +122,7 @@ - name: Retrieve etcd cert tarball fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ g_etcd_server_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_server_certs_missing | bool @@ -144,7 +138,7 @@ - name: Unarchive cert tarball unarchive: - src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" dest: "{{ etcd_cert_config_dir }}" when: etcd_server_certs_missing | bool @@ -161,8 +155,7 @@ - name: Retrieve etcd ca cert tarball fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ g_etcd_server_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_server_certs_missing | bool @@ -177,8 +170,7 @@ when: etcd_server_certs_missing | bool - name: Delete temporary directory - local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent - become: no + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent changed_when: False when: etcd_server_certs_missing | bool diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index b2100801f..12e41667e 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -13,7 +13,7 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool register: result - until: result | success + until: result is succeeded - include_tasks: drop_etcdctl.yml when: @@ -93,7 +93,9 @@ daemon_reload: yes when: not l_is_etcd_system_container | bool register: task_result - failed_when: task_result|failed and 'could not' not in task_result.msg|lower + failed_when: + - task_result is failed + - ('could not' not in task_result.msg|lower) - name: Install etcd container service file template: @@ -131,4 +133,4 @@ - name: Set fact etcd_service_status_changed set_fact: - etcd_service_status_changed: "{{ start_result | changed }}" + etcd_service_status_changed: "{{ start_result is changed }}" diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml index a4b0ff31d..3d945344c 100644 --- a/roles/etcd/tasks/migration/add_ttls.yml +++ b/roles/etcd/tasks/migration/add_ttls.yml @@ -11,7 +11,7 @@ - name: Re-introduce leases (as a replacement for key TTLs) command: > - {{ openshift.common.client_binary }} adm migrate etcd-ttl \ + {{ openshift_client_binary }} adm migrate etcd-ttl \ --cert {{ r_etcd_common_master_peer_cert_file }} \ --key {{ r_etcd_common_master_peer_key_file }} \ --cacert {{ r_etcd_common_master_peer_ca_file }} \ diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml index 54a9c74ff..630640ab1 100644 --- a/roles/etcd/tasks/migration/migrate.yml +++ b/roles/etcd/tasks/migration/migrate.yml @@ -1,7 +1,7 @@ --- # Should this be run in a 
serial manner? - set_fact: - l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" + l_etcd_service: "{{ 'etcd_container' if (openshift_is_containerized | bool) else 'etcd' }}" - name: Migrate etcd data command: > diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index ca8b6a707..e37652536 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -29,7 +29,9 @@ masked: no daemon_reload: yes register: task_result - failed_when: task_result|failed and 'could not' not in task_result.msg|lower + failed_when: + - task_result is failed + - ('could not' not in task_result.msg|lower) when: "'etcd' not in etcd_result.stdout" - name: Disable etcd_container @@ -39,7 +41,9 @@ enabled: no daemon_reload: yes register: task_result - failed_when: task_result|failed and 'could not' not in task_result.msg|lower + failed_when: + - task_result is failed + - ('could not' not in task_result.msg|lower) - name: Remove etcd_container.service file: diff --git a/roles/etcd/tasks/upgrade/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml index 6e712ba74..13bb0faca 100644 --- a/roles/etcd/tasks/upgrade/upgrade_image.yml +++ b/roles/etcd/tasks/upgrade/upgrade_image.yml @@ -45,7 +45,7 @@ state: latest when: not l_ostree_booted.stat.exists | bool register: result - until: result | success + until: result is succeeded - name: Verify cluster is healthy command: "{{ etcdctlv2 }} cluster-health" diff --git a/roles/etcd/tasks/upgrade/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml index e98def46e..180ed4135 100644 --- a/roles/etcd/tasks/upgrade/upgrade_rpm.yml +++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml @@ -19,7 +19,7 @@ name: "{{ l_etcd_target_package }}" state: latest register: result - until: result | success + until: result is succeeded - lineinfile: destfile: "{{ etcd_conf_file }}" diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml index fe1e418d8..ab3626cec 100644 --- a/roles/etcd/tasks/version_detect.yml +++ b/roles/etcd/tasks/version_detect.yml @@ -12,7 +12,7 @@ - debug: msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected" when: - - not openshift.common.is_containerized | bool + - not openshift_is_containerized | bool - block: - name: Record containerized etcd version (docker) @@ -52,4 +52,4 @@ - debug: msg: "Etcd containerized version {{ etcd_container_version }} detected" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml index 488b6b0bc..d9e4d2354 100644 --- a/roles/flannel/defaults/main.yaml +++ b/roles/flannel/defaults/main.yaml @@ -2,8 +2,8 @@ flannel_interface: "{{ ansible_default_ipv4.interface }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" -etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt" -etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt" -etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key" +etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt" +etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt" 
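
The `failed_when` rewrites in the hunks above also switch from a single `and`-joined expression using the deprecated `|failed` filter to a YAML list of conditions, which Ansible evaluates lazily and combines with logical AND. The resulting shape, extracted as a sketch (unit name illustrative):

- name: Disable a unit, tolerating 'could not' errors
  systemd:
    name: etcd_container
    state: stopped
    enabled: no
  register: task_result
  failed_when:
    - task_result is failed
    - ('could not' not in task_result.msg | lower)
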
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 705d39f9a..f94399fab 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -9,7 +9,7 @@ name: "{{ openshift_docker_service_name }}" state: restarted register: l_docker_restart_docker_in_flannel_result - until: not l_docker_restart_docker_in_flannel_result | failed + until: not (l_docker_restart_docker_in_flannel_result is failed) retries: 3 delay: 30 @@ -18,6 +18,10 @@ name: "{{ openshift_service_type }}-node" state: restarted register: l_restart_node_result - until: not l_restart_node_result | failed + until: not (l_restart_node_result is failed) retries: 3 delay: 30 + +- name: save iptable rules + become: yes + command: 'iptables-save' diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 51128dba6..38d2f748b 100644 --- a/roles/flannel/meta/main.yml +++ b/roles/flannel/meta/main.yml @@ -12,4 +12,6 @@ galaxy_info: categories: - cloud - system -dependencies: [] +dependencies: +- role: lib_utils +- role: openshift_facts diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index befe1b2e6..11981fb80 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -2,9 +2,9 @@ - name: Install flannel become: yes package: name=flannel state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded - name: Set flannel etcd options become: yes @@ -41,3 +41,13 @@ notify: - restart docker - restart node + +- name: Enable Pod to Pod communication + command: /sbin/iptables --wait -I FORWARD -d {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -i {{ flannel_interface }} -j ACCEPT -m comment --comment "Pod to Pod communication" + notify: + - save iptable rules + +- name: Allow external network access + command: /sbin/iptables -t nat -A POSTROUTING -o {{ flannel_interface }} -j MASQUERADE -m comment --comment "Allow external network access" + notify: + - save iptable rules diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml index 1d0f5df6a..cd11fd9ff 100644 --- a/roles/flannel_register/defaults/main.yaml +++ b/roles/flannel_register/defaults/main.yaml @@ -4,6 +4,6 @@ flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" etcd_conf_dir: "{{ openshift.common.config_base }}/master" -etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}" +etcd_peer_ca_file: "{{ etcd_conf_dir + '/master.etcd-ca.crt' }}" etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt" etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key" diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml index 73bddcca4..1e44ff5ba 100644 --- a/roles/flannel_register/meta/main.yml +++ b/roles/flannel_register/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info: - cloud - system dependencies: -- { role: 
openshift_facts } +- role: openshift_facts +- role: lib_utils diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 83ca83350..a38b95c1d 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -31,6 +31,7 @@ class CallbackModule(CallbackBase): 'installer_phase_node', 'installer_phase_glusterfs', 'installer_phase_hosted', + 'installer_phase_web_console', 'installer_phase_metrics', 'installer_phase_logging', 'installer_phase_prometheus', @@ -80,6 +81,10 @@ class CallbackModule(CallbackBase): 'title': 'Hosted Install', 'playbook': 'playbooks/openshift-hosted/config.yml' }, + 'installer_phase_web_console': { + 'title': 'Web Console Install', + 'playbook': 'playbooks/openshift-web-console/config.yml' + }, 'installer_phase_metrics': { 'title': 'Metrics Install', 'playbook': 'playbooks/openshift-metrics/config.yml' @@ -122,6 +127,10 @@ class CallbackModule(CallbackBase): self._display.display( '\tThis phase can be restarted by running: {}'.format( phase_attributes[phase]['playbook'])) + if 'message' in stats.custom['_run'][phase]: + self._display.display( + '\t{}'.format( + stats.custom['_run'][phase]['message'])) self._display.display("", screen_only=True) diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml index 7fd5adf41..7eb8ed781 100644 --- a/roles/kuryr/meta/main.yml +++ b/roles/kuryr/meta/main.yml @@ -13,5 +13,6 @@ galaxy_info: - cloud - system dependencies: -- { role: lib_openshift } -- { role: openshift_facts } +- role: lib_utils +- role: lib_openshift +- role: openshift_facts diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml index 08f2d5adc..41d0ead20 100644 --- a/roles/kuryr/tasks/node.yaml +++ b/roles/kuryr/tasks/node.yaml @@ -40,7 +40,7 @@ regexp: '^OPTIONS="?(.*?)"?$' backrefs: yes backup: yes - line: 'OPTIONS="\1 --disable dns,proxy,plugins"' + line: 'OPTIONS="\1 --disable proxy"' - name: force node restart to disable the proxy service: diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2 index 39348ae90..09f4c7dfe 100644 --- a/roles/kuryr/templates/cni-daemonset.yaml.j2 +++ b/roles/kuryr/templates/cni-daemonset.yaml.j2 @@ -26,6 +26,13 @@ spec: image: kuryr/cni:latest imagePullPolicy: IfNotPresent command: [ "cni_ds_init" ] + env: + - name: CNI_DAEMON + value: "True" + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName securityContext: privileged: true volumeMounts: @@ -38,6 +45,10 @@ spec: subPath: kuryr-cni.conf - name: etc mountPath: /etc + - name: proc + mountPath: /host_proc + - name: openvswitch + mountPath: /var/run/openvswitch volumes: - name: bin hostPath: @@ -50,4 +61,10 @@ spec: name: kuryr-config - name: etc hostPath: - path: /etc
\ No newline at end of file + path: /etc + - name: proc + hostPath: + path: /proc + - name: openvswitch + hostPath: + path: /var/run/openvswitch diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2 index 96c215f00..4bf1dbddf 100644 --- a/roles/kuryr/templates/configmap.yaml.j2 +++ b/roles/kuryr/templates/configmap.yaml.j2 @@ -16,17 +16,17 @@ data: # Directory for Kuryr vif binding executables. (string value) #bindir = /usr/libexec/kuryr + # Neutron subnetpool name will be prefixed by this. (string value) + #subnetpool_name_prefix = kuryrPool + + # baremetal or nested-containers are the supported values. (string value) + #deployment_type = baremetal + # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. #debug = false - # DEPRECATED: If set to false, the logging level will be set to WARNING instead - # of the default INFO level. (boolean value) - # This option is deprecated for removal. - # Its value may be silently ignored in the future. - #verbose = true - # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging @@ -46,7 +46,7 @@ data: # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile - #log_file = /var/log/kuryr/kuryr-controller.log + #log_file = <None> # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) @@ -65,13 +65,19 @@ data: # is set. (boolean value) #use_syslog = false + # Enable journald for logging. If running in a systemd environment you may wish + # to enable journal support. Doing so will use the journal native protocol + # which includes structured metadata in addition to log messages.This option is + # ignored if log_config_append is set. (boolean value) + #use_journal = false + # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if log_config_append is # set. (boolean value) - #use_stderr = true + #use_stderr = false # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s @@ -93,7 +99,7 @@ data: # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. 
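
The cni-daemonset changes above inject the node name through the Kubernetes downward API and bind-mount the host paths the CNI daemon needs. Reduced to just the new pieces (a sketch, not the full template):

spec:
  containers:
  - name: cni
    image: kuryr/cni:latest
    env:
    - name: CNI_DAEMON
      value: "True"
    - name: KUBERNETES_NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: proc
      mountPath: /host_proc
    - name: openvswitch
      mountPath: /var/run/openvswitch
  volumes:
  - name: proc
    hostPath:
      path: /proc
  - name: openvswitch
    hostPath:
      path: /var/run/openvswitch
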
(list value) - #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false @@ -106,15 +112,86 @@ data: # value) #instance_uuid_format = "[instance: %(uuid)s] " + # Interval, number of seconds, of log rate limiting. (integer value) + #rate_limit_interval = 0 + + # Maximum number of logged messages per rate_limit_interval. (integer value) + #rate_limit_burst = 0 + + # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG + # or empty string. Logs with level greater or equal to rate_limit_except_level + # are not filtered. An empty string means that all levels are filtered. (string + # value) + #rate_limit_except_level = CRITICAL + # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [binding] + # Configuration options for container interface binding. - driver = kuryr.lib.binding.drivers.vlan + # + # From kuryr_kubernetes + # + + # The name prefix of the veth endpoint put inside the container. (string value) + #veth_dst_prefix = eth + + # Driver to use for binding and unbinding ports. (string value) + # Deprecated group/name - [binding]/driver + #default_driver = kuryr.lib.binding.drivers.veth + + # Drivers to use for binding and unbinding ports. (list value) + #enabled_drivers = kuryr.lib.binding.drivers.veth + + # Specifies the name of the Nova instance interface to link the virtual devices + # to (only applicable to some binding drivers. (string value) link_iface = eth0 + driver = kuryr.lib.binding.drivers.vlan + + + [cni_daemon] + + # + # From kuryr_kubernetes + # + + # Enable CNI Daemon configuration. (boolean value) + daemon_enabled = true + + # Bind address for CNI daemon HTTP server. It is recommened to allow only local + # connections. (string value) + bind_address = 127.0.0.1:50036 + + # Maximum number of processes that will be spawned to process requests from CNI + # driver. (integer value) + #worker_num = 30 + + # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in + # pod metadata before failing the CNI request. (integer value) + #vif_annotation_timeout = 120 + + # Kuryr uses pyroute2 library to manipulate networking interfaces. When + # processing a high number of Kuryr requests in parallel, it may take kernel + # more time to process all networking stack changes. This option allows to tune + # internal pyroute2 timeout. (integer value) + #pyroute2_timeout = 30 + + # Set to True when you are running kuryr-daemon inside a Docker container on + # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to + # provide networking for. 
This mainly means thatkuryr-daemon will look for + # network namespaces in $netns_proc_dir instead of /proc. (boolean value) + docker_mode = true + + # When docker_mode is set to True, this config option should be set to where + # host's /proc directory is mounted. Please note that mounting it is necessary + # to allow Kuryr-Kubernetes to move host interfaces between host network + # namespaces, which is essential for Kuryr to work. (string value) + netns_proc_dir = /host_proc + + [kubernetes] # @@ -164,11 +241,6 @@ data: # The driver that manages VIFs pools for Kubernetes Pods (string value) vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }} - [vif_pool] - ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} - ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} - ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} - ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} [neutron] # Configuration options for OpenStack Neutron @@ -232,13 +304,55 @@ data: external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }} [pod_vif_nested] + worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + + + [pool_manager] + + # + # From kuryr_kubernetes + # + + # Absolute path to socket file that will be used for communication with the + # Pool Manager daemon (string value) + #sock_file = /run/kuryr/kuryr_manage.sock + + + [vif_pool] + + # + # From kuryr_kubernetes + # + + # Set a maximun amount of ports per pool. 0 to disable (integer value) + ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} + + # Set a target minimum size of the pool of ports (integer value) + ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} + + # Number of ports to be created in a bulk request (integer value) + ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} + + # Minimun interval (in seconds) between pool updates (integer value) + ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} + kuryr-cni.conf: |+ [DEFAULT] # # From kuryr_kubernetes # + + # Directory for Kuryr vif binding executables. (string value) + #bindir = /usr/libexec/kuryr + + # Neutron subnetpool name will be prefixed by this. (string value) + #subnetpool_name_prefix = kuryrPool + + # baremetal or nested-containers are the supported values. (string value) + #deployment_type = baremetal + # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. @@ -263,7 +377,7 @@ data: # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile - #log_file = /var/log/kuryr/cni.log + #log_file = <None> # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) @@ -282,6 +396,12 @@ data: # is set. (boolean value) #use_syslog = false + # Enable journald for logging. If running in a systemd environment you may wish + # to enable journal support. Doing so will use the journal native protocol + # which includes structured metadata in addition to log messages.This option is + # ignored if log_config_append is set. (boolean value) + #use_journal = false + # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. 
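
Each key of this ConfigMap is projected into the kuryr containers as an individual file; the daemonset templates do that with a configMap volume plus a subPath mount. A minimal sketch of that wiring (the volume name is illustrative; the ConfigMap name follows the templates):

volumeMounts:
- name: config-volume
  mountPath: /etc/kuryr/kuryr-cni.conf
  subPath: kuryr-cni.conf
volumes:
- name: config-volume
  configMap:
    name: kuryr-config
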
(string value) #syslog_log_facility = LOG_USER @@ -310,7 +430,7 @@ data: # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. (list value) - #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false @@ -323,14 +443,85 @@ data: # value) #instance_uuid_format = "[instance: %(uuid)s] " + # Interval, number of seconds, of log rate limiting. (integer value) + #rate_limit_interval = 0 + + # Maximum number of logged messages per rate_limit_interval. (integer value) + #rate_limit_burst = 0 + + # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG + # or empty string. Logs with level greater or equal to rate_limit_except_level + # are not filtered. An empty string means that all levels are filtered. (string + # value) + #rate_limit_except_level = CRITICAL + # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [binding] + # Configuration options for container interface binding. + + # + # From kuryr_kubernetes + # + + # The name prefix of the veth endpoint put inside the container. (string value) + #veth_dst_prefix = eth + + # Driver to use for binding and unbinding ports. (string value) + # Deprecated group/name - [binding]/driver + #default_driver = kuryr.lib.binding.drivers.veth + + # Drivers to use for binding and unbinding ports. (list value) + #enabled_drivers = kuryr.lib.binding.drivers.veth + + # Specifies the name of the Nova instance interface to link the virtual devices + # to (only applicable to some binding drivers. (string value) + link_iface = eth0 driver = kuryr.lib.binding.drivers.vlan - link_iface = {{ kuryr_cni_link_interface }} + + + [cni_daemon] + + # + # From kuryr_kubernetes + # + + # Enable CNI Daemon configuration. (boolean value) + daemon_enabled = true + + # Bind address for CNI daemon HTTP server. It is recommened to allow only local + # connections. (string value) + bind_address = 127.0.0.1:50036 + + # Maximum number of processes that will be spawned to process requests from CNI + # driver. (integer value) + #worker_num = 30 + + # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in + # pod metadata before failing the CNI request. (integer value) + #vif_annotation_timeout = 120 + + # Kuryr uses pyroute2 library to manipulate networking interfaces. When + # processing a high number of Kuryr requests in parallel, it may take kernel + # more time to process all networking stack changes. This option allows to tune + # internal pyroute2 timeout. 
(integer value) + #pyroute2_timeout = 30 + + # Set to True when you are running kuryr-daemon inside a Docker container on + # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to + # provide networking for. This mainly means thatkuryr-daemon will look for + # network namespaces in $netns_proc_dir instead of /proc. (boolean value) + docker_mode = true + + # When docker_mode is set to True, this config option should be set to where + # host's /proc directory is mounted. Please note that mounting it is necessary + # to allow Kuryr-Kubernetes to move host interfaces between host network + # namespaces, which is essential for Kuryr to work. (string value) + netns_proc_dir = /host_proc + [kubernetes] @@ -341,12 +532,136 @@ data: # The root URL of the Kubernetes API (string value) api_root = {{ openshift.master.api_url }} - # The token to talk to the k8s API - token_file = /etc/kuryr/token + # Absolute path to client cert to connect to HTTPS K8S_API (string value) + # ssl_client_crt_file = /etc/kuryr/controller.crt + + # Absolute path client key file to connect to HTTPS K8S_API (string value) + # ssl_client_key_file = /etc/kuryr/controller.key # Absolute path to ca cert file to connect to HTTPS K8S_API (string value) - ssl_ca_crt_file = /etc/kuryr/ca.crt + ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + + # The token to talk to the k8s API + token_file = /var/run/secrets/kubernetes.io/serviceaccount/token # HTTPS K8S_API server identity verification (boolean value) # TODO (apuimedo): Make configurable ssl_verify_server_crt = True + + # The driver to determine OpenStack project for pod ports (string value) + pod_project_driver = default + + # The driver to determine OpenStack project for services (string value) + service_project_driver = default + + # The driver to determine Neutron subnets for pod ports (string value) + pod_subnets_driver = default + + # The driver to determine Neutron subnets for services (string value) + service_subnets_driver = default + + # The driver to determine Neutron security groups for pods (string value) + pod_security_groups_driver = default + + # The driver to determine Neutron security groups for services (string value) + service_security_groups_driver = default + + # The driver that provides VIFs for Kubernetes Pods. 
(string value) + pod_vif_driver = nested-vlan + + # The driver that manages VIFs pools for Kubernetes Pods (string value) + vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }} + + [neutron] + # Configuration options for OpenStack Neutron + + # + # From kuryr_kubernetes + # + + # Authentication URL (string value) + auth_url = {{ kuryr_openstack_auth_url }} + + # Authentication type to load (string value) + # Deprecated group/name - [neutron]/auth_plugin + auth_type = password + + # Domain ID to scope to (string value) + user_domain_name = {{ kuryr_openstack_user_domain_name }} + + # User's password (string value) + password = {{ kuryr_openstack_password }} + + # Domain name containing project (string value) + project_domain_name = {{ kuryr_openstack_project_domain_name }} + + # Project ID to scope to (string value) + # Deprecated group/name - [neutron]/tenant-id + project_id = {{ kuryr_openstack_project_id }} + + # Token (string value) + #token = <None> + + # Trust ID (string value) + #trust_id = <None> + + # User's domain id (string value) + #user_domain_id = <None> + + # User id (string value) + #user_id = <None> + + # Username (string value) + # Deprecated group/name - [neutron]/user-name + username = {{kuryr_openstack_username }} + + # Whether a plugging operation is failed if the port to plug does not become + # active (boolean value) + #vif_plugging_is_fatal = false + + # Seconds to wait for port to become active (integer value) + #vif_plugging_timeout = 0 + + [neutron_defaults] + + pod_security_groups = {{ kuryr_openstack_pod_sg_id }} + pod_subnet = {{ kuryr_openstack_pod_subnet_id }} + service_subnet = {{ kuryr_openstack_service_subnet_id }} + project = {{ kuryr_openstack_pod_project_id }} + # TODO (apuimedo): Remove the duplicated line just after this one once the + # RDO packaging contains the upstream patch + worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + + [pod_vif_nested] + + worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + + + [pool_manager] + + # + # From kuryr_kubernetes + # + + # Absolute path to socket file that will be used for communication with the + # Pool Manager daemon (string value) + #sock_file = /run/kuryr/kuryr_manage.sock + + + [vif_pool] + + # + # From kuryr_kubernetes + # + + # Set a maximun amount of ports per pool. 
0 to disable (integer value) + ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} + + # Set a target minimum size of the pool of ports (integer value) + ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} + + # Number of ports to be created in a bulk request (integer value) + ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} + + # Minimun interval (in seconds) between pool updates (integer value) + ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2 index d970270b5..155d1faab 100644 --- a/roles/kuryr/templates/controller-deployment.yaml.j2 +++ b/roles/kuryr/templates/controller-deployment.yaml.j2 @@ -22,6 +22,13 @@ spec: - image: kuryr/controller:latest imagePullPolicy: IfNotPresent name: controller +{% if kuryr_openstack_enable_pools | default(false) %} + readinessProbe: + exec: + command: + - cat + - /tmp/pools_loaded +{% endif %} terminationMessagePath: "/dev/termination-log" # FIXME(dulek): This shouldn't be required, but without it selinux is # complaining about access to kuryr.conf. diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/lib_openshift/library/conditional_set_fact.py index f61801714..363399f33 100644 --- a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py +++ b/roles/lib_openshift/library/conditional_set_fact.py @@ -29,6 +29,10 @@ EXAMPLES = ''' fact1: not_defined_variable fact2: defined_variable +- name: Conditionally set fact falling back on default + conditional_set_fact: + fact1: not_defined_var | defined_variable + ''' @@ -48,12 +52,14 @@ def run_module(): is_changed = False for param in module.params['vars']: - other_var = module.params['vars'][param] - - if other_var in module.params['facts']: - local_facts[param] = module.params['facts'][other_var] - if not is_changed: - is_changed = True + other_vars = module.params['vars'][param].replace(" ", "") + + for other_var in other_vars.split('|'): + if other_var in module.params['facts']: + local_facts[param] = module.params['facts'][other_var] + if not is_changed: + is_changed = True + break return module.exit_json(changed=is_changed, # noqa: F405 ansible_facts=local_facts) diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 1b63a6c13..72023eaf7 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -1485,7 +1485,7 @@ class OCGroup(OpenShiftCLI): def needs_update(self): ''' verify an update is needed ''' - return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True) + return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True) # pylint: disable=too-many-return-statements,too-many-branches @staticmethod diff --git a/roles/lib_openshift/src/class/oc_group.py b/roles/lib_openshift/src/class/oc_group.py index 89fb09ea4..53e6b6766 100644 --- a/roles/lib_openshift/src/class/oc_group.py +++ b/roles/lib_openshift/src/class/oc_group.py @@ -59,7 +59,7 @@ class OCGroup(OpenShiftCLI): def needs_update(self): ''' verify an update is needed ''' - return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True) + return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True) # pylint: disable=too-many-return-statements,too-many-branches 
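
The conditional_set_fact change above (the module now lives in lib_openshift) adds `|`-separated fallback chains: candidate variables are tried left to right and the first name present in the supplied facts wins. Usage sketch (fact and variable names are illustrative):

- name: Prefer the new variable name, falling back to the deprecated one
  conditional_set_fact:
    facts: "{{ hostvars[inventory_hostname] }}"
    vars:
      l_deployment_type: openshift_deployment_type | deployment_type
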
@staticmethod diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py index d810735f2..9d10c84f3 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_scale.py +++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py @@ -27,7 +27,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing a list ''' params = {'name': 'router', 'namespace': 'default', 'replicas': 2, @@ -71,8 +71,296 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_state_present(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a state present ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 2, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 2) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_up(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale up ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 3, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + dc_updated = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6559", + "generation": 9, + "creationTimestamp": "2017-01-24T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 3, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc_updated, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = 
OCScale.run_ansible(params, False) + + self.assertTrue(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 3) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_down(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale down ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 1, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + dc_updated = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6560", + "generation": 9, + "creationTimestamp": "2017-01-24T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 1, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc_updated, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 1) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_failed(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale failure ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 1, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + error_message = "foo" + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': error_message, + 'returncode': 1}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['failed']) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_state_unknown(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing an unknown state ''' + params = {'name': 
'router', + 'namespace': 'default', + 'replicas': 2, + 'state': 'unknown-state', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse('changed' in results) + self.assertEqual(results['failed'], True) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') def test_scale(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing scale ''' params = {'name': 'router', 'namespace': 'default', 'replicas': 3, @@ -120,8 +408,57 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_rc(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing scale for replication controllers ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 3, + 'state': 'list', + 'kind': 'rc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + rc = '''{"kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 3, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get rc router -n default', + 'results': rc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc create -f /tmp/router -n default', + 'results': '', + 'returncode': 0} + ] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse(results['changed']) + self.assertEqual(results['result'][0], 3) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') def test_no_dc_scale(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing scale for inexisting dc ''' params = {'name': 'not_there', 'namespace': 'default', 'replicas': 3, @@ -205,7 +542,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup fallback ''' + ''' Testing binary lookup fallback in py3 ''' mock_env_get.side_effect = lambda _v, _d: '' @@ -217,7 +554,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in path ''' + ''' Testing binary lookup in path in py3 ''' oc_bin = '/usr/bin/oc' @@ -231,7 +568,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') 
@mock.patch('os.environ.get') def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in /usr/local/bin ''' + ''' Testing binary lookup in /usr/local/bin in py3 ''' oc_bin = '/usr/local/bin/oc' @@ -245,7 +582,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in ~/bin ''' + ''' Testing binary lookup in ~/bin in py3 ''' oc_bin = os.path.expanduser('~/bin/oc') diff --git a/roles/lib_os_firewall/README.md b/roles/lib_os_firewall/README.md deleted file mode 100644 index ba8c84865..000000000 --- a/roles/lib_os_firewall/README.md +++ /dev/null @@ -1,63 +0,0 @@ -lib_os_firewall -=========== - -lib_os_firewall manages iptables firewall settings for a minimal use -case (Adding/Removing rules based on protocol and port number). - -Note: firewalld is not supported on Atomic Host -https://bugzilla.redhat.com/show_bug.cgi?id=1403331 - -Requirements ------------- - -Ansible 2.2 - -Role Variables --------------- - -| Name | Default | | -|---------------------------|---------|----------------------------------------| -| os_firewall_allow | [] | List of service,port mappings to allow | -| os_firewall_deny | [] | List of service, port mappings to deny | - -Dependencies ------------- - -None. - -Example Playbook ----------------- - -Use iptables and open tcp ports 80 and 443: -``` ---- -- hosts: servers - vars: - os_firewall_use_firewalld: false - os_firewall_allow: - - service: httpd - port: 80/tcp - - service: https - port: 443/tcp - tasks: - - include_role: - name: lib_os_firewall - - - name: set allow rules - os_firewall_manage_iptables: - name: "{{ item.service }}" - action: add - protocol: "{{ item.port.split('/')[1] }}" - port: "{{ item.port.split('/')[0] }}" - with_items: "{{ os_firewall_allow }}" -``` - - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- -Jason DeTiberus - jdetiber@redhat.com diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py index eb13a58ba..eb13a58ba 100644 --- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py +++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py new file mode 100644 index 000000000..09ce55e8f --- /dev/null +++ b/roles/lib_utils/action_plugins/sanity_checks.py @@ -0,0 +1,181 @@ +""" +Ansible action plugin to ensure inventory variables are set +appropriately and no conflicting options have been provided. +""" +import re + +from ansible.plugins.action import ActionBase +from ansible import errors + +# Valid values for openshift_deployment_type +VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise') + +# Tuple of variable names and default values if undefined. +NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True), + ('openshift_use_flannel', False), + ('openshift_use_nuage', False), + ('openshift_use_contiv', False), + ('openshift_use_calico', False)) + +ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format +v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, +v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6 +You specified openshift_image_tag={}""" + +ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format +v#.#.#[-optional.#]. 
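
An inventory-side check equivalent to the origin tag format described above can be expressed with assert; this sketch uses the `match` test spelling from Ansible 2.5+ (on 2.4 the same pattern would be applied via the match filter), with an illustrative error message:

- name: Check openshift_image_tag format for origin deployments
  assert:
    that:
      - openshift_image_tag is match('^v?\d+\.\d+\.\d+(-[\w\-\.]*)?$')
    msg: "openshift_image_tag must look like v1.2.3 or v3.5.1-alpha.1"
  when:
    - openshift_image_tag is defined
    - openshift_image_tag != 'latest'
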
Examples: v1.2.3, v3.5.1-alpha.1 +You specified openshift_image_tag={}""" + +ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)', + 'error_msg': ORIGIN_TAG_REGEX_ERROR} +ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)', + 'error_msg': ENTERPRISE_TAG_REGEX_ERROR} +IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX, + 'openshift-enterprise': ENTERPRISE_TAG_REGEX} + +CONTAINERIZED_NO_TAG_ERROR_MSG = """To install a containerized Origin release, +you must set openshift_release or openshift_image_tag in your inventory to +specify which version of the OpenShift component images to use. +(Suggestion: add openshift_release="x.y" to inventory.)""" + + +def to_bool(var_to_check): + """Determine a boolean value given the multiple + ways bools can be specified in ansible.""" + # http://yaml.org/type/bool.html + yes_list = (True, 1, "True", "1", "true", "TRUE", + "Yes", "yes", "Y", "y", "YES", + "on", "ON", "On") + return var_to_check in yes_list + + +class ActionModule(ActionBase): + """Action plugin to execute sanity checks.""" + def template_var(self, hostvars, host, varname): + """Retrieve a variable from hostvars and template it. + If undefined, return None type.""" + res = hostvars[host].get(varname) + if res is None: + return None + return self._templar.template(res) + + def check_openshift_deployment_type(self, hostvars, host): + """Ensure a valid openshift_deployment_type is set""" + openshift_deployment_type = self.template_var(hostvars, host, + 'openshift_deployment_type') + if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES: + type_strings = ", ".join(VALID_DEPLOYMENT_TYPES) + msg = "openshift_deployment_type must be defined and one of {}".format(type_strings) + raise errors.AnsibleModuleError(msg) + return openshift_deployment_type + + def check_python_version(self, hostvars, host, distro): + """Ensure python version is 3 for Fedora and python 2 for others""" + ansible_python = self.template_var(hostvars, host, 'ansible_python') + if distro == "Fedora": + if ansible_python['version']['major'] != 3: + msg = "openshift-ansible requires Python 3 for {};".format(distro) + msg += " For information on enabling Python 3 with Ansible," + msg += " see https://docs.ansible.com/ansible/python_3_support.html" + raise errors.AnsibleModuleError(msg) + else: + if ansible_python['version']['major'] != 2: + msg = "openshift-ansible requires Python 2 for {};".format(distro) + + def check_image_tag_format(self, hostvars, host, openshift_deployment_type): + """Ensure openshift_image_tag is formatted correctly""" + openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag') + if not openshift_image_tag or openshift_image_tag == 'latest': + return None + regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re'] + res = re.match(regex_to_match, str(openshift_image_tag)) + if res is None: + msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg'] + msg = msg.format(str(openshift_image_tag)) + raise errors.AnsibleModuleError(msg) + + def no_origin_image_version(self, hostvars, host, openshift_deployment_type): + """Ensure we can determine what image version to use with origin + fail when: + - openshift_is_containerized + - openshift_deployment_type == 'origin' + - openshift_release is not defined + - openshift_image_tag is not defined""" + if not openshift_deployment_type == 'origin': + return None + oic = self.template_var(hostvars, host, 'openshift_is_containerized') + if not to_bool(oic): + return None + orelease = 
self.template_var(hostvars, host, 'openshift_release') + oitag = self.template_var(hostvars, host, 'openshift_image_tag') + if not orelease and not oitag: + raise errors.AnsibleModuleError(CONTAINERIZED_NO_TAG_ERROR_MSG) + + def network_plugin_check(self, hostvars, host): + """Ensure only one type of network plugin is enabled""" + res = [] + # Loop through each possible network plugin boolean, determine the + # actual boolean value, and append results into a list. + for plugin, default_val in NET_PLUGIN_LIST: + res_temp = self.template_var(hostvars, host, plugin) + if res_temp is None: + res_temp = default_val + res.append(to_bool(res_temp)) + + if sum(res) != 1: + plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res)) + + msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str) + raise errors.AnsibleModuleError(msg) + + def check_hostname_vars(self, hostvars, host): + """Checks to ensure openshift_hostname + and openshift_public_hostname + conform to the proper length of 63 characters or less""" + for varname in ('openshift_public_hostname', 'openshift_hostname'): + var_value = self.template_var(hostvars, host, varname) + if var_value and len(var_value) > 63: + msg = '{} must be 63 characters or less'.format(varname) + raise errors.AnsibleModuleError(msg) + + def run_checks(self, hostvars, host): + """Execute the hostvars validations against host""" + distro = self.template_var(hostvars, host, 'ansible_distribution') + odt = self.check_openshift_deployment_type(hostvars, host) + self.check_python_version(hostvars, host, distro) + self.check_image_tag_format(hostvars, host, odt) + self.no_origin_image_version(hostvars, host, odt) + self.network_plugin_check(hostvars, host) + self.check_hostname_vars(hostvars, host) + + def run(self, tmp=None, task_vars=None): + result = super(ActionModule, self).run(tmp, task_vars) + + # self.task_vars holds all in-scope variables. + # Ignore settting self.task_vars outside of init. + # pylint: disable=W0201 + self.task_vars = task_vars or {} + + # self._task.args holds task parameters. + # check_hosts is a parameter to this plugin, and should provide + # a list of hosts. + check_hosts = self._task.args.get('check_hosts') + if not check_hosts: + msg = "check_hosts is required" + raise errors.AnsibleModuleError(msg) + + # We need to access each host's variables + hostvars = self.task_vars.get('hostvars') + if not hostvars: + msg = hostvars + raise errors.AnsibleModuleError(msg) + + # We loop through each host in the provided list check_hosts + for host in check_hosts: + self.run_checks(hostvars, host) + + result["changed"] = False + result["failed"] = False + result["msg"] = "Sanity Checks passed" + + return result diff --git a/roles/lib_utils/callback_plugins/aa_version_requirement.py b/roles/lib_utils/callback_plugins/aa_version_requirement.py new file mode 100644 index 000000000..1093acdae --- /dev/null +++ b/roles/lib_utils/callback_plugins/aa_version_requirement.py @@ -0,0 +1,60 @@ +#!/usr/bin/python + +""" +This callback plugin verifies the required minimum version of Ansible +is installed for proper operation of the OpenShift Ansible Installer. +The plugin is named with leading `aa_` to ensure this plugin is loaded +first (alphanumerically) by Ansible. 
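
Because sanity_checks is an action plugin, it executes on the control node with access to hostvars and can validate every host from a single task. Invocation sketch (group names follow openshift-ansible conventions but are illustrative here):

- name: Verify required inventory variables
  hosts: oo_first_master
  tasks:
  - name: Run variable sanity checks against all hosts
    sanity_checks:
      check_hosts: "{{ groups['oo_all_hosts'] }}"
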
+""" +import sys +from ansible import __version__ + +if __version__ < '2.0': + # pylint: disable=import-error,no-name-in-module + # Disabled because pylint warns when Ansible v2 is installed + from ansible.callbacks import display as pre2_display + CallbackBase = object + + def display(*args, **kwargs): + """Set up display function for pre Ansible v2""" + pre2_display(*args, **kwargs) +else: + from ansible.plugins.callback import CallbackBase + from ansible.utils.display import Display + + def display(*args, **kwargs): + """Set up display function for Ansible v2""" + display_instance = Display() + display_instance.display(*args, **kwargs) + + +# Set to minimum required Ansible version +REQUIRED_VERSION = '2.4.1.0' +DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION + + +def version_requirement(version): + """Test for minimum required version""" + return version >= REQUIRED_VERSION + + +class CallbackModule(CallbackBase): + """ + Ansible callback plugin + """ + + CALLBACK_VERSION = 1.0 + CALLBACK_NAME = 'version_requirement' + + def __init__(self): + """ + Version verification is performed in __init__ to catch the + requirement early in the execution of Ansible and fail gracefully + """ + super(CallbackModule, self).__init__() + + if not version_requirement(__version__): + display( + 'FATAL: Current Ansible version (%s) is not supported. %s' + % (__version__, DESCRIPTION), color='red') + sys.exit(1) diff --git a/roles/lib_utils/callback_plugins/openshift_quick_installer.py b/roles/lib_utils/callback_plugins/openshift_quick_installer.py new file mode 100644 index 000000000..365e2443d --- /dev/null +++ b/roles/lib_utils/callback_plugins/openshift_quick_installer.py @@ -0,0 +1,360 @@ +# pylint: disable=invalid-name,protected-access,import-error,line-too-long,attribute-defined-outside-init + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +"""This file is a stdout callback plugin for the OpenShift Quick +Installer. The purpose of this callback plugin is to reduce the amount +of produced output for customers and enable simpler progress checking. + +What's different: + +* Playbook progress is expressed as: Play <current_play>/<total_plays> (Play Name) + Ex: Play 3/30 (Initialize Megafrobber) + +* The Tasks and Handlers in each play (and included roles) are printed + as a series of .'s following the play progress line. + +* Many of these methods include copy and paste code from the upstream + default.py callback. We do that to give us control over the stdout + output while allowing Ansible to handle the file logging + normally. The biggest changes here are that we are manually setting + `log_only` to True in the Display.display method and we redefine the + Display.banner method locally so we can set log_only on that call as + well. 
+ +""" + +from __future__ import (absolute_import, print_function) +import sys +from ansible import constants as C +from ansible.plugins.callback import CallbackBase +from ansible.utils.color import colorize, hostcolor + + +class CallbackModule(CallbackBase): + + """ + Ansible callback plugin + """ + CALLBACK_VERSION = 2.2 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'openshift_quick_installer' + CALLBACK_NEEDS_WHITELIST = False + plays_count = 0 + plays_total_ran = 0 + + def __init__(self): + """Constructor, ensure standard self.*s are set""" + self._play = None + self._last_task_banner = None + super(CallbackModule, self).__init__() + + def banner(self, msg, color=None): + '''Prints a header-looking line with stars taking up to 80 columns + of width (3 columns, minimum) + + Overrides the upstream banner method so that display is called + with log_only=True + ''' + msg = msg.strip() + star_len = (79 - len(msg)) + if star_len < 0: + star_len = 3 + stars = "*" * star_len + self._display.display("\n%s %s" % (msg, stars), color=color, log_only=True) + + def _print_task_banner(self, task): + """Imported from the upstream 'default' callback""" + # args can be specified as no_log in several places: in the task or in + # the argument spec. We can check whether the task is no_log but the + # argument spec can't be because that is only run on the target + # machine and we haven't run it thereyet at this time. + # + # So we give people a config option to affect display of the args so + # that they can secure this if they feel that their stdout is insecure + # (shoulder surfing, logging stdout straight to a file, etc). + args = '' + if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: + args = ', '.join('%s=%s' % a for a in task.args.items()) + args = ' %s' % args + + self.banner(u"TASK [%s%s]" % (task.get_name().strip(), args)) + if self._display.verbosity >= 2: + path = task.get_path() + if path: + self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG, log_only=True) + + self._last_task_banner = task._uuid + + def v2_playbook_on_start(self, playbook): + """This is basically the start of it all""" + self.plays_count = len(playbook.get_plays()) + self.plays_total_ran = 0 + + if self._display.verbosity > 1: + from os.path import basename + self.banner("PLAYBOOK: %s" % basename(playbook._file_name)) + + def v2_playbook_on_play_start(self, play): + """Each play calls this once before running any tasks + +We could print the number of tasks here as well by using +`play.get_tasks()` but that is not accurate when a play includes a +role. Only the tasks directly assigned to a play are exposed in the +`play` object. + """ + self.plays_total_ran += 1 + print("") + print("Play %s/%s (%s)" % (self.plays_total_ran, self.plays_count, play.get_name())) + + name = play.get_name().strip() + if not name: + msg = "PLAY" + else: + msg = "PLAY [%s]" % name + + self._play = play + + self.banner(msg) + + # pylint: disable=unused-argument,no-self-use + def v2_playbook_on_task_start(self, task, is_conditional): + """This prints out the task header. For example: + +TASK [openshift_facts : Ensure PyYaml is installed] ***... + +Rather than print out all that for every task, we print a dot +character to indicate a task has been started. + """ + sys.stdout.write('.') + + args = '' + # args can be specified as no_log in several places: in the task or in + # the argument spec. 
We can check whether the task is no_log but the + # argument spec can't be because that is only run on the target + # machine and we haven't run it thereyet at this time. + # + # So we give people a config option to affect display of the args so + # that they can secure this if they feel that their stdout is insecure + # (shoulder surfing, logging stdout straight to a file, etc). + if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: + args = ', '.join(('%s=%s' % a for a in task.args.items())) + args = ' %s' % args + self.banner("TASK [%s%s]" % (task.get_name().strip(), args)) + if self._display.verbosity >= 2: + path = task.get_path() + if path: + self._display.display("task path: %s" % path, color=C.COLOR_DEBUG, log_only=True) + + # pylint: disable=unused-argument,no-self-use + def v2_playbook_on_handler_task_start(self, task): + """Print out task header for handlers + +Rather than print out a header for every handler, we print a dot +character to indicate a handler task has been started. +""" + sys.stdout.write('.') + + self.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) + + # pylint: disable=unused-argument,no-self-use + def v2_playbook_on_cleanup_task_start(self, task): + """Print out a task header for cleanup tasks + +Rather than print out a header for every handler, we print a dot +character to indicate a handler task has been started. +""" + sys.stdout.write('.') + + self.banner("CLEANUP TASK [%s]" % task.get_name().strip()) + + def v2_playbook_on_include(self, included_file): + """Print out paths to statically included files""" + msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) + self._display.display(msg, color=C.COLOR_SKIP, log_only=True) + + def v2_runner_on_ok(self, result): + """This prints out task results in a fancy format + +The only thing we change here is adding `log_only=True` to the +.display() call + """ + delegated_vars = result._result.get('_ansible_delegated_vars', None) + self._clean_results(result._result, result._task.action) + if result._task.action in ('include', 'import_role'): + return + elif result._result.get('changed', False): + if delegated_vars: + msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg = "changed: [%s]" % result._host.get_name() + color = C.COLOR_CHANGED + else: + if delegated_vars: + msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg = "ok: [%s]" % result._host.get_name() + color = C.COLOR_OK + + if result._task.loop and 'results' in result._result: + self._process_items(result) + else: + + if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: + msg += " => %s" % (self._dump_results(result._result),) + self._display.display(msg, color=color, log_only=True) + + self._handle_warnings(result._result) + + def v2_runner_item_on_ok(self, result): + """Print out task results for items you're iterating over""" + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if result._task.action in ('include', 'import_role'): + return + elif result._result.get('changed', False): + msg = 'changed' + color = C.COLOR_CHANGED + else: + msg = 'ok' + color = C.COLOR_OK + + if delegated_vars: + msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg += ": [%s]" % result._host.get_name() + + msg += " => (item=%s)" % (self._get_item(result._result),) + + if 
(self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: + msg += " => %s" % self._dump_results(result._result) + self._display.display(msg, color=color, log_only=True) + + def v2_runner_item_on_skipped(self, result): + """Print out task results when an item is skipped""" + if C.DISPLAY_SKIPPED_HOSTS: + msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result)) + if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: + msg += " => %s" % self._dump_results(result._result) + self._display.display(msg, color=C.COLOR_SKIP, log_only=True) + + def v2_runner_on_skipped(self, result): + """Print out task results when a task (or something else?) is skipped""" + if C.DISPLAY_SKIPPED_HOSTS: + if result._task.loop and 'results' in result._result: + self._process_items(result) + else: + msg = "skipping: [%s]" % result._host.get_name() + if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: + msg += " => %s" % self._dump_results(result._result) + self._display.display(msg, color=C.COLOR_SKIP, log_only=True) + + def v2_playbook_on_notify(self, res, handler): + """What happens when a task result is 'changed' and the task has a +'notify' list attached. + """ + self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP, log_only=True) + + ###################################################################### + # So we can bubble up errors to the top + def v2_runner_on_failed(self, result, ignore_errors=False): + """I guess this is when an entire task has failed?""" + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if 'exception' in result._result: + if self._display.verbosity < 3: + # extract just the actual error message from the exception text + error = result._result['exception'].strip().split('\n')[-1] + msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error + else: + msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] + + self._display.display(msg, color=C.COLOR_ERROR) + + if result._task.loop and 'results' in result._result: + self._process_items(result) + + else: + if delegated_vars: + self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) + else: + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) + + if ignore_errors: + self._display.display("...ignoring", color=C.COLOR_SKIP) + + def v2_runner_item_on_failed(self, result): + """When an item in a task fails.""" + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if 'exception' in result._result: + if self._display.verbosity < 3: + # extract just the actual error message from the exception text + error = result._result['exception'].strip().split('\n')[-1] + msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error + else: + msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] + + self._display.display(msg, color=C.COLOR_ERROR) + + msg = "failed: " + if delegated_vars: + msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg += "[%s]" % (result._host.get_name()) + + self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR) + self._handle_warnings(result._result) + + ###################################################################### + def v2_playbook_on_stats(self, stats): + """Print the final playbook run stats""" + self._display.display("", screen_only=True) + self.banner("PLAY RECAP") + + hosts = sorted(stats.processed.keys()) + for h in hosts: + t = stats.summarize(h) + + self._display.display( + u"%s : %s %s %s %s" % ( + hostcolor(h, t), + colorize(u'ok', t['ok'], C.COLOR_OK), + colorize(u'changed', t['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', t['failures'], C.COLOR_ERROR)), + screen_only=True + ) + + self._display.display( + u"%s : %s %s %s %s" % ( + hostcolor(h, t, False), + colorize(u'ok', t['ok'], None), + colorize(u'changed', t['changed'], None), + colorize(u'unreachable', t['unreachable'], None), + colorize(u'failed', t['failures'], None)), + log_only=True + ) + + self._display.display("", screen_only=True) + self._display.display("", screen_only=True) + + # Some plays are conditional and won't run (such as load + # balancers) if they aren't required. Sometimes plays are + # conditionally included later in the run. Let the user know + # about this to avoid potential confusion. + if self.plays_total_ran != self.plays_count: + print("Installation Complete: Note: Play count is only an estimate, some plays may have been skipped or dynamically added") + self._display.display("", screen_only=True) diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py index a2bc9ecdb..58b228fee 100644 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py @@ -31,7 +31,6 @@ certificates Example playbook usage: - name: Generate expiration results JSON - become: no run_once: yes delegate_to: localhost when: openshift_certificate_expiry_save_json_results|bool diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py new file mode 100644 index 000000000..574743ff1 --- /dev/null +++ b/roles/lib_utils/filter_plugins/oo_filters.py @@ -0,0 +1,695 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=too-many-lines +""" +Custom filters for use in openshift-ansible +""" +import json +import os +import pdb +import random +import re + +from base64 import b64encode +from collections import Mapping +# pylint no-name-in-module and import-error disabled here because pylint +# fails to properly detect the packages when installed in a virtualenv +from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error +from operator import itemgetter + +import yaml + +from ansible import errors +from ansible.parsing.yaml.dumper import AnsibleDumper + +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six import string_types, u +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six.moves.urllib.parse import urlparse + +HAS_OPENSSL = False +try: + import OpenSSL.crypto + 
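+    # The flag below is flipped only when the import above succeeds;
+    # the certificate filters check HAS_OPENSSL before touching
+    # OpenSSL.crypto, so the rest of this module stays importable.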
HAS_OPENSSL = True
+except ImportError:
+    pass
+
+
+# pylint: disable=C0103
+
+def lib_utils_oo_pdb(arg):
+    """ This pops you into a pdb instance where arg is the data passed in
+        from the filter.
+        Ex: "{{ hostvars | lib_utils_oo_pdb }}"
+    """
+    pdb.set_trace()
+    return arg
+
+
+def get_attr(data, attribute=None):
+    """ This looks up dictionary attributes of the form a.b.c and returns
+        the value.
+
+        If the key isn't present, None is returned.
+        Ex: data = {'a': {'b': {'c': 5}}}
+            attribute = "a.b.c"
+            returns 5
+    """
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+    ptr = data
+    for attr in attribute.split('.'):
+        if attr in ptr:
+            ptr = ptr[attr]
+        else:
+            ptr = None
+            break
+
+    return ptr
+
+
+def oo_flatten(data):
+    """ This filter plugin will flatten a list of lists
+    """
+    if not isinstance(data, list):
+        raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+    return [item for sublist in data for item in sublist]
+
+
+def lib_utils_oo_collect(data_list, attribute=None, filters=None):
+    """ This takes a list of dict and collects all attributes specified into a
+        list. If filter is specified then we will include all items that
+        match _ALL_ of filters. If a dict entry is missing the key in a
+        filter it will be excluded from the match.
+        Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+                          {'a':2, 'z': 'z'},        # True, return
+                          {'a':3, 'z': 'z'},        # True, return
+                          {'a':4, 'z': 'b'},        # FAILED, obj['z'] != filters['z']
+                        ]
+            attribute = 'a'
+            filters   = {'z': 'z'}
+            returns [1, 2, 3]
+
+        This also deals with lists of lists with dict as elements.
+        Ex: data_list = [
+                          [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+                            {'a':2, 'b':6, 'z': 'z'}  # True, return
+                          ],
+                          [ {'a':3, 'z': 'z'},        # True, return
+                            {'a':4, 'z': 'b'}         # FAILED, obj['z'] != filters['z']
+                          ],
+                          {'a':5, 'z': 'z'},          # True, return
+                        ]
+            attribute = 'a'
+            filters   = {'z': 'z'}
+            returns [1, 2, 3, 5]
+    """
+    if not isinstance(data_list, list):
+        raise errors.AnsibleFilterError("lib_utils_oo_collect expects to filter on a List")
+
+    if not attribute:
+        raise errors.AnsibleFilterError("lib_utils_oo_collect expects attribute to be set")
+
+    data = []
+    retval = []
+
+    for item in data_list:
+        if isinstance(item, list):
+            retval.extend(lib_utils_oo_collect(item, attribute, filters))
+        else:
+            data.append(item)
+
+    if filters is not None:
+        if not isinstance(filters, dict):
+            raise errors.AnsibleFilterError(
+                "lib_utils_oo_collect expects filter to be a dict")
+        retval.extend([get_attr(d, attribute) for d in data if (
+            all([d.get(key, None) == filters[key] for key in filters]))])
+    else:
+        retval.extend([get_attr(d, attribute) for d in data])
+
+    retval = [val for val in retval if val is not None]
+
+    return retval
+
+
+def lib_utils_oo_select_keys_from_list(data, keys):
+    """ This returns a list, which contains the value portions for the keys
+        Ex: data = [ { 'a':1, 'b':2, 'c':3 } ]
+            keys = ['a', 'c']
+            returns [1, 3]
+    """
+
+    if not isinstance(data, list):
+        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects to filter on a list")
+
+    if not isinstance(keys, list):
+        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects first param is a list")
+
+    # Gather up the values for the list of keys passed in
+    retval = [lib_utils_oo_select_keys(item, keys) for item in data]
+
+    return oo_flatten(retval)
+
+
+def lib_utils_oo_select_keys(data, keys):
+    """ This returns a list, which contains the value portions for the keys
+        
Ex: data = { 'a':1, 'b':2, 'c':3 } + keys = ['a', 'c'] + returns [1, 3] + """ + + if not isinstance(data, Mapping): + raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects to filter on a dict or object") + + if not isinstance(keys, list): + raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects first param is a list") + + # Gather up the values for the list of keys passed in + retval = [data[key] for key in keys if key in data] + + return retval + + +def lib_utils_oo_prepend_strings_in_list(data, prepend): + """ This takes a list of strings and prepends a string to each item in the + list + Ex: data = ['cart', 'tree'] + prepend = 'apple-' + returns ['apple-cart', 'apple-tree'] + """ + if not isinstance(data, list): + raise errors.AnsibleFilterError("|failed expects first param is a list") + if not all(isinstance(x, string_types) for x in data): + raise errors.AnsibleFilterError("|failed expects first param is a list" + " of strings") + retval = [prepend + s for s in data] + return retval + + +def lib_utils_oo_dict_to_list_of_dict(data, key_title='key', value_title='value'): + """Take a dict and arrange them as a list of dicts + + Input data: + {'region': 'infra', 'test_k': 'test_v'} + + Return data: + [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}] + + Written for use of the oc_label module + """ + if not isinstance(data, dict): + # pylint: disable=line-too-long + raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data)))) + + rval = [] + for label in data.items(): + rval.append({key_title: label[0], value_title: label[1]}) + + return rval + + +def oo_ami_selector(data, image_name): + """ This takes a list of amis and an image name and attempts to return + the latest ami. + """ + if not isinstance(data, list): + raise errors.AnsibleFilterError("|failed expects first param is a list") + + if not data: + return None + else: + if image_name is None or not image_name.endswith('_*'): + ami = sorted(data, key=itemgetter('name'), reverse=True)[0] + return ami['ami_id'] + else: + ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data] + ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0] + return ami['ami_id'] + + +def lib_utils_oo_split(string, separator=','): + """ This splits the input string into a list. If the input string is + already a list we will return it as is. + """ + if isinstance(string, list): + return string + return string.split(separator) + + +def lib_utils_oo_dict_to_keqv_list(data): + """Take a dict and return a list of k=v pairs + + Input data: + {'a': 1, 'b': 2} + + Return data: + ['a=1', 'b=2'] + """ + return ['='.join(str(e) for e in x) for x in data.items()] + + +def lib_utils_oo_list_to_dict(lst, separator='='): + """ This converts a list of ["k=v"] to a dictionary {k: v}. 
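+        Ex: lst = ['a=1', 'b=2']
+            returns {'a': '1', 'b': '2'}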
+ """ + kvs = [i.split(separator) for i in lst] + return {k: v for k, v in kvs} + + +def haproxy_backend_masters(hosts, port): + """ This takes an array of dicts and returns an array of dicts + to be used as a backend for the haproxy role + """ + servers = [] + for idx, host_info in enumerate(hosts): + server = dict(name="master%s" % idx) + server_ip = host_info['openshift']['common']['ip'] + server['address'] = "%s:%s" % (server_ip, port) + server['opts'] = 'check' + servers.append(server) + return servers + + +# pylint: disable=too-many-branches, too-many-nested-blocks +def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames): + """ Parses names from list of certificate hashes. + + Ex: certificates = [{ "certfile": "/root/custom1.crt", + "keyfile": "/root/custom1.key", + "cafile": "/root/custom-ca1.crt" }, + { "certfile": "custom2.crt", + "keyfile": "custom2.key", + "cafile": "custom-ca2.crt" }] + + returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt", + "keyfile": "/etc/origin/master/named_certificates/custom1.key", + "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt", + "names": [ "public-master-host.com", + "other-master-host.com" ] }, + { "certfile": "/etc/origin/master/named_certificates/custom2.crt", + "keyfile": "/etc/origin/master/named_certificates/custom2.key", + "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt", + "names": [ "some-hostname.com" ] }] + """ + if not isinstance(named_certs_dir, string_types): + raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode") + + if not isinstance(internal_hostnames, list): + raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") + + if not HAS_OPENSSL: + raise errors.AnsibleFilterError("|missing OpenSSL python bindings") + + for certificate in certificates: + if 'names' in certificate.keys(): + continue + else: + certificate['names'] = [] + + if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']): + raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" % + (certificate['certfile'], certificate['keyfile'])) + + try: + st_cert = open(certificate['certfile'], 'rt').read() + cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert) + certificate['names'].append(str(cert.get_subject().commonName.decode())) + for i in range(cert.get_extension_count()): + if cert.get_extension(i).get_short_name() == 'subjectAltName': + for name in str(cert.get_extension(i)).split(', '): + if 'DNS:' in name: + certificate['names'].append(name.replace('DNS:', '')) + except Exception: + raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + + "please specify certificate names in host inventory")) + + certificate['names'] = list(set(certificate['names'])) + if 'cafile' not in certificate: + certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames] + if not certificate['names']: + raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] + + "detected a collision with internal hostname, please specify " + + "certificate names in host inventory")) + + for certificate in certificates: + # Update paths for configuration + certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile'])) + certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile'])) + if 
'cafile' in certificate: + certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile'])) + return certificates + + +def lib_utils_oo_parse_certificate_san(certificate): + """ Parses SubjectAlternativeNames from a PEM certificate. + + Ex: certificate = '''-----BEGIN CERTIFICATE----- + MIIEcjCCAlqgAwIBAgIBAzANBgkqhkiG9w0BAQsFADAhMR8wHQYDVQQDDBZldGNk + LXNpZ25lckAxNTE2ODIwNTg1MB4XDTE4MDEyNDE5MDMzM1oXDTIzMDEyMzE5MDMz + M1owHzEdMBsGA1UEAwwUbWFzdGVyMS5hYnV0Y2hlci5jb20wggEiMA0GCSqGSIb3 + DQEBAQUAA4IBDwAwggEKAoIBAQD4wBdWXNI3TF1M0b0bEIGyJPvdqKeGwF5XlxWg + NoA1Ain/Xz0N1SW5pXW2CDo9HX+ay8DyhzR532yrBa+RO3ivNCmfnexTQinfSLWG + mBEdiu7HO3puR/GNm74JNyXoEKlMAIRiTGq9HPoTo7tNV5MLodgYirpHrkSutOww + DfFSrNjH/ehqxwQtrIOnTAHigdTOrKVdoYxqXblDEMONTPLI5LMvm4/BqnAVaOyb + 9RUzND6lxU/ei3FbUS5IoeASOHx0l1ifxae3OeSNAimm/RIRo9rieFNUFh45TzID + elsdGrLB75LH/gnRVV1xxVbwPN6xW1mEwOceRMuhIArJQ2G5AgMBAAGjgbYwgbMw + UQYDVR0jBEowSIAUXTqN88vCI6E7wONls3QJ4/63unOhJaQjMCExHzAdBgNVBAMM + FmV0Y2Qtc2lnbmVyQDE1MTY4MjA1ODWCCQDMaopfom6OljAMBgNVHRMBAf8EAjAA + MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDAdBgNVHQ4EFgQU7l05 + OYeY3HppL6/0VJSirudj8t0wDwYDVR0RBAgwBocEwKh6ujANBgkqhkiG9w0BAQsF + AAOCAgEAFU8sicE5EeQsUPnFEqDvoJd1cVE+8aCBqkW0++4GsVw2A/JOJ3OBJL6r + BV3b1u8/e8xBNi8hPi42Q+LWBITZZ/COFyhwEAK94hcr7eZLCV2xfUdMJziP4Qkh + /WRN7vXHTtJ6NP/d6A22SPbtnMSt9Y6G8y9qa5HBrqIqmkYbLzDw/SdZbDbuGhRk + xUwg2ahXNblVoE5P6rxPONgXliA94telZ1/61iyrVaiGQb1/GUP/DRfvvR4dOCrA + lMosW6fm37Wdi/8iYW+aDPWGS+yVK/sjSnHNjxqvrzkfGk+COa5riT9hJ7wZY0Hb + YiJS74SZgZt/nnr5PI2zFRUiZLECqCkZnC/sz29i+irLabnq7Cif9Mv+TUcXWvry + TdJuaaYdTSMRSUkDd/c9Ife8tOr1i1xhFzDNKNkZjTVRk1MBquSXndVCDKucdfGi + YoWm+NDFrayw8yxK/KTHo3Db3lu1eIXTHxriodFx898b//hysHr4hs4/tsEFUTZi + 705L2ScIFLfnyaPby5GK/3sBIXtuhOFM3QV3JoYKlJB5T6wJioVoUmSLc+UxZMeE + t9gGVQbVxtLvNHUdW7uKQ5pd76nIJqApQf8wg2Pja8oo56fRZX2XLt8nm9cswcC4 + Y1mDMvtfxglQATwMTuoKGdREuu1mbdb8QqdyQmZuMa72q+ax2kQ= + -----END CERTIFICATE-----''' + + returns ['192.168.122.186'] + """ + + if not HAS_OPENSSL: + raise errors.AnsibleFilterError("|missing OpenSSL python bindings") + + names = [] + + try: + lcert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate) + for i in range(lcert.get_extension_count()): + if lcert.get_extension(i).get_short_name() == 'subjectAltName': + sanstr = str(lcert.get_extension(i)) + sanstr = sanstr.replace('DNS:', '') + sanstr = sanstr.replace('IP Address:', '') + names = sanstr.split(', ') + except Exception: + raise errors.AnsibleFilterError("|failed to parse certificate") + + return names + + +def lib_utils_oo_generate_secret(num_bytes): + """ generate a session secret """ + + if not isinstance(num_bytes, int): + raise errors.AnsibleFilterError("|failed expects num_bytes is int") + + return b64encode(os.urandom(num_bytes)).decode('utf-8') + + +def lib_utils_to_padded_yaml(data, level=0, indent=2, **kw): + """ returns a yaml snippet padded to match the indent level you specify """ + if data in [None, ""]: + return "" + + try: + transformed = u(yaml.dump(data, indent=indent, allow_unicode=True, + default_flow_style=False, + Dumper=AnsibleDumper, **kw)) + padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()]) + return "\n{0}".format(padded) + except Exception as my_e: + raise errors.AnsibleFilterError('Failed to convert: %s' % my_e) + + +def lib_utils_oo_pods_match_component(pods, deployment_type, component): + """ Filters a list of Pods and returns the ones matching the deployment_type and component + """ + if not isinstance(pods, list): + 
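+        # Fail fast on bad input; a templated value that rendered to a
+        # string or dict would otherwise fail deeper in the loop below.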
raise errors.AnsibleFilterError("failed expects to filter on a list") + if not isinstance(deployment_type, string_types): + raise errors.AnsibleFilterError("failed expects deployment_type to be a string") + if not isinstance(component, string_types): + raise errors.AnsibleFilterError("failed expects component to be a string") + + image_prefix = 'openshift/origin-' + if deployment_type == 'openshift-enterprise': + image_prefix = 'openshift3/ose-' + + matching_pods = [] + image_regex = image_prefix + component + r'.*' + for pod in pods: + for container in pod['spec']['containers']: + if re.search(image_regex, container['image']): + matching_pods.append(pod) + break # stop here, don't add a pod more than once + + return matching_pods + + +def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False): + """ Convert an image tag string to an RPM version if necessary + Empty strings and strings that are already in rpm version format + are ignored. Also remove non semantic version components. + + Ex. v3.2.0.10 -> -3.2.0.10 + v1.2.0-rc1 -> -1.2.0 + """ + if not isinstance(version, string_types): + raise errors.AnsibleFilterError("|failed expects a string or unicode") + if version.startswith("v"): + version = version[1:] + # Strip release from requested version, we no longer support this. + version = version.split('-')[0] + + if include_dash and version and not version.startswith("-"): + version = "-" + version + + return version + + +def lib_utils_oo_hostname_from_url(url): + """ Returns the hostname contained in a URL + + Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com + """ + if not isinstance(url, string_types): + raise errors.AnsibleFilterError("|failed expects a string or unicode") + parse_result = urlparse(url) + if parse_result.netloc != '': + return parse_result.netloc + else: + # netloc wasn't parsed, assume url was missing scheme and path + return parse_result.path + + +# pylint: disable=invalid-name, unused-argument +def lib_utils_oo_loadbalancer_frontends( + api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None): + """TODO: Document me.""" + loadbalancer_frontends = [{'name': 'atomic-openshift-api', + 'mode': 'tcp', + 'options': ['tcplog'], + 'binds': ["*:{0}".format(api_port)], + 'default_backend': 'atomic-openshift-api'}] + if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None: + loadbalancer_frontends.append({'name': 'nuage-monitor', + 'mode': 'tcp', + 'options': ['tcplog'], + 'binds': ["*:{0}".format(nuage_rest_port)], + 'default_backend': 'nuage-monitor'}) + return loadbalancer_frontends + + +# pylint: disable=invalid-name +def lib_utils_oo_loadbalancer_backends( + api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None): + """TODO: Document me.""" + loadbalancer_backends = [{'name': 'atomic-openshift-api', + 'mode': 'tcp', + 'option': 'tcplog', + 'balance': 'source', + 'servers': haproxy_backend_masters(servers_hostvars, api_port)}] + if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None: + # pylint: disable=line-too-long + loadbalancer_backends.append({'name': 'nuage-monitor', + 'mode': 'tcp', + 'option': 'tcplog', + 'balance': 'source', + 'servers': haproxy_backend_masters(servers_hostvars, nuage_rest_port)}) + return loadbalancer_backends + + +def lib_utils_oo_chomp_commit_offset(version): + """Chomp any "+git.foo" commit offset string from the given `version` + and return the modified version string. 
+
+Ex:
+- chomp_commit_offset(None) => None
+- chomp_commit_offset(1337) => "1337"
+- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
+- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
+- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
+    """
+    if version is None:
+        return version
+    else:
+        # Stringify, just in case it's a Number type. Split by '+' and
+        # return the first split. No concerns about strings without a
+        # '+', .split() returns an array of the original string.
+        return str(version).split('+')[0]
+
+
+def lib_utils_oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
+    """Generates a random string of given length from a set of alphanumeric characters.
+       The default source uses [a-z][A-Z][0-9]
+       Ex:
+       - lib_utils_oo_random_word(3) => aB9
+       - lib_utils_oo_random_word(4, source='012') => 0120
+    """
+    return ''.join(random.choice(source) for i in range(length))
+
+
+def lib_utils_oo_contains_rule(source, apiGroups, resources, verbs):
+    '''Return true if the specified rule is contained within the provided source'''
+
+    rules = source['rules']
+
+    if rules:
+        for rule in rules:
+            if set(rule['apiGroups']) == set(apiGroups):
+                if set(rule['resources']) == set(resources):
+                    if set(rule['verbs']) == set(verbs):
+                        return True
+
+    return False
+
+
+def lib_utils_oo_selector_to_string_list(user_dict):
+    """Convert a dict of selectors to a key=value list of strings
+
+Given input of {'region': 'infra', 'zone': 'primary'} returns a list
+of items as ['region=infra', 'zone=primary']
+    """
+    selectors = []
+    for key in user_dict:
+        selectors.append("{}={}".format(key, user_dict[key]))
+    return selectors
+
+
+def lib_utils_oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+    """Parse the Service Account Secrets list, `sa_secrets`, (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+    [
+        {
+            "name": "management-admin-dockercfg-p31s2"
+        },
+        {
+            "name": "management-admin-token-bnqsh"
+        }
+    ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+  `secret_hint` parameter. By default this is the secret holding the
+  SA's bearer token.
+
+Example playbook usage:
+
+Register a return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+    - name: Get all SA Secrets
+      oc_serviceaccount_secret:
+        state: list
+        service_account: management-admin
+        namespace: management-infra
+      register: sa
+
+    - name: Save the SA bearer token secret name
+      set_fact:
+        management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"
+
+    - name: Get the SA bearer token value
+      oc_secret:
+        state: list
+        name: "{{ management_token }}"
+        namespace: management-infra
+        decode: true
+      register: sa_secret
+
+    - name: Print the bearer token value
+      debug:
+        var: sa_secret.results.decoded.token
+
+    """
+    secret_name = None
+
+    for secret in sa_secrets:
+        # each secret is a hash
+        if secret['name'].find(secret_hint) == -1:
+            continue
+        else:
+            secret_name = secret['name']
+            break
+
+    return secret_name
+
+
+def lib_utils_oo_l_of_d_to_csv(input_list):
+    """Map a list of dictionaries, input_list, into a csv string
+    of json values.
+ + Example input: + [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}] + Example output: + u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}' + """ + return ','.join(json.dumps(x) for x in input_list) + + +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +class FilterModule(object): + """ Custom ansible filter mapping """ + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + """ returns a mapping of filters to methods """ + return { + "lib_utils_oo_select_keys": lib_utils_oo_select_keys, + "lib_utils_oo_select_keys_from_list": lib_utils_oo_select_keys_from_list, + "lib_utils_oo_chomp_commit_offset": lib_utils_oo_chomp_commit_offset, + "lib_utils_oo_collect": lib_utils_oo_collect, + "lib_utils_oo_pdb": lib_utils_oo_pdb, + "lib_utils_oo_prepend_strings_in_list": lib_utils_oo_prepend_strings_in_list, + "lib_utils_oo_dict_to_list_of_dict": lib_utils_oo_dict_to_list_of_dict, + "lib_utils_oo_split": lib_utils_oo_split, + "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list, + "lib_utils_oo_list_to_dict": lib_utils_oo_list_to_dict, + "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates, + "lib_utils_oo_parse_certificate_san": lib_utils_oo_parse_certificate_san, + "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret, + "lib_utils_oo_pods_match_component": lib_utils_oo_pods_match_component, + "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version, + "lib_utils_oo_hostname_from_url": lib_utils_oo_hostname_from_url, + "lib_utils_oo_loadbalancer_frontends": lib_utils_oo_loadbalancer_frontends, + "lib_utils_oo_loadbalancer_backends": lib_utils_oo_loadbalancer_backends, + "lib_utils_to_padded_yaml": lib_utils_to_padded_yaml, + "lib_utils_oo_random_word": lib_utils_oo_random_word, + "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule, + "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list, + "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets, + "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv, + "map_from_pairs": map_from_pairs + } diff --git a/roles/lib_utils/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py new file mode 100644 index 000000000..f16048056 --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_aws +''' + +from ansible import errors + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_aws role''' + + @staticmethod + def scale_groups_serial(scale_group_info, upgrade=False): + ''' This function will determine what the deployment serial should be and return it + + Search through the tags and find the deployment_serial tag. Once found, + determine if an increment is needed during an upgrade. 
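+            (e.g. a group already tagged deployment_serial=3 yields 3,
+            or 4 when upgrade is true)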
+            if upgrade is true then increment the serial and return it
+            else return the serial
+        '''
+        if scale_group_info == []:
+            return 1
+
+        scale_group_info = scale_group_info[0]
+
+        if not isinstance(scale_group_info, dict):
+            raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
+
+        serial = None
+
+        for tag in scale_group_info['tags']:
+            if tag['key'] == 'deployment_serial':
+                serial = int(tag['value'])
+                if upgrade:
+                    serial += 1
+                break
+        else:
+            raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
+
+        return serial
+
+    @staticmethod
+    def scale_groups_match_capacity(scale_group_info):
+        ''' This function will verify that the scale group instance count matches
+            the scale group desired capacity
+
+        '''
+        for scale_group in scale_group_info:
+            if scale_group['desired_capacity'] != len(scale_group['instances']):
+                return False
+
+        return True
+
+    @staticmethod
+    def build_instance_tags(clusterid):
+        ''' This function will return a dictionary of the instance tags.
+
+            The main desire to have this inside of a filter_plugin is that we
+            need to build the following key.
+
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid }}"}
+
+        '''
+        tags = {'clusterid': clusterid,
+                'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
+
+        return tags
+
+    @staticmethod
+    def get_default_az(subnets):
+        ''' From a list of subnets/AZs in a specific region (from the VPC
+            structure), return the AZ that has the key/value
+            'default_az=True'. '''
+
+        for subnet in subnets:
+            if subnet.get('default_az'):
+                return subnet['az']
+
+        # if there was none marked with default_az=True, just return the
+        # first one. (this does mean we could possibly return an item that
+        # has default_az=False set)
+        return subnets[0]['az']
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {'build_instance_tags': self.build_instance_tags,
+                'get_default_az': self.get_default_az,
+                'scale_groups_match_capacity': self.scale_groups_match_capacity,
+                'scale_groups_serial': self.scale_groups_serial}
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
index 003ce5f9e..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py
index ff15f693b..e67b19c28 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/lib_utils/filter_plugins/openshift_master.py
@@ -10,11 +10,7 @@ from ansible import errors
 from ansible.parsing.yaml.dumper import AnsibleDumper
 from ansible.plugins.filter.core import to_bool as ansible_bool

-# ansible.compat.six goes away with Ansible 2.4
-try:
-    from ansible.compat.six import string_types, u
-except ImportError:
-    from ansible.module_utils.six import string_types, u
+from ansible.module_utils.six import string_types, u

 import yaml

diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd/library/delegated_serial_command.py
+++ b/roles/lib_utils/library/delegated_serial_command.py
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index d4674845e..936fb1c38 100644
--- 
a/roles/lib_utils/library/docker_creds.py +++ b/roles/lib_utils/library/docker_creds.py @@ -135,7 +135,7 @@ def update_config(docker_config, registry, username, password): docker_config['auths'][registry] = {} # base64 encode our username:password string - encoded_data = base64.b64encode('{}:{}'.format(username, password)) + encoded_data = base64.b64encode('{}:{}'.format(username, password).encode()) # check if the same value is already present for idempotency. if 'auth' in docker_config['auths'][registry]: @@ -148,6 +148,8 @@ def update_config(docker_config, registry, username, password): def write_config(module, docker_config, dest): '''Write updated credentials into dest/config.json''' + if not isinstance(docker_config, dict): + docker_config = docker_config.decode() conf_file_path = os.path.join(dest, 'config.json') try: with open(conf_file_path, 'w') as conf_file: diff --git a/roles/lib_utils/library/kubeclient_ca.py b/roles/lib_utils/library/kubeclient_ca.py new file mode 100644 index 000000000..a89a5574f --- /dev/null +++ b/roles/lib_utils/library/kubeclient_ca.py @@ -0,0 +1,88 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' kubeclient_ca ansible module ''' + +import base64 +import yaml +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: kubeclient_ca +short_description: Modify kubeclient certificate-authority-data +author: Andrew Butcher +requirements: [ ] +''' +EXAMPLES = ''' +- kubeclient_ca: + client_path: /etc/origin/master/admin.kubeconfig + ca_path: /etc/origin/master/ca-bundle.crt + +- slurp: + src: /etc/origin/master/ca-bundle.crt + register: ca_data +- kubeclient_ca: + client_path: /etc/origin/master/admin.kubeconfig + ca_data: "{{ ca_data.content }}" +''' + + +def main(): + ''' Modify kubeconfig located at `client_path`, setting the + certificate authority data to specified `ca_data` or contents of + `ca_path`. + ''' + + module = AnsibleModule( # noqa: F405 + argument_spec=dict( + client_path=dict(required=True), + ca_data=dict(required=False, default=None), + ca_path=dict(required=False, default=None), + backup=dict(required=False, default=True, type='bool'), + ), + supports_check_mode=True, + mutually_exclusive=[['ca_data', 'ca_path']], + required_one_of=[['ca_data', 'ca_path']] + ) + + client_path = module.params['client_path'] + ca_data = module.params['ca_data'] + ca_path = module.params['ca_path'] + backup = module.params['backup'] + + try: + with open(client_path) as client_config_file: + client_config_data = yaml.safe_load(client_config_file.read()) + + if ca_data is None: + with open(ca_path) as ca_file: + ca_data = base64.standard_b64encode(ca_file.read()) + + changes = [] + # Naively update the CA information for each cluster in the + # kubeconfig. 
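+        # Every cluster entry in the kubeconfig is pointed at the same
+        # CA bundle, so a file with several clusters will trust a single
+        # CA after this runs.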
+ for cluster in client_config_data['clusters']: + if cluster['cluster']['certificate-authority-data'] != ca_data: + cluster['cluster']['certificate-authority-data'] = ca_data + changes.append(cluster['name']) + + if not module.check_mode: + if len(changes) > 0 and backup: + module.backup_local(client_path) + + with open(client_path, 'w') as client_config_file: + client_config_string = yaml.dump(client_config_data, default_flow_style=False) + client_config_string = client_config_string.replace('\'\'', '""') + client_config_file.write(client_config_string) + + return module.exit_json(changed=(len(changes) > 0)) + + # ignore broad-except error to avoid stack trace to ansible user + # pylint: disable=broad-except + except Exception as error: + return module.fail_json(msg=str(error)) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/library/modify_yaml.py b/roles/lib_utils/library/modify_yaml.py new file mode 100644 index 000000000..9b8f9ba33 --- /dev/null +++ b/roles/lib_utils/library/modify_yaml.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' modify_yaml ansible module ''' + +import yaml + +# ignore pylint errors related to the module_utils import +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import +from ansible.module_utils.basic import * # noqa: F402,F403 + + +DOCUMENTATION = ''' +--- +module: modify_yaml +short_description: Modify yaml key value pairs +author: Andrew Butcher +requirements: [ ] +''' +EXAMPLES = ''' +- modify_yaml: + dest: /etc/origin/master/master-config.yaml + yaml_key: 'kubernetesMasterConfig.masterCount' + yaml_value: 2 +''' + + +def set_key(yaml_data, yaml_key, yaml_value): + ''' Updates a parsed yaml structure setting a key to a value. + + :param yaml_data: yaml structure to modify. + :type yaml_data: dict + :param yaml_key: Key to modify. + :type yaml_key: mixed + :param yaml_value: Value use for yaml_key. + :type yaml_value: mixed + :returns: Changes to the yaml_data structure + :rtype: dict(tuple()) + ''' + changes = [] + ptr = yaml_data + final_key = yaml_key.split('.')[-1] + for key in yaml_key.split('.'): + # Key isn't present and we're not on the final key. Set to empty dictionary. + if key not in ptr and key != final_key: + ptr[key] = {} + ptr = ptr[key] + # Current key is the final key. Update value. + elif key == final_key: + if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): # noqa: F405 + ptr[key] = yaml_value + changes.append((yaml_key, yaml_value)) + else: + # Next value is None and we're not on the final key. + # Turn value into an empty dictionary. + if ptr[key] is None and key != final_key: + ptr[key] = {} + ptr = ptr[key] + return changes + + +def main(): + ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting + the key to the desired value. 
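+
+    For example, yaml_key 'kubernetesMasterConfig.masterCount' with
+    yaml_value 2 updates (or creates) that nested key in dest, creating
+    intermediate dictionaries as needed.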
+ ''' + + # disabling pylint errors for global-variable-undefined and invalid-name + # for 'global module' usage, since it is required to use ansible_facts + # pylint: disable=global-variable-undefined, invalid-name, + # redefined-outer-name + global module + + module = AnsibleModule( # noqa: F405 + argument_spec=dict( + dest=dict(required=True), + yaml_key=dict(required=True), + yaml_value=dict(required=True), + backup=dict(required=False, default=True, type='bool'), + ), + supports_check_mode=True, + ) + + dest = module.params['dest'] + yaml_key = module.params['yaml_key'] + yaml_value = module.safe_eval(module.params['yaml_value']) + backup = module.params['backup'] + + # Represent null values as an empty string. + # pylint: disable=missing-docstring, unused-argument + def none_representer(dumper, data): + return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'') + + yaml.add_representer(type(None), none_representer) + + try: + with open(dest) as yaml_file: + yaml_data = yaml.safe_load(yaml_file.read()) + + changes = set_key(yaml_data, yaml_key, yaml_value) + + if len(changes) > 0: + if backup: + module.backup_local(dest) + with open(dest, 'w') as yaml_file: + yaml_string = yaml.dump(yaml_data, default_flow_style=False) + yaml_string = yaml_string.replace('\'\'', '""') + yaml_file.write(yaml_string) + + return module.exit_json(changed=(len(changes) > 0), changes=changes) + + # ignore broad-except error to avoid stack trace to ansible user + # pylint: disable=broad-except + except Exception as error: + return module.fail_json(msg=str(error)) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py new file mode 100644 index 000000000..615021ac5 --- /dev/null +++ b/roles/lib_utils/library/oo_ec2_group.py @@ -0,0 +1,903 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# pylint: skip-file +# flake8: noqa + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: ec2_group +author: "Andrew de Quincey (@adq)" +version_added: "1.3" +requirements: [ boto3 ] +short_description: maintain an ec2 VPC security group. +description: + - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5 +options: + name: + description: + - Name of the security group. + - One of and only one of I(name) or I(group_id) is required. + - Required if I(state=present). + required: false + group_id: + description: + - Id of group to delete (works only with absent). + - One of and only one of I(name) or I(group_id) is required. + required: false + version_added: "2.4" + description: + description: + - Description of the security group. Required when C(state) is C(present). + required: false + vpc_id: + description: + - ID of the VPC to create the group in. 
+ required: false + rules: + description: + - List of firewall inbound rules to enforce in this group (see example). If none are supplied, + no inbound rules will be enabled. Rules list may include its own name in `group_name`. + This allows idempotent loopback additions (e.g. allow group to access itself). + Rule sources list support was added in version 2.4. This allows to define multiple sources per + source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed. + required: false + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, + a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. + Rule Egress sources list support was added in version 2.4. + required: false + version_added: "1.6" + state: + version_added: "1.4" + description: + - Create or delete a security group + required: false + default: 'present' + choices: [ "present", "absent" ] + aliases: [] + purge_rules: + version_added: "1.8" + description: + - Purge existing rules on security group that are not found in rules + required: false + default: 'true' + aliases: [] + purge_rules_egress: + version_added: "1.8" + description: + - Purge existing rules_egress on security group that are not found in rules_egress + required: false + default: 'true' + aliases: [] + tags: + version_added: "2.4" + description: + - A dictionary of one or more tags to assign to the security group. + required: false + purge_tags: + version_added: "2.4" + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then + tags will not be modified. + required: false + default: yes + choices: [ 'yes', 'no' ] + +extends_documentation_fragment: + - aws + - ec2 + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. +''' + +EXAMPLES = ''' +- name: example ec2 group + ec2_group: + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1 + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: icmp + from_port: 8 # icmp type, -1 = any type + to_port: -1 # icmp subtype, -1 = any subtype + cidr_ip: 10.0.0.0/8 + - proto: all + # the containing group name may be specified here + group_name: example + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + cidr_ipv6: 64:ff9b::/96 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + +- name: example2 ec2 group + ec2_group: + name: example2 + description: an example2 EC2 group + vpc_id: 12345 + region: eu-west-1 + rules: + # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port). 
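+    # For instance, "ports: 8080-8099" below is shorthand for
+    # from_port: 8080 and to_port: 8099 on a single rule.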
+ - proto: tcp + ports: 22 + group_name: example-vpn + - proto: tcp + ports: + - 80 + - 443 + - 8080-8099 + cidr_ip: 0.0.0.0/0 + # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. + - proto: tcp + ports: + - 6379 + - 26379 + group_name: + - example-vpn + - example-redis + - proto: tcp + ports: 5665 + group_name: example-vpn + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + cidr_ipv6: + - 2607:F8B0::/32 + - 64:ff9b::/96 + group_id: + - sg-edcd9784 + +- name: "Delete group by its id" + ec2_group: + group_id: sg-33b4ee5b + state: absent +''' + +RETURN = ''' +group_name: + description: Security group name + sample: My Security Group + type: string + returned: on create/update +group_id: + description: Security group id + sample: sg-abcd1234 + type: string + returned: on create/update +description: + description: Description of security group + sample: My Security Group + type: string + returned: on create/update +tags: + description: Tags associated with the security group + sample: + Name: My Security Group + Purpose: protecting stuff + type: dict + returned: on create/update +vpc_id: + description: ID of VPC to which the security group belongs + sample: vpc-abcd1234 + type: string + returned: on create/update +ip_permissions: + description: Inbound rules associated with the security group. + sample: + - from_port: 8182 + ip_protocol: tcp + ip_ranges: + - cidr_ip: "1.1.1.1/32" + ipv6_ranges: [] + prefix_list_ids: [] + to_port: 8182 + user_id_group_pairs: [] + type: list + returned: on create/update +ip_permissions_egress: + description: Outbound rules associated with the security group. + sample: + - ip_protocol: -1 + ip_ranges: + - cidr_ip: "0.0.0.0/0" + ipv6_ranges: [] + prefix_list_ids: [] + user_id_group_pairs: [] + type: list + returned: on create/update +owner_id: + description: AWS Account ID of the security group + sample: 123456789012 + type: int + returned: on create/update +''' + +import json +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import boto3_conn +from ansible.module_utils.ec2 import get_aws_connection_info +from ansible.module_utils.ec2 import ec2_argument_spec +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.ec2 import HAS_BOTO3 +from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags +from ansible.module_utils.ec2 import AWSRetry +import traceback + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_security_groups_with_backoff(connection, **kwargs): + return connection.describe_security_groups(**kwargs) + + +def deduplicate_rules_args(rules): + """Returns unique rules""" + if rules is None: + return None + return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) + + +def make_rule_key(prefix, rule, group_id, cidr_ip): + if 'proto' in rule: + proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] + elif 'IpProtocol' in rule: + proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')] + if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1: + from_port = 'none' + to_port = 'none' + key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip) + return key.lower().replace('-none', 
'-None') + + +def add_rules_to_lookup(ipPermissions, group_id, prefix, dict): + for rule in ipPermissions: + for groupGrant in rule.get('UserIdGroupPairs', []): + dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant) + for ipv4Grants in rule.get('IpRanges', []): + dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants) + for ipv6Grants in rule.get('Ipv6Ranges', []): + dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants) + + +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', + 'group_id', 'group_name', 'group_desc', + 'proto', 'from_port', 'to_port') + if not isinstance(rule, dict): + module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule)) + for k in rule: + if k not in VALID_PARAMS: + module.fail_json(msg='Invalid rule parameter \'{}\''.format(k)) + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_id OR cidr_ip, not both') + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_name OR cidr_ip, not both') + elif 'group_id' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") + elif 'group_name' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") + elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg='Specify group_id OR group_name, not both') + + +def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): + """ + Returns tuple of (group_id, ip) after validating rule params. + + rule: Dict describing a rule. + name: Name of the security group being managed. + groups: Dict of all available security groups. + + AWS accepts an ip range or a security group as target of a rule. This + function validate the rule specification and return either a non-None + group_id or a non-None ip range. + """ + + FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)' + group_id = None + group_name = None + ip = None + ipv6 = None + target_group_created = False + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_id OR cidr_ip, not both") + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_name OR cidr_ip, not both") + elif 'group_id' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") + elif 'group_name' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg="Specify group_id OR group_name, not both") + elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") + elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): + # this is a foreign Security Group. 
Since you can't fetch it, you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ elif 'group_id' in rule:
+ group_id = rule['group_id']
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ module.fail_json(msg="group %s will be automatically created by rule %s and "
+ "no description was provided" % (group_name, rule))
+ if not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ auto_group = client.create_security_group(**params)
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ elif 'cidr_ip' in rule:
+ ip = rule['cidr_ip']
+ elif 'cidr_ipv6' in rule:
+ ipv6 = rule['cidr_ipv6']
+
+ return group_id, ip, ipv6, target_group_created
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, str):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((port.strip(),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ if 'ports' not in rule:
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = from_to
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
+
+ return [r for
stype in source_types + for r in rule_expand_source(rule, stype)] + + +def rules_expand_sources(rules): + # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_sources(rule_complex)] + + +def authorize_ip(type, changed, client, group, groupRules, + ip, ip_permission, module, rule, ethertype): + # If rule already exists, don't later delete it + for thisip in ip: + rule_id = make_rule_key(type, rule, group['GroupId'], thisip) + if rule_id in groupRules: + del groupRules[rule_id] + else: + if not module.check_mode: + ip_permission = serialize_ip_grant(rule, thisip, ethertype) + if ip_permission: + try: + if type == "in": + client.authorize_security_group_ingress(GroupId=group['GroupId'], + IpPermissions=[ip_permission]) + elif type == "out": + client.authorize_security_group_egress(GroupId=group['GroupId'], + IpPermissions=[ip_permission]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" % + (type, thisip, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + return changed, ip_permission + + +def serialize_group_grant(group_id, rule): + permission = {'IpProtocol': rule['proto'], + 'FromPort': rule['from_port'], + 'ToPort': rule['to_port'], + 'UserIdGroupPairs': [{'GroupId': group_id}]} + + return fix_port_and_protocol(permission) + + +def serialize_revoke(grant, rule): + permission = dict() + fromPort = rule['FromPort'] if 'FromPort' in rule else None + toPort = rule['ToPort'] if 'ToPort' in rule else None + if 'GroupId' in grant: + permission = {'IpProtocol': rule['IpProtocol'], + 'FromPort': fromPort, + 'ToPort': toPort, + 'UserIdGroupPairs': [{'GroupId': grant['GroupId']}] + } + elif 'CidrIp' in grant: + permission = {'IpProtocol': rule['IpProtocol'], + 'FromPort': fromPort, + 'ToPort': toPort, + 'IpRanges': [grant] + } + elif 'CidrIpv6' in grant: + permission = {'IpProtocol': rule['IpProtocol'], + 'FromPort': fromPort, + 'ToPort': toPort, + 'Ipv6Ranges': [grant] + } + return fix_port_and_protocol(permission) + + +def serialize_ip_grant(rule, thisip, ethertype): + permission = {'IpProtocol': rule['proto'], + 'FromPort': rule['from_port'], + 'ToPort': rule['to_port']} + if ethertype == "ipv4": + permission['IpRanges'] = [{'CidrIp': thisip}] + elif ethertype == "ipv6": + permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}] + + return fix_port_and_protocol(permission) + + +def fix_port_and_protocol(permission): + for key in ['FromPort', 'ToPort']: + if key in permission: + if permission[key] is None: + del permission[key] + else: + permission[key] = int(permission[key]) + + permission['IpProtocol'] = str(permission['IpProtocol']) + + return permission + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(), + group_id=dict(), + description=dict(), + vpc_id=dict(), + rules=dict(type='list'), + rules_egress=dict(type='list'), + state=dict(default='present', type='str', choices=['present', 'absent']), + purge_rules=dict(default=True, required=False, type='bool'), + purge_rules_egress=dict(default=True, required=False, type='bool'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, required=False, type='bool') + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 
'group_id']], + required_if=[['state', 'present', ['name']]], + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + name = module.params['name'] + group_id = module.params['group_id'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules']))) + rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress']))) + state = module.params.get('state') + purge_rules = module.params['purge_rules'] + purge_rules_egress = module.params['purge_rules_egress'] + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + if state == 'present' and not description: + module.fail_json(msg='Must provide description when state is present.') + + changed = False + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="The AWS region must be specified as an " + "environment variable or in the AWS credentials " + "profile.") + client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params) + group = None + groups = dict() + security_groups = [] + # do get all security groups + # find if the group is present + try: + response = get_security_groups_with_backoff(client) + security_groups = response.get('SecurityGroups', []) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc()) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + for sg in security_groups: + groups[sg['GroupId']] = sg + groupName = sg['GroupName'] + if groupName in groups: + # Prioritise groups from the current VPC + # even if current VPC is EC2-Classic + if groups[groupName].get('VpcId') == vpc_id: + # Group saved already matches current VPC, change nothing + pass + elif vpc_id is None and groups[groupName].get('VpcId') is None: + # We're in EC2 classic, and the group already saved is as well + # No VPC groups can be used alongside EC2 classic groups + pass + else: + # the current SG stored has no direct match, so we can replace it + groups[groupName] = sg + else: + groups[groupName] = sg + + if group_id and sg['GroupId'] == group_id: + group = sg + elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id): + group = sg + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + try: + if not module.check_mode: + client.delete_security_group(GroupId=group['GroupId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group + if group['Description'] != description: + module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.") + + # if the group doesn't exist, create it now + else: + # no match found, create it + if not module.check_mode: + params = dict(GroupName=name, Description=description) + if vpc_id: + params['VpcId'] = vpc_id + group = client.create_security_group(**params) + # When a group is created, an egress_rule ALLOW ALL + # to 0.0.0.0/0 is added automatically but it's not + # reflected in the object returned by the AWS API + # call. We re-read the group for getting an updated object + # amazon sometimes takes a couple seconds to update the security group so wait till it exists + while True: + group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] + if group.get('VpcId') and not group.get('IpPermissionsEgress'): + pass + else: + break + + changed = True + + if tags is not None: + current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', [])) + tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) + if tags_to_delete: + try: + client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + # Add/update tags + if tags_need_modify: + try: + client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + else: + module.fail_json(msg="Unsupported state requested: %s" % state) + + # create a lookup for all existing rules on the group + ip_permission = [] + if group: + # Manage ingress rules + groupRules = {} + add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules) + # Now, go through all provided rules and ensure they are there. 
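+ # Reconciliation is key-based: every existing grant was stored in
+ # groupRules under a key built by make_rule_key(). Each desired rule is
+ # keyed the same way; a hit means the rule already exists, so it is
+ # removed from the lookup and kept, while a miss means it still needs
+ # to be authorized. Whatever remains in groupRules afterwards is
+ # revoked below when purge_rules is true.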
+ if rules is not None: + for rule in rules: + validate_rule(module, rule) + group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name, + group, groups, vpc_id) + if target_group_created: + changed = True + + if rule['proto'] in ('all', '-1', -1): + rule['proto'] = -1 + rule['from_port'] = None + rule['to_port'] = None + + if group_id: + rule_id = make_rule_key('in', rule, group['GroupId'], group_id) + if rule_id in groupRules: + del groupRules[rule_id] + else: + if not module.check_mode: + ip_permission = serialize_group_grant(group_id, rule) + if ip_permission: + ips = ip_permission + if vpc_id: + [useridpair.update({'VpcId': vpc_id}) for useridpair in + ip_permission.get('UserIdGroupPairs', [])] + try: + client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips]) + except botocore.exceptions.ClientError as e: + module.fail_json( + msg="Unable to authorize ingress for group %s security group '%s' - %s" % + (group_id, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + elif ip: + # Convert ip to list we can iterate over + if ip and not isinstance(ip, list): + ip = [ip] + + changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission, + module, rule, "ipv4") + elif ipv6: + # Convert ip to list we can iterate over + if not isinstance(ipv6, list): + ipv6 = [ipv6] + # If rule already exists, don't later delete it + changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission, + module, rule, "ipv6") + # Finally, remove anything left in the groupRules -- these will be defunct rules + if purge_rules: + for (rule, grant) in groupRules.values(): + ip_permission = serialize_revoke(grant, rule) + if not module.check_mode: + try: + client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission]) + except botocore.exceptions.ClientError as e: + module.fail_json( + msg="Unable to revoke ingress for security group '%s' - %s" % + (group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + # Manage egress rules + groupRules = {} + add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules) + # Now, go through all provided rules and ensure they are there. 
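+ # Egress rules are reconciled with the same key-based lookup as
+ # ingress. When no rules_egress are supplied for a VPC group, the
+ # default allow-all-out rule is preserved rather than revoked (see the
+ # default_egress_rule handling below).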
+ if rules_egress is not None: + for rule in rules_egress: + validate_rule(module, rule) + group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name, + group, groups, vpc_id) + if target_group_created: + changed = True + + if rule['proto'] in ('all', '-1', -1): + rule['proto'] = -1 + rule['from_port'] = None + rule['to_port'] = None + + if group_id: + rule_id = make_rule_key('out', rule, group['GroupId'], group_id) + if rule_id in groupRules: + del groupRules[rule_id] + else: + if not module.check_mode: + ip_permission = serialize_group_grant(group_id, rule) + if ip_permission: + ips = ip_permission + if vpc_id: + [useridpair.update({'VpcId': vpc_id}) for useridpair in + ip_permission.get('UserIdGroupPairs', [])] + try: + client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips]) + except botocore.exceptions.ClientError as e: + module.fail_json( + msg="Unable to authorize egress for group %s security group '%s' - %s" % + (group_id, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + elif ip: + # Convert ip to list we can iterate over + if not isinstance(ip, list): + ip = [ip] + changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip, + ip_permission, module, rule, "ipv4") + elif ipv6: + # Convert ip to list we can iterate over + if not isinstance(ipv6, list): + ipv6 = [ipv6] + # If rule already exists, don't later delete it + changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6, + ip_permission, module, rule, "ipv6") + elif vpc_id is not None: + # when no egress rules are specified and we're in a VPC, + # we add in a default allow all out rule, which was the + # default behavior before egress rules were added + default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0' + if default_egress_rule not in groupRules: + if not module.check_mode: + ip_permission = [{'IpProtocol': '-1', + 'IpRanges': [{'CidrIp': '0.0.0.0/0'}] + } + ] + try: + client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" % + ('0.0.0.0/0', + group['GroupName'], + e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + else: + # make sure the default egress rule is not removed + del groupRules[default_egress_rule] + + # Finally, remove anything left in the groupRules -- these will be defunct rules + if purge_rules_egress and vpc_id is not None: + for (rule, grant) in groupRules.values(): + # we shouldn't be revoking 0.0.0.0 egress + if grant != '0.0.0.0/0': + ip_permission = serialize_revoke(grant, rule) + if not module.check_mode: + try: + client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" % + (grant, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + if group: + security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] + security_group = camel_dict_to_snake_dict(security_group) + security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []), + tag_name_key_name='key', tag_value_key_name='value') + 
module.exit_json(changed=changed, **security_group)
+ else:
+ module.exit_json(changed=changed, group_id=None)
+
+
+if __name__ == '__main__':
+ main() diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py index e355266b0..e355266b0 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ b/roles/lib_utils/library/openshift_cert_expiry.py diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py index 440b8ec28..440b8ec28 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/lib_utils/library/openshift_container_binary_sync.py diff --git a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py b/roles/lib_utils/library/os_firewall_manage_iptables.py index aeee3ede8..aeee3ede8 100755..100644 --- a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/lib_utils/library/os_firewall_manage_iptables.py diff --git a/roles/lib_utils/library/rpm_q.py b/roles/lib_utils/library/rpm_q.py new file mode 100644 index 000000000..3dec50fc2 --- /dev/null +++ b/roles/lib_utils/library/rpm_q.py @@ -0,0 +1,72 @@ +#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tobias Florek <tob@butter.sh>
+# Licensed under the terms of the MIT License
+"""
+An ansible module to query the RPM database. For use when yum/dnf are not
+available.
+"""
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+DOCUMENTATION = """
+---
+module: rpm_q
+short_description: Query the RPM database
+author: Tobias Florek
+options:
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ state:
+ description:
+ - Whether the package is supposed to be installed or not
+ choices: [present, absent]
+ default: present
+"""
+
+EXAMPLES = """
+- rpm_q: name=ansible state=present
+- rpm_q: name=ansible state=absent
+"""
+
+RPM_BINARY = '/bin/rpm'
+
+
+def main():
+ """
+ Checks rpm -q for the named package and returns the installed packages
+ or None if not installed.
+ """
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command([RPM_BINARY, '-q', name])
+
+ installed = out.rstrip('\n').split('\n')
+
+ if rc != 0:
+ if state == 'present':
+ module.fail_json(msg="%s is not installed" % name, stdout=out, stderr=err, rc=rc)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'present':
+ module.exit_json(changed=False, installed_versions=installed)
+ else:
+ module.fail_json(msg="%s is installed" % name, installed_versions=installed)
+
+
+if __name__ == '__main__':
+ main() diff --git a/roles/lib_utils/library/swapoff.py b/roles/lib_utils/library/swapoff.py new file mode 100644 index 000000000..925eeb17d --- /dev/null +++ b/roles/lib_utils/library/swapoff.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python
+# pylint: disable=missing-docstring
+#
+# Copyright 2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: swapoff
+
+short_description: Disable swap and comment swap entries out of /etc/fstab
+
+version_added: "2.4"
+
+description:
+ - This module disables swap and comments swap entries out of /etc/fstab
+
+author:
+ - "Michael Gugino <mgugino@redhat.com>"
+'''
+
+EXAMPLES = '''
+# This module takes no options
+- name: Disable Swap
+ swapoff: {}
+'''
+
+
+def check_swap_in_fstab(module):
+ '''Check for uncommented swap entries in fstab'''
+ res = subprocess.call(['grep', '^[^#].*swap', '/etc/fstab'])
+
+ if res == 2:
+ # rc 2 == cannot open file.
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': 'unable to read /etc/fstab',
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ elif res == 1:
+ # No grep match, fstab looks good.
+ return False
+ elif res == 0:
+ # There is an uncommented swap entry in fstab.
+ return True
+ else:
+ # Some other grep error code, we shouldn't get here.
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': 'unknown problem with grep "^[^#].*swap" /etc/fstab',
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def check_swapon_status(module):
+ '''Check if swap is actually in use.'''
+ try:
+ res = subprocess.check_output(['swapon', '--show'])
+ except subprocess.CalledProcessError:
+ # swapon exited nonzero; we shouldn't get here.
+ result = {'failed': True, + 'changed': False, + 'msg': 'unable to execute swapon --show', + 'state': 'unknown'} + module.fail_json(**result) + return 'NAME' in str(res) + + +def comment_swap_fstab(module): + '''Comment out swap lines in /etc/fstab''' + res = subprocess.call(['sed', '-i.bak', 's/^[^#].*swap.*/#&/', '/etc/fstab']) + if res: + result = {'failed': True, + 'changed': False, + 'msg': 'sed failed to comment swap in /etc/fstab', + 'state': 'unknown'} + module.fail_json(**result) + + +def run_swapoff(module, changed): + '''Run swapoff command''' + res = subprocess.call(['swapoff', '--all']) + if res: + result = {'failed': True, + 'changed': changed, + 'msg': 'swapoff --all returned {}'.format(str(res)), + 'state': 'unknown'} + module.fail_json(**result) + + +def run_module(): + '''Run this module''' + module = AnsibleModule( + supports_check_mode=False, + argument_spec={} + ) + changed = False + + swap_fstab_res = check_swap_in_fstab(module) + swap_is_inuse_res = check_swapon_status(module) + + if swap_fstab_res: + comment_swap_fstab(module) + changed = True + + if swap_is_inuse_res: + run_swapoff(module, changed) + changed = True + + result = {'changed': changed} + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py new file mode 100644 index 000000000..4858c5ec6 --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py @@ -0,0 +1,143 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, regions_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + predicates = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is None: + if 'short_version' in variables['openshift']['common']: + short_version = variables['openshift']['common']['short_version'] + elif 'openshift_release' in variables: + release = variables['openshift_release'] + if release.startswith('v'): + short_version = release[1:] + else: + short_version = release + short_version = '.'.join(short_version.split('.')[0:2]) + elif 'openshift_version' in variables: + version = variables['openshift_version'] + short_version = '.'.join(version.split('.')[0:2]) + else: + # pylint: disable=line-too-long + raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") + if deployment_type == 'origin': + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + elif deployment_type == 'openshift-enterprise': + if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown 
short_version %s" % short_version) + else: + raise AnsibleError("Unknown deployment_type %s" % deployment_type) + + if deployment_type == 'origin': + # convert short_version to enterprise short_version + short_version = re.sub('^1.', '3.', short_version) + + if short_version == 'latest': + short_version = '3.9' + + # Predicates ordered according to OpenShift Origin source: + # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go + + if short_version == '3.1': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'MatchNodeSelector'}, + ]) + + if short_version == '3.2': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MatchNodeSelector'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'} + ]) + + if short_version == '3.3': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'} + ]) + + if short_version == '3.4': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'MatchInterPodAffinity'} + ]) + + if short_version in ['3.5', '3.6']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + ]) + + if short_version in ['3.7', '3.8', '3.9']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, + ]) + + if regions_enabled: + region_predicate = { + 'name': 'Region', + 'argument': { + 'serviceAffinity': { + 'labels': ['region'] + } + } + } + predicates.append(region_predicate) + + return predicates diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py new file mode 100644 index 000000000..18e1b2e0c --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py @@ -0,0 +1,117 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, zones_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + priorities = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be 
run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ # convert short_version to enterprise short_version
+ short_version = re.sub('^1.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ if short_version == '3.1':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.2':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.3':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.4':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']:
+ priorities.extend([
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1},
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if zones_enabled:
+ zone_priority = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+ }
+ priorities.append(zone_priority)
+
+ return
priorities diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/lib_utils/test/conftest.py index df948fff0..aabdd4fa1 100644 --- a/roles/openshift_certificate_expiry/test/conftest.py +++ b/roles/lib_utils/test/conftest.py @@ -1,7 +1,15 @@ # pylint: disable=missing-docstring,invalid-name,redefined-outer-name +import os import pytest +import sys + from OpenSSL import crypto +sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402 +from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402 + # Parameter list for valid_cert fixture VALID_CERTIFICATE_PARAMS = [ { @@ -117,3 +125,48 @@ def valid_cert(request, ca): 'cert_file': cert_file, 'cert': cert } + + +@pytest.fixture() +def predicates_lookup(): + return PredicatesLookupModule() + + +@pytest.fixture() +def priorities_lookup(): + return PrioritiesLookupModule() + + +@pytest.fixture() +def facts(): + return { + 'openshift': { + 'common': {} + } + } + + +@pytest.fixture(params=[True, False]) +def regions_enabled(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def zones_enabled(request): + return request.param + + +def v_prefix(release): + """Prefix a release number with 'v'.""" + return "v" + release + + +def minor(release): + """Add a suffix to release, making 'X.Y' become 'X.Y.Z'.""" + return release + ".1" + + +@pytest.fixture(params=[str, v_prefix, minor]) +def release_mod(request): + """Modifies a release string to alternative valid values.""" + return request.param diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py index e8da1e04a..e8da1e04a 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py +++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py index 140cced73..140cced73 100644 --- a/roles/openshift_master_facts/test/conftest.py +++ b/roles/lib_utils/test/openshift_master_facts_conftest.py diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py index 11aad9f03..11aad9f03 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py +++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py index 527fc9ff4..527fc9ff4 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py +++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py index 8a521a765..8a521a765 100644 --- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py +++ b/roles/lib_utils/test/test_fakeopensslclasses.py diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py index 98792e2ee..98792e2ee 100644 --- 
a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py +++ b/roles/lib_utils/test/test_load_and_handle_cert.py diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml index 32d3acb86..50ad7e373 100644 --- a/roles/nickhammond.logrotate/tasks/main.yml +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: nickhammond.logrotate | Install logrotate package: name=logrotate state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded - name: nickhammond.logrotate | Setup logrotate.d scripts template: diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml index 36838debc..0d0b8d1a5 100644 --- a/roles/nuage_ca/meta/main.yml +++ b/roles/nuage_ca/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info: - cloud - system dependencies: -- { role: nuage_common } +- role: nuage_common diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml index 46929fa1f..cb7844bc5 100644 --- a/roles/nuage_ca/tasks/main.yaml +++ b/roles/nuage_ca/tasks/main.yaml @@ -1,9 +1,9 @@ --- - name: Install openssl package: name=openssl state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded - name: Create CA directory file: path="{{ nuage_ca_dir }}" state=directory diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml index 6c8c9f8d2..ec42518ff 100644 --- a/roles/nuage_common/tasks/main.yml +++ b/roles/nuage_common/tasks/main.yml @@ -2,17 +2,17 @@ - name: Set the Nuage plugin openshift directory fact to handle Atomic host install set_fact: nuage_node_plugin_dir: /var/usr/share/vsp-openshift - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage CNI network config directory fact to handle Atomic host install set_fact: nuage_node_cni_netconf_dir: /var/etc/cni/net.d/ - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage CNI binary directory fact to handle Atomic host install set_fact: nuage_node_cni_bin_dir: /var/opt/cni/bin/ - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Assure CNI plugin config dir exists before daemon set install become: yes diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index e2f7af5ad..643800680 100644 --- a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -14,4 +14,4 @@ galaxy_info: - system dependencies: - role: lib_openshift -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index c264427de..29e16b6f8 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -5,22 +5,22 @@ - name: Set the Nuage certificate directory fact for Atomic hosts set_fact: cert_output_dir: /var/usr/share/nuage-openshift-monitor - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage kubeconfig file path fact for Atomic hosts set_fact: kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage monitor yaml location fact for Atomic hosts set_fact: kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml - when: 
openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage monitor certs location fact for Atomic hosts set_fact: nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/ - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage master config directory for daemon sets install set_fact: @@ -35,27 +35,27 @@ - name: Set the Nuage CNI plugin binary directory for daemon sets install set_fact: nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create directory /usr/share/nuage-openshift-monitor become: yes file: path=/usr/share/nuage-openshift-monitor state=directory - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool - name: Create directory /var/usr/share/nuage-openshift-monitor become: yes file: path=/var/usr/share/nuage-openshift-monitor state=directory - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create directory /var/usr/bin for monitor binary on atomic become: yes file: path=/var/usr/bin state=directory - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create CNI bin directory /var/opt/cni/bin become: yes file: path=/var/opt/cni/bin state=directory - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create the log directory become: yes diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml index fbf2c4f8d..9127b33d6 100644 --- a/roles/nuage_master/tasks/serviceaccount.yml +++ b/roles/nuage_master/tasks/serviceaccount.yml @@ -19,7 +19,7 @@ - name: Generate the node client config command: > - {{ openshift.common.client_binary }} adm create-api-client-config + {{ openshift_client_binary }} adm create-api-client-config --certificate-authority={{ openshift_master_ca_cert }} --client-dir={{ cert_output_dir }} --master={{ openshift.master.api_url }} diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml index 9b0315054..0480502b7 100644 --- a/roles/nuage_node/meta/main.yml +++ b/roles/nuage_node/meta/main.yml @@ -15,4 +15,4 @@ galaxy_info: dependencies: - role: nuage_common - role: nuage_ca -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index c6b7a9b10..1f1bd1653 100644 --- a/roles/nuage_node/tasks/main.yaml +++ b/roles/nuage_node/tasks/main.yaml @@ -2,17 +2,17 @@ - name: Set the Nuage plugin openshift directory fact for Atomic hosts set_fact: vsp_openshift_dir: /var/usr/share/vsp-openshift - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage CNI binary directory fact for Atomic hosts set_fact: cni_bin_dir: /var/opt/cni/bin/ - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage plugin certs directory fact for Atomic hosts set_fact: nuage_plugin_crt_dir: /var/usr/share/vsp-openshift - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Assure CNI conf dir exists become: yes @@ -36,7 +36,7 @@ - name: Add additional Docker mounts for Nuage for atomic hosts become: yes lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}" - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Restart node services command: /bin/true diff --git 
a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md index 4aca5c7a8..de73ab01d 100644 --- a/roles/openshift_aws/README.md +++ b/roles/openshift_aws/README.md @@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions against an AWS account for the purposes of dynamically building an openshift cluster. -This role is primarily intended to be used with "include_role" and "tasks_from". +This role is primarily intended to be used with "import_role" and "tasks_from". -include_role can be called from the tasks section in a play. See example +import_role can be called from the tasks section in a play. See example playbook below for reference. These task-areas are: @@ -40,7 +40,7 @@ Example Playbook ---------------- ```yaml -- include_role: +- import_role: name: openshift_aws tasks_from: vpc.yml vars: diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 42ef22846..178e0849c 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True openshift_aws_create_launch_config: True openshift_aws_create_scale_group: True -openshift_aws_current_version: '' -openshift_aws_new_version: '' - +openshift_aws_node_group_upgrade: False openshift_aws_wait_for_ssh: True openshift_aws_clusterid: default @@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' openshift_aws_iam_cert_key_path: '' -openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift" openshift_aws_iam_role_name: openshift_node_describe_instances openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}" @@ -34,86 +31,110 @@ openshift_aws_ami_name: openshift-gi openshift_aws_base_ami_name: ami_base openshift_aws_launch_config_bootstrap_token: '' -openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}" openshift_aws_users: [] openshift_aws_ami_tags: bootstrap: "true" openshift-created: "true" - clusterid: "{{ openshift_aws_clusterid }}" parent: "{{ openshift_aws_base_ami | default('unknown') }}" openshift_aws_s3_mode: create openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry" -openshift_aws_elb_health_check: - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 - openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}" -openshift_aws_elb_name_dict: - master: - external: "{{ openshift_aws_elb_basename }}-master-external" - internal: "{{ openshift_aws_elb_basename }}-master-internal" - infra: - external: "{{ openshift_aws_elb_basename }}-infra" - -openshift_aws_elb_idle_timout: 400 -openshift_aws_elb_scheme: internet-facing -openshift_aws_elb_cert_arn: '' openshift_aws_elb_dict: master: external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 - - protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}" + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: "{{ openshift_master_api_port | default(8443) }}" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + 
instance_port: "{{ openshift_master_api_port | default(8443) }}" + - protocol: ssl + load_balancer_port: "{{ openshift_master_api_port | default(8443) }}" + instance_protocol: ssl + instance_port: "{{ openshift_master_api_port | default(8443) }}" + ssl_certificate_id: '' + name: "{{ openshift_aws_elb_basename }}-master-external" + tags: "{{ openshift_aws_kube_tags }}" internal: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 80 - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: "{{ openshift_master_api_port | default(8443) }}" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 80 + - protocol: tcp + load_balancer_port: "{{ openshift_master_api_port | default(8443) }}" + instance_protocol: tcp + instance_port: "{{ openshift_master_api_port | default(8443) }}" + name: "{{ openshift_aws_elb_basename }}-master-internal" + tags: "{{ openshift_aws_kube_tags }}" infra: external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 443 - proxy_protocol: True - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 - proxy_protocol: True + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + name: "{{ openshift_aws_elb_basename }}-infra" + tags: "{{ openshift_aws_kube_tags }}" openshift_aws_node_group_config_master_volumes: +- device_name: /dev/sda1 + volume_size: 100 + device_type: gp2 + delete_on_termination: False - device_name: /dev/sdb volume_size: 100 device_type: gp2 delete_on_termination: False openshift_aws_node_group_config_node_volumes: +- device_name: /dev/sda1 + volume_size: 100 + device_type: gp2 + delete_on_termination: True - device_name: /dev/sdb volume_size: 100 device_type: gp2 delete_on_termination: True +# build_instance_tags is a custom filter in role lib_utils openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_group_termination_policy: Default openshift_aws_node_group_replace_instances: [] @@ -124,6 +145,33 @@ openshift_aws_ami_map: infra: "{{ openshift_aws_ami }}" compute: "{{ openshift_aws_ami }}" +openshift_aws_master_group: +- name: "{{ openshift_aws_clusterid }} master group" + group: master + tags: + host-type: master + sub-host-type: default + runtime: docker + +openshift_aws_node_groups: +- name: "{{ openshift_aws_clusterid }} compute group" + group: compute + tags: + host-type: node + sub-host-type: compute + runtime: docker + +- name: "{{ openshift_aws_clusterid }} infra group" + group: infra + tags: + host-type: node + sub-host-type: infra + runtime: docker + +openshift_aws_created_asgs: [] +openshift_aws_current_asgs: [] + +# these will be used during upgrade openshift_aws_master_group_config: # The 'master' key is always required here. 
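+# Each entry under the *_group_config dicts below describes one AWS
+# autoscaling group. The per-group 'tags' and 'version' keys were
+# removed in this change; instance tags now come from the
+# openshift_aws_master_group and openshift_aws_node_groups lists above.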
master: @@ -135,18 +183,13 @@ openshift_aws_master_group_config: min_size: 3 max_size: 3 desired_size: 3 - tags: - host-type: master - sub-host-type: default - runtime: docker - version: "{{ openshift_aws_new_version }}" wait_for_instances: True termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" policy_name: "{{ openshift_aws_iam_role_policy_name }}" policy_json: "{{ openshift_aws_iam_role_policy_json }}" - elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}" + elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}" openshift_aws_node_group_config: # The 'compute' key is always required here. @@ -159,11 +202,6 @@ openshift_aws_node_group_config: min_size: 3 max_size: 100 desired_size: 3 - tags: - host-type: node - sub-host-type: compute - runtime: docker - version: "{{ openshift_aws_new_version }}" termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" @@ -179,21 +217,14 @@ openshift_aws_node_group_config: min_size: 2 max_size: 20 desired_size: 2 - tags: - host-type: node - sub-host-type: infra - runtime: docker - version: "{{ openshift_aws_new_version }}" termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" policy_name: "{{ openshift_aws_iam_role_policy_name }}" policy_json: "{{ openshift_aws_iam_role_policy_json }}" - elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}" - -openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" -openshift_aws_elb_az_load_balancing: False + elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}" +# build_instance_tags is a custom filter in role lib_utils openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}" @@ -236,8 +267,8 @@ openshift_aws_node_security_groups: to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp - from_port: 443 - to_port: 443 + from_port: "{{ openshift_master_api_port | default(8443) }}" + to_port: "{{ openshift_master_api_port | default(8443) }}" cidr_ip: 0.0.0.0/0 compute: name: "{{ openshift_aws_clusterid }}_compute" @@ -251,8 +282,8 @@ openshift_aws_node_security_groups: to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp - from_port: 443 - to_port: 443 + from_port: "{{ openshift_master_api_port | default(8443) }}" + to_port: "{{ openshift_master_api_port | default(8443) }}" cidr_ip: 0.0.0.0/0 - proto: tcp from_port: 30000 @@ -265,8 +296,6 @@ openshift_aws_node_security_groups: openshift_aws_vpc_tags: Name: "{{ openshift_aws_vpc_name }}" -openshift_aws_subnet_az: us-east-1c - openshift_aws_vpc: name: "{{ openshift_aws_vpc_name }}" cidr: 172.31.0.0/16 @@ -274,30 +303,20 @@ openshift_aws_vpc: us-east-1: - cidr: 172.31.48.0/20 az: "us-east-1c" + default_az: true - cidr: 172.31.32.0/20 az: "us-east-1e" - cidr: 172.31.16.0/20 az: "us-east-1a" +openshift_aws_subnet_az: "{{ openshift_aws_vpc.subnets[openshift_aws_region] | get_default_az }}" + openshift_aws_node_run_bootstrap_startup: True openshift_aws_node_user_data: '' 
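(Editorial aside, not part of the patch.) Several of the new defaults above replace the deleted `openshift_aws_elb_name_dict` lookup with a JMESPath query, e.g. `elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"`. A minimal sketch of how that expression resolves, using the `jmespath` package that Ansible's `json_query` filter itself requires; the dict literal is an illustrative stand-in with the Jinja templating already rendered:

```python
# Sketch (not part of the patch): how the json_query expression used for
# the new `elbs` lists resolves against the restructured
# openshift_aws_elb_dict. Requires the `jmespath` package.
import jmespath

# Minimal stand-in for openshift_aws_elb_dict after template rendering.
elb_dict = {
    "master": {
        "external": {"name": "mycluster-master-external"},
        "internal": {"name": "mycluster-master-internal"},
    },
    "infra": {
        "external": {"name": "mycluster-infra"},
    },
}

# 'master.[*]' collects the dict's values into a nested list, '[0]'
# unwraps it, and '[*].name' projects out each ELB name (in this
# insertion order).
print(jmespath.search("master.[*][0][*].name", elb_dict))
# -> ['mycluster-master-external', 'mycluster-master-internal']
print(jmespath.search("infra.[*][0][*].name", elb_dict))
# -> ['mycluster-infra']
```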
openshift_aws_node_config_namespace: openshift-node -openshift_aws_node_groups: nodes - openshift_aws_masters_groups: masters,etcd,nodes -# If creating extra node groups, you'll need to define all of the following - -# The format is the same as openshift_aws_node_group_config, but the top-level -# key names should be different (ie, not == master or infra). -# openshift_aws_node_group_config_extra: {} - -# This variable should look like openshift_aws_launch_config_security_groups -# and contain a one-to-one mapping of top level keys that are defined in -# openshift_aws_node_group_config_extra. -# openshift_aws_launch_config_security_groups_extra: {} - -# openshift_aws_node_security_groups_extra: {} - -# openshift_aws_ami_map_extra: {} +# By default, don't delete things like the shared IAM instance +# profile and uploaded ssh keys +openshift_aws_enable_uninstall_shared_objects: False diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py deleted file mode 100644 index e707abd3f..000000000 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift_aws -''' - - -class FilterModule(object): - ''' Custom ansible filters for use by openshift_aws role''' - - @staticmethod - def scale_groups_match_capacity(scale_group_info): - ''' This function will verify that the scale group instance count matches - the scale group desired capacity - - ''' - for scale_group in scale_group_info: - if scale_group['desired_capacity'] != len(scale_group['instances']): - return False - - return True - - @staticmethod - def build_instance_tags(clusterid): - ''' This function will return a dictionary of the instance tags. - - The main desire to have this inside of a filter_plugin is that we - need to build the following key. 
- - {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} - - ''' - tags = {'clusterid': clusterid, - 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} - - return tags - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {'build_instance_tags': self.build_instance_tags, - 'scale_groups_match_capacity': self.scale_groups_match_capacity} diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml index ae320962f..db30fe5c9 100644 --- a/roles/openshift_aws/tasks/accept_nodes.yml +++ b/roles/openshift_aws/tasks/accept_nodes.yml @@ -1,6 +1,8 @@ --- +- include_tasks: setup_master_group.yml + - name: fetch masters - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region | default('us-east-1') }}" filters: "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, @@ -11,7 +13,7 @@ until: "'instances' in mastersout and mastersout.instances|length > 0" - name: fetch new node instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, @@ -22,13 +24,18 @@ delay: 3 until: "'instances' in instancesout and instancesout.instances|length > 0" -- debug: +- name: Dump the private dns names + debug: msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" +- name: Dump the master public ip address + debug: + msg: "{{ mastersout.instances[0].public_ip_address }}" + - name: approve nodes oc_adm_csr: #approve_all: True nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" timeout: 60 register: nodeout - delegate_to: "{{ mastersout.instances[0].public_ip_address }}" + delegate_to: "{{ groups.masters.0 }}" diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 2c1e88cfb..a9f9cc3c4 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -1,6 +1,4 @@ --- -# This task file expects l_nodes_to_build to be passed in. - # When openshift_aws_use_custom_ami is '' then # we retrieve the latest build AMI. # Then set openshift_aws_ami to the ami. @@ -26,6 +24,36 @@ # Need to set epoch time in one place to use for launch_config and scale_group - set_fact: l_epoch_time: "{{ ansible_date_time.epoch }}" +# +# query asg's and determine if we need to create the others. 
+# if we find more than 1 for each type, then exit +- name: query all asg's for this cluster + ec2_asg_facts: + region: "{{ openshift_aws_region }}" + tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(openshift_aws_node_group.tags) }}" + register: asgs + +- fail: + msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}" + when: + - asgs.results|length > 1 + +- debug: + msg: "{{ asgs }}" + +- name: set the value for the deployment_serial and the current asgs + set_fact: + # scale_groups_serial is a custom filter in role lib_utils + l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}" + openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}" + +- name: dump deployment serial + debug: + msg: "Deployment serial: {{ l_deployment_serial }}" + +- name: dump current_asgs + debug: + msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}" - when: openshift_aws_create_iam_role include_tasks: iam_role.yml diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index 5d371ec7a..d8257cf31 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -5,18 +5,18 @@ - name: "Create ELB {{ l_elb_dict_item.key }}" ec2_elb_lb: - name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}" + name: "{{ item.value.name }}" state: present - cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}" + cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}" security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}" - idle_timeout: "{{ openshift_aws_elb_idle_timout }}" + idle_timeout: "{{ item.value.idle_timout }}" region: "{{ openshift_aws_region }}" subnets: - "{{ subnetout.subnets[0].id }}" - health_check: "{{ openshift_aws_elb_health_check }}" - listeners: "{{ item.value }}" - scheme: "{{ openshift_aws_elb_scheme }}" - tags: "{{ openshift_aws_elb_tags }}" + health_check: "{{ item.value.health_check }}" + listeners: "{{ item.value.listeners }}" + scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}" + tags: "{{ item.value.tags }}" wait: True register: new_elb with_dict: "{{ l_elb_dict_item.value }}" diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml index d9910d938..cf3bb28fb 100644 --- a/roles/openshift_aws/tasks/iam_role.yml +++ b/roles/openshift_aws/tasks/iam_role.yml @@ -13,11 +13,10 @@ ##### - name: Create an iam role iam_role: - name: "{{ item.value.iam_role }}" + name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}" assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}" state: "{{ openshift_aws_iam_role_state | default('present') }}" - when: item.value.iam_role is defined - with_dict: "{{ l_nodes_to_build }}" + when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined ##### # The second part of this task file is linking the role to a policy @@ -28,9 +27,8 @@ - name: create an iam policy iam_policy: iam_type: role - iam_name: "{{ item.value.iam_role }}" - policy_json: "{{ item.value.policy_json }}" - policy_name: "{{ item.value.policy_name }}" + iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}" + policy_json: "{{ 
l_node_group_config[openshift_aws_node_group.group].policy_json }}" + policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}" state: "{{ openshift_aws_iam_role_state | default('present') }}" - when: item.value.iam_role is defined - with_dict: "{{ l_nodes_to_build }}" + when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]" diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index fed80b7eb..6f78ee00a 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -1,15 +1,26 @@ --- -- fail: - msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image." - when: - - openshift_aws_ami is undefined +- name: fetch the security groups for launch config + ec2_group_facts: + filters: + group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + region: "{{ openshift_aws_region }}" + register: ec2sgs -- fail: - msg: "Ensure that openshift_deployment_type is defined." - when: - - openshift_deployment_type is undefined - -- include_tasks: launch_config_create.yml - with_dict: "{{ l_nodes_to_build }}" - loop_control: - loop_var: launch_config_item +# Create the scale group config +- name: Create the node scale group launch config + ec2_lc: + name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}" + region: "{{ openshift_aws_region }}" + image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}" + instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" + instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and + l_node_group_config[openshift_aws_node_group.group].iam_role != '' and + openshift_aws_create_iam_role + else omit }}" + user_data: "{{ lookup('template', 'user_data.j2') }}" + key_name: "{{ openshift_aws_ssh_key_name }}" + ebs_optimized: False + volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}" + assign_public_ip: True diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml deleted file mode 100644 index f7f0f0953..000000000 --- a/roles/openshift_aws/tasks/launch_config_create.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: fetch the security groups for launch config - ec2_group_facts: - filters: - group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}" - vpc-id: "{{ vpcout.vpcs[0].id }}" - region: "{{ openshift_aws_region }}" - register: ec2sgs - -# Create the scale group config -- name: Create the node scale group launch config - ec2_lc: - name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}" - region: "{{ openshift_aws_region }}" - image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}" - instance_type: "{{ launch_config_item.value.instance_type }}" - security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | 
map(attribute='group_id')| list) }}" - instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and - launch_config_item.value.iam_role != '' and - openshift_aws_create_iam_role - else omit }}" - user_data: "{{ lookup('template', 'user_data.j2') }}" - key_name: "{{ openshift_aws_ssh_key_name }}" - ebs_optimized: False - volumes: "{{ launch_config_item.value.volumes }}" - assign_public_ip: True diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml index 530b0134d..c2e362acd 100644 --- a/roles/openshift_aws/tasks/master_facts.yml +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -3,7 +3,7 @@ ec2_elb_facts: region: "{{ openshift_aws_region }}" names: - - "{{ openshift_aws_elb_name_dict['master']['internal'] }}" + - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}" delegate_to: localhost register: elbs diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index 06f649343..2b5f317d8 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -1,32 +1,16 @@ --- -- when: openshift_aws_create_iam_cert | bool - name: create the iam_cert for elb certificate - include_tasks: iam_cert.yml - -- when: openshift_aws_create_s3 | bool - name: create s3 bucket for registry - include_tasks: s3.yml - - include_tasks: vpc_and_subnet_id.yml -- name: create elbs - include_tasks: elb.yml - with_dict: "{{ openshift_aws_elb_dict }}" - vars: - l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" - l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}" - loop_control: - loop_var: l_elb_dict_item - - name: include scale group creation for master include_tasks: build_node_group.yml + with_items: "{{ openshift_aws_master_group }}" vars: - l_nodes_to_build: "{{ openshift_aws_master_group_config }}" - l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" - l_aws_ami_map: "{{ openshift_aws_ami_map }}" + l_node_group_config: "{{ openshift_aws_master_group_config }}" + loop_control: + loop_var: openshift_aws_node_group - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid }}" diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml new file mode 100644 index 000000000..fcc49c3ea --- /dev/null +++ b/roles/openshift_aws/tasks/provision_elb.yml @@ -0,0 +1,14 @@ +--- +- when: openshift_aws_create_iam_cert | bool + name: create the iam_cert for elb certificate + include_tasks: iam_cert.yml + +- include_tasks: vpc_and_subnet_id.yml + +- name: create elbs + include_tasks: elb.yml + with_dict: "{{ openshift_aws_elb_dict }}" + vars: + l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" + loop_control: + loop_var: l_elb_dict_item diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 8cc75cd0c..786db1570 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -14,11 +14,7 @@ instance_type: m4.xlarge vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}" image: "{{ openshift_aws_base_ami }}" - volumes: - - device_name: /dev/sdb - volume_type: gp2 - volume_size: 100 - delete_on_termination: true + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" wait: 
yes exact_count: 1 count_tag: @@ -27,7 +23,7 @@ Name: "{{ openshift_aws_base_ami_name }}" - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:Name": "{{ openshift_aws_base_ami_name }}" @@ -46,5 +42,5 @@ - name: add host to nodes add_host: - groups: nodes + groups: nodes,g_new_node_hosts name: "{{ instancesout.instances[0].public_dns_name }}" diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml index 041ed0791..9105b5b4c 100644 --- a/roles/openshift_aws/tasks/provision_nodes.yml +++ b/roles/openshift_aws/tasks/provision_nodes.yml @@ -2,25 +2,12 @@ # Get bootstrap config token # bootstrap should be created on first master # need to fetch it and shove it into cloud data -- name: fetch master instances - ec2_remote_facts: - region: "{{ openshift_aws_region }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: - - "'instances' in instancesout" - - instancesout.instances|length > 0 +- include_tasks: setup_master_group.yml - name: slurp down the bootstrap.kubeconfig slurp: src: /etc/origin/master/bootstrap.kubeconfig - delegate_to: "{{ instancesout.instances[0].public_ip_address }}" - remote_user: root + delegate_to: "{{ groups.masters.0 }}" register: bootstrap - name: set_fact for kubeconfig token @@ -31,20 +18,15 @@ - name: include build compute and infra node groups include_tasks: build_node_group.yml + with_items: "{{ openshift_aws_node_groups }}" vars: - l_nodes_to_build: "{{ openshift_aws_node_group_config }}" - l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" - l_aws_ami_map: "{{ openshift_aws_ami_map }}" - -- name: include build node group for extra nodes - include_tasks: build_node_group.yml - when: openshift_aws_node_group_config_extra is defined - vars: - l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}" - l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}" - l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}" + l_node_group_config: "{{ openshift_aws_node_group_config }}" + loop_control: + loop_var: openshift_aws_node_group # instances aren't scaling fast enough here, we need to wait for them - when: openshift_aws_wait_for_ssh | bool name: wait for our new nodes to come up include_tasks: wait_for_groups.yml + vars: + created_asgs: "{{ openshift_aws_created_asgs }}" diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml index 55d1af2b5..a01cde294 100644 --- a/roles/openshift_aws/tasks/remove_scale_group.yml +++ b/roles/openshift_aws/tasks/remove_scale_group.yml @@ -1,10 +1,13 @@ --- +# FIGURE OUT HOW TO REMOVE SCALE GROUPS +# use openshift_aws_current_asgs?? 
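(Editorial aside, not part of the patch.) Because `ec2_asg_facts` below now runs once per ASG name, the registered `qasg` holds one entry per loop iteration, and the `qasg | json_query('results[*]') | sum(attribute='results', start=[])` expression used a few lines down flattens those per-iteration results back into a single list of scale groups before removal. A Python sketch of the same flattening, with illustrative data:

```python
# Sketch (not part of the patch): flattening a looped register the way
# `qasg | json_query('results[*]') | sum(attribute='results', start=[])`
# does. Each outer entry is one loop iteration of ec2_asg_facts; its
# 'results' key holds the scale groups matched by that iteration.
qasg = {
    "results": [
        {"results": [{"auto_scaling_group_name": "mycluster compute group 1"}]},
        {"results": [{"auto_scaling_group_name": "mycluster infra group 1"}]},
    ]
}

# sum() with start=[] concatenates the inner lists, which is what the
# Jinja filter chain evaluates to.
flattened = sum((item["results"] for item in qasg["results"]), [])
print([asg["auto_scaling_group_name"] for asg in flattened])
# -> ['mycluster compute group 1', 'mycluster infra group 1']
```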
- name: fetch the scale groups ec2_asg_facts: region: "{{ openshift_aws_region }}" + name: "^{{ item }}$" tags: - "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, - 'version': openshift_aws_current_version} }}" + "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}" + with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}" register: qasg - name: remove non-master scale groups @@ -14,7 +17,7 @@ name: "{{ item.auto_scaling_group_name }}" when: "'master' not in item.auto_scaling_group_name" register: asg_results - with_items: "{{ qasg.results }}" + with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}" async: 600 poll: 0 diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml index 9cf37c840..ba70bcff6 100644 --- a/roles/openshift_aws/tasks/s3.yml +++ b/roles/openshift_aws/tasks/s3.yml @@ -1,6 +1,6 @@ --- - name: Create an s3 bucket - s3: + aws_s3: bucket: "{{ openshift_aws_s3_bucket_name }}" mode: "{{ openshift_aws_s3_mode }}" region: "{{ openshift_aws_region }}" diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml index 30df7545d..6ce8c58ba 100644 --- a/roles/openshift_aws/tasks/scale_group.yml +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -1,20 +1,30 @@ --- +- name: set node group name + set_fact: + l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}" + - name: Create the scale group ec2_asg: - name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}" - launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}" - health_check_period: "{{ item.value.health_check.period }}" - health_check_type: "{{ item.value.health_check.type }}" - min_size: "{{ item.value.min_size }}" - max_size: "{{ item.value.max_size }}" - desired_capacity: "{{ item.value.desired_size }}" + name: "{{ l_node_group_name }}" + launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}" + health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}" + health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}" + min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}" + max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}" + desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}" region: "{{ openshift_aws_region }}" - termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}" - load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}" - wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}" + termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}" + load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}" + wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}" vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" 
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}" - replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}" + replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] + else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}" tags: - - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}" - with_dict: "{{ l_nodes_to_build }}" + - "{{ openshift_aws_node_group_config_tags + | combine(openshift_aws_node_group.tags) + | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}" + +- name: append the asg name to the openshift_aws_created_asgs fact + set_fact: + openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}" diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index 7a3d0fb68..74877d5c7 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -1,6 +1,6 @@ --- - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:Name": "{{ openshift_aws_base_ami_name }}" @@ -12,7 +12,7 @@ - name: bundle ami ec2_ami: - instance_id: "{{ instancesout.instances.0.id }}" + instance_id: "{{ instancesout.instances.0.instance_id }}" region: "{{ openshift_aws_region }}" state: present description: "This was provisioned {{ ansible_date_time.iso8601 }}" @@ -46,4 +46,4 @@ ec2: state: absent region: "{{ openshift_aws_region }}" - instance_ids: "{{ instancesout.instances.0.id }}" + instance_ids: "{{ instancesout.instances.0.instance_id }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml index 43834079e..0568a0190 100644 --- a/roles/openshift_aws/tasks/security_group.yml +++ b/roles/openshift_aws/tasks/security_group.yml @@ -6,11 +6,27 @@ "tag:Name": "{{ openshift_aws_clusterid }}" register: vpcout -- include_tasks: security_group_create.yml - vars: - l_security_groups: "{{ openshift_aws_node_security_groups }}" +- name: create the node group sgs + oo_ec2_group: + name: "{{ item.value.name}}" + description: "{{ item.value.desc }}" + rules: "{{ item.value.rules if 'rules' in item.value else [] }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + with_dict: "{{ openshift_aws_node_security_groups }}" + +- name: create the k8s sgs for the node group + oo_ec2_group: + name: "{{ item.value.name }}_k8s" + description: "{{ item.value.desc }} for k8s" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + with_dict: "{{ openshift_aws_node_security_groups }}" + register: k8s_sg_create -- include_tasks: security_group_create.yml - when: openshift_aws_node_security_groups_extra is defined - vars: - l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}" +- name: tag sg groups with proper tags + ec2_tag: + tags: "{{ openshift_aws_security_groups_tags }}" + resource: "{{ item.group_id }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml deleted file mode 100644 index ef6060555..000000000 --- 
a/roles/openshift_aws/tasks/security_group_create.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: create the node group sgs - ec2_group: - name: "{{ item.value.name}}" - description: "{{ item.value.desc }}" - rules: "{{ item.value.rules if 'rules' in item.value else [] }}" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - with_dict: "{{ l_security_groups }}" - -- name: create the k8s sgs for the node group - ec2_group: - name: "{{ item.value.name }}_k8s" - description: "{{ item.value.desc }} for k8s" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - with_dict: "{{ l_security_groups }}" - register: k8s_sg_create - -- name: tag sg groups with proper tags - ec2_tag: - tags: "{{ openshift_aws_security_groups_tags }}" - resource: "{{ item.group_id }}" - region: "{{ openshift_aws_region }}" - with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml index 05b68f460..700917ef4 100644 --- a/roles/openshift_aws/tasks/setup_master_group.yml +++ b/roles/openshift_aws/tasks/setup_master_group.yml @@ -8,7 +8,7 @@ msg: "openshift_aws_region={{ openshift_aws_region }}" - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid }}" @@ -19,11 +19,13 @@ delay: 3 until: instancesout.instances|length > 0 +- debug: var=instancesout + - name: add new master to masters group add_host: groups: "{{ openshift_aws_masters_groups }}" name: "{{ item.public_dns_name }}" - hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}" + hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}" with_items: "{{ instancesout.instances }}" - name: wait for ssh to become available diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml index d65fdc2de..14c5246c9 100644 --- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml +++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml @@ -1,11 +1,15 @@ --- -- name: group scale group nodes - ec2_remote_facts: +- name: fetch all created instances + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: - "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}" + "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, + 'instance-state-name': 'running'} }}" register: qinstances +# The building of new and current groups is dependent on having a list of the current asgs and the created ones +# that can be found in the variables: openshift_aws_created_asgs, openshift_aws_current_asgs. If these do not exist, we cannot determine which hosts are +# new and which hosts are current.
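(Editorial aside, not part of the patch.) In other words, the two `add_host` tasks that follow partition the running instances by their `aws:autoscaling:groupName` tag: instances whose group is listed in `openshift_aws_created_asgs` land in `oo_sg_new_nodes`, and those listed in `openshift_aws_current_asgs` land in `oo_sg_current_nodes`. A rough Python sketch of that partitioning; the hostnames and group names are made up:

```python
# Sketch (not part of the patch): how the add_host conditions below
# split instances into new and current node groups using the
# aws:autoscaling:groupName tag.
created_asgs = ["mycluster compute group 2"]
current_asgs = ["mycluster compute group 1", "mycluster compute group 2"]

instances = [
    {"public_dns_name": "a.example.com",
     "tags": {"aws:autoscaling:groupName": "mycluster compute group 1",
              "host-type": "node"}},
    {"public_dns_name": "b.example.com",
     "tags": {"aws:autoscaling:groupName": "mycluster compute group 2",
              "host-type": "node"}},
]

def in_groups(instance, asgs):
    """Mirror the task's when: clauses for one instance."""
    tags = instance["tags"]
    return (bool(asgs)
            and "aws:autoscaling:groupName" in tags
            and tags["aws:autoscaling:groupName"] in asgs
            and "node" in tags["host-type"])

new_nodes = [i["public_dns_name"] for i in instances if in_groups(i, created_asgs)]
current_nodes = [i["public_dns_name"] for i in instances if in_groups(i, current_asgs)]
print(new_nodes)      # -> ['b.example.com']
print(current_nodes)  # -> ['a.example.com', 'b.example.com']
```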
- name: Build new node group add_host: groups: oo_sg_new_nodes @@ -13,10 +17,16 @@ name: "{{ item.public_dns_name }}" hostname: "{{ item.public_dns_name }}" when: - - (item.tags.version | default(False)) == openshift_aws_new_version + - openshift_aws_created_asgs != [] + - "'aws:autoscaling:groupName' in item.tags" + - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs - "'node' in item.tags['host-type']" with_items: "{{ qinstances.instances }}" +- name: dump openshift_aws_current_asgs + debug: + msg: "{{ openshift_aws_current_asgs }}" + - name: Build current node group add_host: groups: oo_sg_current_nodes @@ -24,7 +34,9 @@ name: "{{ item.public_dns_name }}" hostname: "{{ item.public_dns_name }}" when: - - (item.tags.version | default('')) == openshift_aws_current_version + - openshift_aws_current_asgs != [] + - "'aws:autoscaling:groupName' in item.tags" + - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs - "'node' in item.tags['host-type']" with_items: "{{ qinstances.instances }}" diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml new file mode 100644 index 000000000..55d40e8ec --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_security_group.yml @@ -0,0 +1,14 @@ +--- +- name: delete the node group sgs + oo_ec2_group: + state: absent + name: "{{ item.value.name}}" + region: "{{ openshift_aws_region }}" + with_dict: "{{ openshift_aws_node_security_groups }}" + +- name: delete the k8s sgs for the node group + oo_ec2_group: + state: absent + name: "{{ item.value.name }}_k8s" + region: "{{ openshift_aws_region }}" + with_dict: "{{ openshift_aws_node_security_groups }}" diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml new file mode 100644 index 000000000..27e42da53 --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml @@ -0,0 +1,9 @@ +--- +- name: Remove the public keys for the user(s) + ec2_key: + state: absent + name: "{{ item.key_name }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ openshift_aws_users }}" + no_log: True + when: openshift_aws_enable_uninstall_shared_objects | bool diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml new file mode 100644 index 000000000..ecf39f694 --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_vpc.yml @@ -0,0 +1,36 @@ +--- +- name: Fetch the VPC for the vpc.id + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_clusterid }}" + register: vpcout +- debug: + var: vpcout + verbosity: 1 + +- when: vpcout.vpcs | length > 0 + block: + - name: delete the vpc igw + ec2_vpc_igw: + state: absent + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: igw + + - name: delete the vpc subnets + ec2_vpc_subnet: + state: absent + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + cidr: "{{ item.cidr }}" + az: "{{ item.az }}" + with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}" + + - name: Delete AWS VPC + ec2_vpc_net: + state: absent + region: "{{ openshift_aws_region }}" + name: "{{ openshift_aws_clusterid }}" + cidr_block: "{{ openshift_aws_vpc.cidr }}" + register: vpc diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml index c3f86f523..4f4730dd6 100644 --- a/roles/openshift_aws/tasks/upgrade_node_group.yml +++ 
b/roles/openshift_aws/tasks/upgrade_node_group.yml @@ -1,12 +1,22 @@ --- -- fail: - msg: 'Please ensure the current_version and new_version variables are not the same.' +- include_tasks: provision_nodes.yml + vars: + openshift_aws_node_group_upgrade: True when: - - openshift_aws_current_version == openshift_aws_new_version + - openshift_aws_upgrade_provision_nodes | default(True) -- include_tasks: provision_nodes.yml +- debug: var=openshift_aws_current_asgs +- debug: var=openshift_aws_created_asgs + +- name: fail if asg variables aren't set + fail: + msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined." + when: + - openshift_aws_created_asgs == [] + - openshift_aws_current_asgs == [] - include_tasks: accept_nodes.yml + when: openshift_aws_upgrade_accept_nodes | default(True) - include_tasks: setup_scale_group_facts.yml diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml index 9f1a68a2a..3ad876e37 100644 --- a/roles/openshift_aws/tasks/wait_for_groups.yml +++ b/roles/openshift_aws/tasks/wait_for_groups.yml @@ -1,31 +1,42 @@ --- # The idea here is to wait until all scale groups are at # their desired capacity before continuing. -- name: fetch the scale groups +# This is accomplished with a custom filter_plugin and until clause +- name: "fetch the scale groups" ec2_asg_facts: region: "{{ openshift_aws_region }}" tags: - "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}" + "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}" register: qasg - until: qasg.results | scale_groups_match_capacity | bool + # scale_groups_match_capacity is a custom filter in role lib_utils + until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool delay: 10 retries: 60 +- debug: var=openshift_aws_created_asgs + +# how do we guarantee the instances are up?
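(Editorial aside, not part of the patch.) The `until`/`retries`/`delay` clause above re-queries the scale groups every 10 seconds, up to 60 times, until `scale_groups_match_capacity` reports every group at its desired size; its logic matches the copy deleted from openshift_aws_filters.py earlier in this diff, now supplied by lib_utils. A compact Python sketch of that wait, where `fetch_scale_groups` is a stand-in for the `ec2_asg_facts` query:

```python
# Sketch (not part of the patch): the polling loop implemented by the
# until/retries/delay clause above, paired with the
# scale_groups_match_capacity logic shown in the deleted
# openshift_aws_filters.py (now provided by lib_utils).
import time

def scale_groups_match_capacity(scale_group_info):
    """True when every group's instance count equals its desired capacity."""
    return all(len(sg["instances"]) == sg["desired_capacity"]
               for sg in scale_group_info)

def wait_for_groups(fetch_scale_groups, retries=60, delay=10):
    for _ in range(retries):
        groups = fetch_scale_groups()  # stand-in for ec2_asg_facts
        if scale_groups_match_capacity(groups):
            return groups
        time.sleep(delay)
    raise TimeoutError("scale groups never reached desired capacity")
```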
- name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, - 'tag:version': openshift_aws_new_version} }}" + 'tag:aws:autoscaling:groupName': item, + 'instance-state-name': 'running'} }}" + with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}" register: instancesout until: instancesout.instances|length > 0 delay: 5 retries: 60 +- name: dump instances + debug: + msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}" + - name: wait for ssh to become available wait_for: port: 22 host: "{{ item.public_ip_address }}" timeout: 300 search_regex: OpenSSH - with_items: "{{ instancesout.instances }}" + with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}" diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 index fe0fe83d4..bda1334cd 100644 --- a/roles/openshift_aws/templates/user_data.j2 +++ b/roles/openshift_aws/templates/user_data.j2 @@ -7,8 +7,8 @@ write_files: owner: 'root:root' permissions: '0640' content: | - openshift_group_type: {{ launch_config_item.key }} -{% if launch_config_item.key != 'master' %} + openshift_group_type: {{ openshift_aws_node_group.group }} +{% if openshift_aws_node_group.group != 'master' %} - path: /etc/origin/node/bootstrap.kubeconfig owner: 'root:root' permissions: '0640' @@ -19,7 +19,7 @@ runcmd: {% if openshift_aws_node_run_bootstrap_startup %} - [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] {% endif %} -{% if launch_config_item.key != 'master' %} +{% if openshift_aws_node_group.group != 'master' %} - [ systemctl, restart, NetworkManager] - [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] - [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml new file mode 100644 index 000000000..90ee40943 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml @@ -0,0 +1,10 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: bootstrap-autoapprover +roleRef: + kind: ClusterRole + name: system:node-bootstrap-autoapprover +subjects: +- kind: User + name: system:serviceaccount:openshift-infra:bootstrap-autoapprover diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml new file mode 100644 index 000000000..d8143d047 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: system:node-bootstrap-autoapprover +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - delete + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - create + - update diff --git 
a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml new file mode 100644 index 000000000..e22ce6f34 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: bootstrap-autoapprover + namespace: openshift-infra diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml new file mode 100644 index 000000000..dbcedb407 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml @@ -0,0 +1,68 @@ +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: bootstrap-autoapprover + namespace: openshift-infra +spec: + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: bootstrap-autoapprover + spec: + serviceAccountName: bootstrap-autoapprover + terminationGracePeriodSeconds: 1 + containers: + - name: signer + image: openshift/node:v3.7.0-rc.0 + command: + - /bin/bash + - -c + args: + - | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + unset KUBECONFIG + cat <<SCRIPT > /tmp/signer + #!/bin/bash + # + # It will approve any CSR that is not approved yet, and delete any CSR that expired more than 60 seconds + # ago. + # + + set -o errexit + set -o nounset + set -o pipefail + + name=\${1} + condition=\${2} + certificate=\${3} + username=\${4} + + # auto approve + if [[ -z "\${condition}" && ("\${username}" == "system:serviceaccount:openshift-infra:node-bootstrapper" || "\${username}" == "system:node:"* ) ]]; then + oc adm certificate approve "\${name}" + exit 0 + fi + + # check certificate age + if [[ -n "\${certificate}" ]]; then + text="\$( echo "\${certificate}" | base64 -d - )" + if ! echo "\${text}" | openssl x509 -noout; then + echo "error: Unable to parse certificate" 2>&1 + exit 1 + fi + if ! 
echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then + echo "Certificate is expired, deleting" + oc delete csr "\${name}" + fi + exit 0 + fi + SCRIPT + chmod u+x /tmp/signer + + exec oc observe csr --maximum-errors=1 --resync-period=10m -a '{.status.conditions[*].type}' -a '{.status.certificate}' -a '{.spec.username}' -- /tmp/signer diff --git a/roles/openshift_bootstrap_autoapprover/tasks/main.yml b/roles/openshift_bootstrap_autoapprover/tasks/main.yml new file mode 100644 index 000000000..88e9d08e7 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Copy auto-approver config to host + run_once: true + copy: + src: "{{ item }}" + dest: /tmp/openshift-approver/ + owner: root + mode: 0400 + with_fileglob: + - "*.yaml" + +- name: Set auto-approver nodeSelector + run_once: true + yedit: + src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml" + key: spec.template.spec.nodeSelector + value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}" + value_type: list + +- name: Create auto-approver on cluster + run_once: true + command: oc apply -f /tmp/openshift-approver/ + +- name: Remove auto-approver config + run_once: true + file: + path: /tmp/openshift-approver/ + state: absent diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml index 422d08400..60ac189a8 100644 --- a/roles/openshift_builddefaults/meta/main.yml +++ b/roles/openshift_builddefaults/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_buildoverrides/meta/main.yml b/roles/openshift_buildoverrides/meta/main.yml index e9d2e8712..edca92e6f 100644 --- a/roles/openshift_buildoverrides/meta/main.yml +++ b/roles/openshift_buildoverrides/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml index cf49a6ebf..df53280c8 100644 --- a/roles/openshift_buildoverrides/vars/main.yml +++ b/roles/openshift_buildoverrides/vars/main.yml @@ -9,3 +9,4 @@ buildoverrides_yaml: imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}" nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}" annotations: "{{ openshift_buildoverrides_annotations | default(None) }}" + tolerations: "{{ openshift_buildoverrides_tolerations | default(None) }}" diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index f8b784a63..b2081efc6 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -14,3 +14,5 @@ galaxy_info: - system dependencies: - role: openshift_cli +- role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index eb00f13db..9c8534c74 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -9,17 +9,18 @@ - name: Install the base package for admin tooling package: - name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: install_result - until: 
install_result | success + until: install_result is succeeded delegate_to: "{{ openshift_ca_host }}" run_once: true - name: Reload generated facts openshift_facts: - when: hostvars[openshift_ca_host].install_result | changed + when: + - hostvars[openshift_ca_host].install_result | default({'changed':false}) is changed - name: Create openshift_ca_config_dir if it does not exist file: @@ -41,7 +42,7 @@ - set_fact: master_ca_missing: "{{ False in (g_master_ca_stat_result.results - | oo_collect(attribute='stat.exists') + | lib_utils_oo_collect(attribute='stat.exists') | list) }}" run_once: true @@ -87,11 +88,11 @@ # This should NOT replace the CA due to --overwrite=false when a CA already exists. - name: Create the master certificates if they do not already exist command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs - {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs + {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} - {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} + {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %} --certificate-authority {{ legacy_ca_certificate }} {% endfor %} --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }} @@ -117,7 +118,7 @@ src: "{{ item }}" dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/" remote_src: true - with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}" + with_items: "{{ g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') }}" delegate_to: "{{ openshift_ca_host }}" run_once: true - copy: @@ -137,7 +138,7 @@ - name: Test local loopback context command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view --config={{ openshift_master_loopback_config }} changed_when: false register: loopback_config @@ -154,9 +155,9 @@ register: openshift_ca_loopback_tmpdir - name: Generate the loopback master client config command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config --certificate-authority={{ openshift_ca_cert }} - {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} + {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} --client-dir={{ openshift_ca_loopback_tmpdir.stdout }} diff --git a/roles/openshift_certificate_expiry/meta/main.yml b/roles/openshift_certificate_expiry/meta/main.yml index c13b29ba5..6758f5b36 100644 --- a/roles/openshift_certificate_expiry/meta/main.yml +++ b/roles/openshift_certificate_expiry/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info: categories: - cloud - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index b5234bd1e..7062b5060 100644 --- 
a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -7,7 +7,6 @@ register: check_results - name: Generate expiration report HTML - become: no run_once: yes template: src: cert-expiry-table.html.j2 @@ -17,11 +16,12 @@ - name: Generate the result JSON string run_once: yes - set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" + set_fact: + # oo_cert_expiry_results_to_json is a custom filter in role lib_utils + json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file - become: no run_once: yes template: src: save_json_results.j2 diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index 631a0455e..9faec639f 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -8,4 +8,4 @@ system_images_registry: "{{ system_images_registry_dict[openshift_deployment_typ openshift_use_crio_only: False l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}" -l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}" +l_use_cli_atomic_image: "{{ (openshift_use_crio_only | bool) or (l_is_system_container_image | bool) }}" diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 5d2b6abed..e531543b9 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index a90143aa3..ae8d1ace0 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: Install clients - package: name={{ openshift_service_type }}-clients state=present - when: not openshift.common.is_containerized | bool + package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present + when: not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded - block: - name: Pull CLI Image @@ -12,13 +12,14 @@ register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ openshift_cli_image }}" tag: "{{ openshift_image_tag }}" backend: "docker" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not l_use_cli_atomic_image | bool - block: @@ -28,13 +29,14 @@ register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}" tag: "{{ openshift_image_tag }}" backend: "atomic" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - l_use_cli_atomic_image | bool - name: Reload facts to pick up installed OpenShift version @@ -42,6 +44,6 @@ - name: Install bash completion for oc tools package: name=bash-completion 
state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml index 82c73b583..cdacdd042 100644 --- a/roles/openshift_clock/tasks/main.yaml +++ b/roles/openshift_clock/tasks/main.yaml @@ -10,7 +10,7 @@ - openshift_clock_enabled | bool - chrony_installed.rc != 0 register: result - until: result | success + until: result is succeeded - name: Start and enable ntpd/chronyd command: timedatectl set-ntp true diff --git a/roles/openshift_cloud_provider/meta/main.yml b/roles/openshift_cloud_provider/meta/main.yml index 8ab95bf5a..e49cc4430 100644 --- a/roles/openshift_cloud_provider/meta/main.yml +++ b/roles/openshift_cloud_provider/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml index ee4048911..395bd304c 100644 --- a/roles/openshift_cloud_provider/tasks/gce.yml +++ b/roles/openshift_cloud_provider/tasks/gce.yml @@ -13,5 +13,11 @@ ini_file: dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf" section: Global - option: multizone - value: "true" + option: "{{ item.key }}" + value: "{{ item.value }}" + with_items: + - { key: 'project-id', value: '{{ openshift_gcp_project }}' } + - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } + - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } + - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } + - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index dff492a69..3513577fa 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -19,3 +19,6 @@ - include_tasks: gce.yml when: cloudprovider_is_gce | bool + +- include_tasks: vsphere.yml + when: cloudprovider_is_vsphere | bool diff --git a/roles/openshift_cloud_provider/tasks/vsphere.yml b/roles/openshift_cloud_provider/tasks/vsphere.yml new file mode 100644 index 000000000..3a33df241 --- /dev/null +++ b/roles/openshift_cloud_provider/tasks/vsphere.yml @@ -0,0 +1,6 @@ +--- +- name: Create cloud config + template: + dest: "{{ openshift.common.config_base }}/cloudprovider/vsphere.conf" + src: vsphere.conf.j2 + when: openshift_cloudprovider_vsphere_username is defined and openshift_cloudprovider_vsphere_password is defined and openshift_cloudprovider_vsphere_host is defined and openshift_cloudprovider_vsphere_datacenter is defined and openshift_cloudprovider_vsphere_datastore is defined diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2 index 313ee02b4..30f18ffa9 100644 --- a/roles/openshift_cloud_provider/templates/openstack.conf.j2 +++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2 @@ -19,3 +19,7 @@ region = {{ openshift_cloudprovider_openstack_region }} [LoadBalancer] subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }} {% endif %} +{% if openshift_cloudprovider_openstack_blockstorage_version is defined %} +[BlockStorage] +bs-version={{ openshift_cloudprovider_openstack_blockstorage_version }} +{% endif %}
\ No newline at end of file diff --git a/roles/openshift_cloud_provider/templates/vsphere.conf.j2 b/roles/openshift_cloud_provider/templates/vsphere.conf.j2 new file mode 100644 index 000000000..84e5e371c --- /dev/null +++ b/roles/openshift_cloud_provider/templates/vsphere.conf.j2 @@ -0,0 +1,15 @@ +[Global] +user = "{{ openshift_cloudprovider_vsphere_username }}" +password = "{{ openshift_cloudprovider_vsphere_password }}" +server = "{{ openshift_cloudprovider_vsphere_host }}" +port = 443 +insecure-flag = 1 +datacenter = {{ openshift_cloudprovider_vsphere_datacenter }} +datastore = {{ openshift_cloudprovider_vsphere_datastore }} +{% if openshift_cloudprovider_vsphere_folder is defined %} +working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/ +{% else %} +working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/ +{% endif %} +[Disk] +scsicontrollertype = pvscsi diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml index c9d953f58..e71db80b9 100644 --- a/roles/openshift_cloud_provider/vars/main.yml +++ b/roles/openshift_cloud_provider/vars/main.yml @@ -3,3 +3,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}" cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}" cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}" cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}" +cloudprovider_is_vsphere: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'vsphere' }}" diff --git a/roles/openshift_cluster_autoscaler/README.md b/roles/openshift_cluster_autoscaler/README.md index d775a8a71..137ae0cef 100644 --- a/roles/openshift_cluster_autoscaler/README.md +++ b/roles/openshift_cluster_autoscaler/README.md @@ -28,7 +28,7 @@ Example Playbook remote_user: root tasks: - name: include role autoscaler - include_role: + import_role: name: openshift_cluster_autoscaler vars: openshift_clusterid: opstest diff --git a/roles/openshift_cluster_autoscaler/meta/main.yml b/roles/openshift_cluster_autoscaler/meta/main.yml index d2bbd2576..543eb6fed 100644 --- a/roles/openshift_cluster_autoscaler/meta/main.yml +++ b/roles/openshift_cluster_autoscaler/meta/main.yml @@ -1,3 +1,4 @@ --- dependencies: - lib_openshift +- role: lib_utils diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml new file mode 100644 index 000000000..ebe5671d2 --- /dev/null +++ b/roles/openshift_daemonset_config/defaults/main.yml @@ -0,0 +1,19 @@ +--- +openshift_daemonset_config_namespace: openshift-node +openshift_daemonset_config_daemonset_name: ops-node-config +openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}" +openshift_daemonset_config_node_selector: + config: config +openshift_daemonset_config_sa_name: ops +openshift_daemonset_config_configmap_files: {} +openshift_daemonset_config_configmap_literals: {} +openshift_daemonset_config_monitoring: False +openshift_daemonset_config_interval: 300 +openshift_daemonset_config_script: config.sh +openshift_daemonset_config_secret_name: operations-config-secret +openshift_daemonset_config_secrets: {} +openshift_daemonset_config_runasuser: 0 +openshift_daemonset_config_privileged: True +openshift_daemonset_config_resources: + cpu: 10m + memory: 10Mi diff --git 
a/roles/openshift_daemonset_config/meta/main.yml b/roles/openshift_daemonset_config/meta/main.yml new file mode 100644 index 000000000..d2bbd2576 --- /dev/null +++ b/roles/openshift_daemonset_config/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- lib_openshift diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml new file mode 100644 index 000000000..450cc9dca --- /dev/null +++ b/roles/openshift_daemonset_config/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: add a sa + oc_serviceaccount: + name: "{{ openshift_daemonset_config_sa_name }}" + namespace: "{{ openshift_daemonset_config_namespace }}" + +- name: add sa to privileged scc + oc_adm_policy_user: + namespace: "{{ openshift_daemonset_config_namespace }}" + resource_kind: scc + resource_name: privileged + state: present + user: "system:serviceaccount:{{ openshift_daemonset_config_namespace }}:{{ openshift_daemonset_config_sa_name }}" + +- name: copy template to disk + template: + dest: "/tmp/{{ item.name }}" + src: "{{ item.name }}.j2" + with_items: + - name: daemonset.yml + +- name: copy files to disk + copy: + src: "{{ item.key }}" + dest: "{{ item.value }}" + with_dict: "{{ openshift_daemonset_config_configmap_files }}" + +- name: create the namespace + oc_project: + state: present + name: "{{ openshift_daemonset_config_namespace }}" + +- name: lay down secrets + oc_secret: + state: present + name: "{{ openshift_daemonset_config_secret_name }}" + namespace: "{{ openshift_daemonset_config_namespace }}" + delete_after: true + contents: "{{ openshift_daemonset_config_secrets }}" + when: + - openshift_daemonset_config_secrets != {} + +- name: create the configmap + oc_configmap: + state: present + name: "{{ openshift_daemonset_config_configmap_name }}" + namespace: "{{ openshift_daemonset_config_namespace }}" + from_literal: "{{ openshift_daemonset_config_configmap_literals }}" + from_file: "{{ openshift_daemonset_config_configmap_files }}" + +- name: deploy daemonset + oc_obj: + state: present + namespace: "{{ openshift_daemonset_config_namespace }}" # openshift-node?? + name: "{{ openshift_daemonset_config_daemonset_name }}" + kind: daemonset + files: + - /tmp/daemonset.yml diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 new file mode 100644 index 000000000..9792f6d16 --- /dev/null +++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 @@ -0,0 +1,142 @@ +--- +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: {{ openshift_daemonset_config_daemonset_name }} + annotations: + kubernetes.io/description: | + This daemon set manages the operational configuration for a cluster and ensures all nodes have + a concrete set of config in place. It could also use a local ansible run against the /host directory. 
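# A hypothetical invocation of the openshift_daemonset_config role above, sketching how
# the defaults fit together: the configmap literals are mounted at /opt/config, and the
# config container loops, running /opt/config/config.sh (openshift_daemonset_config_script)
# every 300 seconds (openshift_daemonset_config_interval). The play target and the script
# body are illustrative assumptions, not taken from the repository.
- hosts: masters[0]   # hypothetical; any host where the oc_* modules can reach the cluster
  tasks:
  - import_role:
      name: openshift_daemonset_config
    vars:
      openshift_daemonset_config_monitoring: true
      openshift_daemonset_config_configmap_literals:
        config.sh: |
          #!/bin/sh
          echo "applying ops config on $(hostname)"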
+spec: + selector: + matchLabels: + app: {{ openshift_daemonset_config_daemonset_name }} + confighosts: ops + ops.openshift.io/role: operations + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ openshift_daemonset_config_daemonset_name }} + confighosts: ops + ops.openshift.io/role: operations + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: +{% if openshift_daemonset_config_node_selector is defined and openshift_daemonset_config_node_selector != {} %} + nodeSelector: {{ openshift_daemonset_config_node_selector | to_json }} +{% endif %} + serviceAccountName: {{ openshift_daemonset_config_sa_name }} + hostNetwork: true + hostPID: true + hostIPC: true + containers: + - name: config + image: centos:7 + env: + - name: RESYNC_INTERVAL + value: "{{ openshift_daemonset_config_interval }}" + command: + - /bin/bash + - -c + - | + #!/bin/sh + set -o errexit + + while true; do + + # execute user defined script + sh /opt/config/{{ openshift_daemonset_config_script }} + + # sleep for ${RESYNC_INTERVAL} minutes, then loop. if we fail Kubelet will restart us again + echo "Success, sleeping for ${RESYNC_INTERVAL}s" + exec sleep ${RESYNC_INTERVAL} + + # Return to perform the config + done + securityContext: + # Must be root to modify host system + runAsUser: {{ openshift_daemonset_config_runasuser }} + # Permission could be reduced by selecting an appropriate SELinux policy that allows + # us to update the named directories + privileged: {{ openshift_daemonset_config_privileged }} + volumeMounts: + # Directory which contains the host volume. + - mountPath: /host + name: host + # Our node configuration + - mountPath: /opt/config + name: config +{% if openshift_daemonset_config_secrets != {} %} + # Our delivered secrets + - mountPath: /opt/secrets + name: secrets +{% endif %} + resources: + requests: + cpu: {{ openshift_daemonset_config_resources.cpu }} + memory: {{ openshift_daemonset_config_resources.memory }} +{% if openshift_daemonset_config_monitoring %} + - name: monitoring + image: openshifttools/oso-centos7-host-monitoring:latest + securityContext: + # Must be root to read content + runAsUser: 0 + privileged: true + + volumeMounts: + - mountPath: /host + name: host + readOnly: true + - mountPath: /etc/localtime + subPath: etc/localtime + name: host + readOnly: true + - mountPath: /sys + subPath: sys + name: host + readOnly: true + - mountPath: /var/run/docker.sock + subPath: var/run/docker.sock + name: host + readOnly: true + - mountPath: /var/run/openvswitch + subPath: var/run/openvswitch + name: host + readOnly: true + - mountPath: /etc/origin + subPath: etc/origin + name: host + readOnly: true + - mountPath: /usr/bin/oc + subPath: usr/bin/oc + name: host + readOnly: true + name: host + readOnly: true + - mountPath: /host/var/cache/yum + subPath: var/cache/yum + name: host + - mountPath: /container_setup/monitoring-config.yml + subPath: monitoring-config.yaml + name: config + - mountPath: /opt/config + name: config + resources: + requests: + cpu: 10m + memory: 10Mi +{% endif %} + volumes: + - name: config + configMap: + name: {{ openshift_daemonset_config_configmap_name }} +{% if openshift_daemonset_config_secrets != {} %} + - name: secrets + secret: + secretName: {{ openshift_daemonset_config_secret_name }} +{% endif %} + - name: host + hostPath: + path: / diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml index d7d57fe39..30671a59a 100644 --- 
a/roles/openshift_default_storage_class/meta/main.yml +++ b/roles/openshift_default_storage_class/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml index f88a7c533..c8472d8bc 100644 --- a/roles/openshift_docker_gc/meta/main.yml +++ b/roles/openshift_docker_gc/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info: - 7 dependencies: - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml index 0e28fec03..25ae6a936 100644 --- a/roles/openshift_etcd/meta/main.yml +++ b/roles/openshift_etcd/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: openshift_etcd_facts - role: etcd +- role: lib_utils diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml index fbc72c8a3..6c79d345c 100644 --- a/roles/openshift_etcd_client_certificates/meta/main.yml +++ b/roles/openshift_etcd_client_certificates/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml index 7f8b667f0..18d07fc2f 100644 --- a/roles/openshift_etcd_client_certificates/tasks/main.yml +++ b/roles/openshift_etcd_client_certificates/tasks/main.yml @@ -1,4 +1,4 @@ --- -- include_role: +- import_role: name: etcd tasks_from: client_certificates diff --git a/roles/openshift_etcd_facts/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml index 925aa9f92..5e64a8596 100644 --- a/roles/openshift_etcd_facts/meta/main.yml +++ b/roles/openshift_etcd_facts/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index 0c072b64a..d716c9505 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -1,6 +1,6 @@ --- -etcd_is_containerized: "{{ openshift.common.is_containerized }}" -etcd_is_atomic: "{{ openshift.common.is_atomic }}" +etcd_is_containerized: "{{ openshift_is_containerized | bool }}" +etcd_is_atomic: "{{ openshift_is_atomic }}" etcd_hostname: "{{ openshift.common.hostname }}" etcd_ip: "{{ openshift.common.ip }}" etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}" diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index e623b33f3..0a6e8f20c 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true content_version: "{{ openshift.common.examples_content_version }}" -examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples" +examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples" image_streams_base: "{{ examples_base }}/image-streams" centos_image_streams: - "{{ image_streams_base }}/image-streams-centos7.json" diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index 1d5fba990..648bf7293 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -5,8 +5,8 @@ # # This script should be 
run from openshift-ansible/roles/openshift_examples -XPAAS_VERSION=ose-v1.4.6 -ORIGIN_VERSION=${1:-v3.7} +XPAAS_VERSION=ose-v1.4.7 +ORIGIN_VERSION=${1:-v3.9} RHAMP_TAG=2.0.0.GA EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} find ${EXAMPLES_BASE} -name '*.json' -delete diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest new file mode 120000 index 000000000..8cad94b63 --- /dev/null +++ b/roles/openshift_examples/files/examples/latest @@ -0,0 +1 @@ +v3.9
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml new file mode 100644 index 000000000..48d1d4e26 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-backup +spec: + template: + metadata: + name: cloudforms-backup + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml new file mode 100644 index 000000000..92598ce82 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloudforms-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml new file mode 100644 index 000000000..4fe349897 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: "/exports/cfme-pv03" + server: "<your-nfs-host-here>" + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml index 250a99b8d..0cdd821b5 100644 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-db-pv metadata: - name: cfme-pv01 -spec: - capacity: - storage: 15Gi - accessModes: + name: cloudforms-db-pv + annotations: + description: PV Template for CFME PostgreSQL DB + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv01 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the CFME DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP 
address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml deleted file mode 100644 index cba9bbe35..000000000 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: cfme-pv02 -spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/cfme-pv02 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml index c08c21265..527090ae8 100644 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-app-pv metadata: - name: cfme-pv03 -spec: - capacity: - storage: 5Gi - accessModes: + name: cloudforms-app-pv + annotations: + description: PV Template for CFME Server + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv03 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the CFME APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml new file mode 100644 index 000000000..7fd4fc2e1 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-restore +spec: + template: + metadata: + name: cloudforms-restore + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + - name: cfme-prod-vol + mountPath: "/restore" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + - name: cfme-prod-vol + persistentVolumeClaim: + claimName: cloudforms-postgresql + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml new file mode 100644 index 000000000..d2ece9298 --- /dev/null +++ 
b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml @@ -0,0 +1,38 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: v1 +defaultAddCapabilities: +- SYS_ADMIN +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required scc for Pods requiring to run with systemd and the message bus. + creationTimestamp: + name: cfme-sysadmin +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +- SYS_CHROOT +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- secret diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml new file mode 100644 index 000000000..9866c29c3 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml @@ -0,0 +1,956 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-ext-db +metadata: + name: cloudforms-ext-db + annotations: + description: CloudForms appliance with persistent storage using a external DB host + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - 
name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: 
"${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + 
secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + 
MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: 
"${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. + value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. 
+ value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible container + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. 
+ value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files. 
+ value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml index 3bc6c5813..5c757b6c2 100644 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml @@ -5,17 +5,308 @@ labels: metadata: name: cloudforms annotations: - description: "CloudForms appliance with persistent storage" - tags: "instant-app,cloudforms,cfme" - iconClass: "icon-rails" + description: CloudForms appliance with persistent storage + tags: instant-app,cloudforms,cfme + iconClass: icon-rails objects: - apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 
+ wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING + #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + #------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + #------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. 
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + 
AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 kind: Service metadata: annotations: - description: "Exposes and load balances CloudForms pods" + description: Exposes and load balances CloudForms pods service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' - name: ${NAME} + name: "${NAME}" spec: clusterIP: None ports: @@ -23,141 +314,97 @@ objects: port: 80 protocol: TCP targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 selector: - name: ${NAME} + name: "${NAME}" - apiVersion: v1 kind: Route metadata: - name: ${NAME} + name: "${HTTPD_SERVICE_NAME}" spec: - host: ${APPLICATION_DOMAIN} + host: "${APPLICATION_DOMAIN}" port: - targetPort: https + targetPort: http tls: - termination: passthrough + termination: edge + insecureEdgeTerminationPolicy: Redirect to: kind: Service - name: ${NAME} -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-app - annotations: - description: "Keeps track of changes in the CloudForms app image" - spec: - dockerImageRepository: "${APPLICATION_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-postgresql - annotations: - description: "Keeps track of changes in the CloudForms postgresql image" - spec: - dockerImageRepository: "${POSTGRESQL_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-memcached - annotations: - description: "Keeps track of changes in the CloudForms memcached image" - spec: - dockerImageRepository: "${MEMCACHED_IMG_NAME}" + name: "${HTTPD_SERVICE_NAME}" - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: "${NAME}-${DATABASE_SERVICE_NAME}" spec: 
accessModes: - - ReadWriteOnce + - ReadWriteOnce resources: requests: - storage: ${DATABASE_VOLUME_CAPACITY} -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-region" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${APPLICATION_REGION_VOLUME_CAPACITY} + storage: "${DATABASE_VOLUME_CAPACITY}" - apiVersion: apps/v1beta1 - kind: "StatefulSet" + kind: StatefulSet metadata: - name: ${NAME} + name: "${NAME}" annotations: - description: "Defines how to deploy the CloudForms appliance" + description: Defines how to deploy the CloudForms appliance spec: serviceName: "${NAME}" - replicas: 1 + replicas: "${APPLICATION_REPLICA_COUNT}" template: metadata: labels: - name: ${NAME} - name: ${NAME} + name: "${NAME}" + name: "${NAME}" spec: containers: - name: cloudforms - image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 443 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: / - port: 443 - scheme: HTTPS + tcpSocket: + port: 80 initialDelaySeconds: 200 timeoutSeconds: 3 ports: - containerPort: 80 protocol: TCP - - containerPort: 443 - protocol: TCP - securityContext: - privileged: true volumeMounts: - - - name: "${NAME}-server" - mountPath: "/persistent" - - - name: "${NAME}-region" - mountPath: "/persistent-region" + - name: "${NAME}-server" + mountPath: "/persistent" env: - - - name: "APPLICATION_INIT_DELAY" - value: "${APPLICATION_INIT_DELAY}" - - - name: "DATABASE_SERVICE_NAME" - value: "${DATABASE_SERVICE_NAME}" - - - name: "DATABASE_REGION" - value: "${DATABASE_REGION}" - - - name: "MEMCACHED_SERVICE_NAME" - value: "${MEMCACHED_SERVICE_NAME}" - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: memory: "${APPLICATION_MEM_REQ}" @@ -168,59 +415,128 @@ objects: preStop: exec: command: - - /opt/rh/cfme-container-scripts/sync-pv-data - volumes: - - - name: "${NAME}-region" - persistentVolumeClaim: - claimName: ${NAME}-region + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 volumeClaimTemplates: - - metadata: - name: "${NAME}-server" - annotations: - # Uncomment this if using dynamic volume provisioning. 
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html - # volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ ReadWriteOnce ] + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: - storage: "${APPLICATION_VOLUME_CAPACITY}" + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Exposes the memcached server" + description: Exposes the memcached server spec: ports: - - - name: "memcached" - port: 11211 - targetPort: 11211 + - name: memcached + port: 11211 + targetPort: 11211 selector: name: "${MEMCACHED_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Defines how to deploy memcached" + description: Defines how to deploy memcached spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "memcached" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${MEMCACHED_SERVICE_NAME}" @@ -232,74 +548,58 @@ objects: spec: volumes: [] containers: - - - name: "memcached" - image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" - ports: - - - containerPort: 11211 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 5 - tcpSocket: - port: 11211 - livenessProbe: - 
timeoutSeconds: 1 - initialDelaySeconds: 30 - tcpSocket: - port: 11211 - volumeMounts: [] - env: - - - name: "MEMCACHED_MAX_MEMORY" - value: "${MEMCACHED_MAX_MEMORY}" - - - name: "MEMCACHED_MAX_CONNECTIONS" - value: "${MEMCACHED_MAX_CONNECTIONS}" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - value: "${MEMCACHED_SLAB_PAGE_SIZE}" - resources: - requests: - memory: "${MEMCACHED_MEM_REQ}" - cpu: "${MEMCACHED_CPU_REQ}" - limits: - memory: "${MEMCACHED_MEM_LIMIT}" + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Exposes the database server" + description: Exposes the database server spec: ports: - - - name: "postgresql" - port: 5432 - targetPort: 5432 + - name: postgresql + port: 5432 + targetPort: 5432 selector: name: "${DATABASE_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Defines how to deploy the database" + description: Defines how to deploy the database spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "postgresql" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${DATABASE_SERVICE_NAME}" @@ -310,236 +610,524 @@ objects: name: "${DATABASE_SERVICE_NAME}" spec: volumes: - - - name: "cfme-pgdb-volume" - persistentVolumeClaim: - claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" containers: - - - name: "postgresql" - image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" - ports: - - - containerPort: 5432 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 15 + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: cfme-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: cfme-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - 
name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + 
configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: exec: command: - - "/bin/sh" - - "-i" - - "-c" - - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 60 - tcpSocket: - port: 5432 - volumeMounts: - - - name: "cfme-pgdb-volume" - mountPath: "/var/lib/pgsql/data" - env: - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${POSTGRESQL_MEM_REQ}" - cpu: "${POSTGRESQL_CPU_REQ}" - limits: - memory: "${POSTGRESQL_MEM_LIMIT}" - + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd parameters: - - - name: "NAME" - displayName: Name - required: true - description: "The name assigned to all of the frontend objects defined in this template." - value: cloudforms - - - name: "DATABASE_SERVICE_NAME" - displayName: "PostgreSQL Service Name" - required: true - description: "The name of the OpenShift Service exposed for the PostgreSQL container." - value: "postgresql" - - - name: "DATABASE_USER" - displayName: "PostgreSQL User" - required: true - description: "PostgreSQL user that will access the database." - value: "root" - - - name: "DATABASE_PASSWORD" - displayName: "PostgreSQL Password" - required: true - description: "Password for the PostgreSQL user." - value: "smartvm" - - - name: "DATABASE_NAME" - required: true - displayName: "PostgreSQL Database Name" - description: "Name of the PostgreSQL database accessed." - value: "vmdb_production" - - - name: "DATABASE_REGION" - required: true - displayName: "Application Database Region" - description: "Database region that will be used for application." - value: "0" - - - name: "MEMCACHED_SERVICE_NAME" - required: true - displayName: "Memcached Service Name" - description: "The name of the OpenShift Service exposed for the Memcached container." - value: "memcached" - - - name: "MEMCACHED_MAX_MEMORY" - displayName: "Memcached Max Memory" - description: "Memcached maximum memory for memcached object storage in MB." - value: "64" - - - name: "MEMCACHED_MAX_CONNECTIONS" - displayName: "Memcached Max Connections" - description: "Memcached maximum number of connections allowed." 
- value: "1024" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - displayName: "Memcached Slab Page Size" - description: "Memcached size of each slab page." - value: "1m" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - displayName: "PostgreSQL Max Connections" - description: "PostgreSQL maximum number of database connections allowed." - value: "100" - - - name: "POSTGRESQL_SHARED_BUFFERS" - displayName: "PostgreSQL Shared Buffer Amount" - description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "256MB" - - - name: "APPLICATION_CPU_REQ" - displayName: "Application Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." - value: "1000m" - - - name: "POSTGRESQL_CPU_REQ" - displayName: "PostgreSQL Min CPU Requested" - required: true - description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." - value: "500m" - - - name: "MEMCACHED_CPU_REQ" - displayName: "Memcached Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." - value: "200m" - - - name: "APPLICATION_MEM_REQ" - displayName: "Application Min RAM Requested" - required: true - description: "Minimum amount of memory the Application container will need." - value: "6144Mi" - - - name: "POSTGRESQL_MEM_REQ" - displayName: "PostgreSQL Min RAM Requested" - required: true - description: "Minimum amount of memory the PostgreSQL container will need." - value: "1024Mi" - - - name: "MEMCACHED_MEM_REQ" - displayName: "Memcached Min RAM Requested" - required: true - description: "Minimum amount of memory the Memcached container will need." - value: "64Mi" - - - name: "APPLICATION_MEM_LIMIT" - displayName: "Application Max RAM Limit" - required: true - description: "Maximum amount of memory the Application container can consume." - value: "16384Mi" - - - name: "POSTGRESQL_MEM_LIMIT" - displayName: "PostgreSQL Max RAM Limit" - required: true - description: "Maximum amount of memory the PostgreSQL container can consume." - value: "8192Mi" - - - name: "MEMCACHED_MEM_LIMIT" - displayName: "Memcached Max RAM Limit" - required: true - description: "Maximum amount of memory the Memcached container can consume." - value: "256Mi" - - - name: "POSTGRESQL_IMG_NAME" - displayName: "PostgreSQL Image Name" - description: "This is the PostgreSQL image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql" - - - name: "POSTGRESQL_IMG_TAG" - displayName: "PostgreSQL Image Tag" - description: "This is the PostgreSQL image tag/version requested to deploy." - value: "latest" - - - name: "MEMCACHED_IMG_NAME" - displayName: "Memcached Image Name" - description: "This is the Memcached image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached" - - - name: "MEMCACHED_IMG_TAG" - displayName: "Memcached Image Tag" - description: "This is the Memcached image tag/version requested to deploy." - value: "latest" - - - name: "APPLICATION_IMG_NAME" - displayName: "Application Image Name" - description: "This is the Application image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app" - - - name: "APPLICATION_IMG_TAG" - displayName: "Application Image Tag" - description: "This is the Application image tag/version requested to deploy." 
- value: "latest" - - - name: "APPLICATION_DOMAIN" - displayName: "Application Hostname" - description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." - value: "" - - - name: "APPLICATION_INIT_DELAY" - displayName: "Application Init Delay" - required: true - description: "Delay in seconds before we attempt to initialize the application." - value: "15" - - - name: "APPLICATION_VOLUME_CAPACITY" - displayName: "Application Volume Capacity" - required: true - description: "Volume space available for application data." - value: "5Gi" - - - name: "APPLICATION_REGION_VOLUME_CAPACITY" - displayName: "Application Region Volume Capacity" - required: true - description: "Volume space available for region application data." - value: "5Gi" - - - name: "DATABASE_VOLUME_CAPACITY" - displayName: "Database Volume Capacity" - required: true - description: "Volume space available for database." - value: "15Gi" +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible continer + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: POSTGRESQL_CONFIG_DIR + displayName: PostgreSQL Configuration Overrides + description: Directory used to store PostgreSQL configuration overrides. + value: "/var/lib/pgsql/conf.d" +- name: POSTGRESQL_MAX_CONNECTIONS + displayName: PostgreSQL Max Connections + description: PostgreSQL maximum number of database connections allowed. 
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible Admin User Password
+ required: true
+ description: The password for the Ansible container admin user.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container.
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server.
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+ description: The name of the httpd DBus API service.
+ value: httpd-dbus-api
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.7/v3.8 b/roles/openshift_examples/files/examples/v3.7/v3.8 deleted file mode 120000 index 8ddcf661c..000000000 --- a/roles/openshift_examples/files/examples/v3.7/v3.8 +++ /dev/null @@ -1 +0,0 @@ -v3.8
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json index 217ef11dd..92be8f42e 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mariadb-persistent", "annotations": { - "openshift.io/display-name": "MariaDB (Persistent)", + "openshift.io/display-name": "MariaDB", "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mariadb", "tags": "database,mariadb", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json index 97e4128a4..4e3e64d48 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mongodb-persistent", "annotations": { - "openshift.io/display-name": "MongoDB (Persistent)", + "openshift.io/display-name": "MongoDB", "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mongodb", "tags": "database,mongodb", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json index 48ac114fd..6ac80f3a0 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mysql-persistent", "annotations": { - "openshift.io/display-name": "MySQL (Persistent)", + "openshift.io/display-name": "MySQL", "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. 
You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", "tags": "database,mysql", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json index 8a2d23907..190509112 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "postgresql-persistent", "annotations": { - "openshift.io/display-name": "PostgreSQL (Persistent)", + "openshift.io/display-name": "PostgreSQL", "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-postgresql", "tags": "database,postgresql", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json index e0e0a88d5..d1103d3af 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "redis-persistent", "annotations": { - "openshift.io/display-name": "Redis (Persistent)", + "openshift.io/display-name": "Redis", "description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-redis", "tags": "database,redis", diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json index e7af160d9..0aa586061 100644 --- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json @@ -407,7 +407,7 @@ "annotations": { "openshift.io/display-name": "Python (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -415,7 +415,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "3.5" + "name": "3.6" } }, { @@ -485,6 +485,23 @@ "kind": "DockerImage", "name": "centos/python-35-centos7:latest" } + }, + { + "name": "3.6", + "annotations": { + "openshift.io/display-name": "Python 3.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.6,python", + "version": "3.6", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "centos/python-36-centos7:latest" + } } ] } diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json index 2b082fc75..7ab9f4e59 100644 --- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json @@ -407,7 +407,7 @@ "annotations": { "openshift.io/display-name": "Python (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -415,7 +415,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "3.5" + "name": "3.6" } }, { @@ -485,6 +485,23 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest" } + }, + { + "name": "3.6", + "annotations": { + "openshift.io/display-name": "Python 3.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Python 3.6 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.6,python", + "version": "3.6", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest" + } } ] } diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json index 86ddc184a..40b4eaa81 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "cakephp-mysql-persistent", "annotations": { - "openshift.io/display-name": "CakePHP + MySQL (Persistent)", + "openshift.io/display-name": "CakePHP + MySQL", "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.", "tags": "quickstart,php,cakephp", "iconClass": "icon-php", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-persistent" + "template": "cakephp-mysql-persistent", + "app": "cakephp-mysql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json index 3c964bd6a..ecd90e495 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-example" + "template": "cakephp-mysql-example", + "app": "cakephp-mysql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json index 0a10c5fbc..17a155600 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "dancer-mysql-persistent", "annotations": { - "openshift.io/display-name": "Dancer + MySQL (Persistent)", + "openshift.io/display-name": "Dancer + MySQL", "description": "An example Dancer application with a MySQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "tags": "quickstart,perl,dancer", "iconClass": "icon-perl", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-persistent" + "template": "dancer-mysql-persistent", + "app": "dancer-mysql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json index 6122d5436..abf711535 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-example" + "template": "dancer-mysql-example", + "app": "dancer-mysql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json index f3b5838fa..c8dab0b53 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "django-psql-persistent", "annotations": { - "openshift.io/display-name": "Django + PostgreSQL (Persistent)", + "openshift.io/display-name": "Django + PostgreSQL", "description": "An example Django application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "tags": "quickstart,python,django", "iconClass": "icon-python", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-persistent" + "template": "django-psql-persistent", + "app": "django-psql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json index b21295df2..6395defda 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-example" + "template": "django-psql-example", + "app": "django-psql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json index 3771280bf..d7cab4889 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", "labels": { - "template": "httpd-example" + "template": "httpd-example", + "app": "httpd-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json index 28b4b9d81..6186bba10 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json @@ -15,6 +15,9 @@ } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. 
The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "labels": { + "app": "jenkins-ephemeral" + }, "objects": [ { "kind": "Route", diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json index 4915bb12c..59dc0d22b 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "jenkins-persistent", "annotations": { - "openshift.io/display-name": "Jenkins (Persistent)", + "openshift.io/display-name": "Jenkins", "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins", @@ -15,6 +15,9 @@ } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "labels": { + "app": "jenkins-persistent" + }, "objects": [ { "kind": "Route", diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json index 7f2a5d804..f04adaa67 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "nodejs-mongo-persistent", "annotations": { - "openshift.io/display-name": "Node.js + MongoDB (Persistent)", + "openshift.io/display-name": "Node.js + MongoDB", "description": "An example Node.js application with a MongoDB database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "tags": "quickstart,nodejs", "iconClass": "icon-nodejs", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongo-persistent" + "template": "nodejs-mongo-persistent", + "app": "nodejs-mongo-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json index b3afae46e..0ce36dba5 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongodb-example" + "template": "nodejs-mongodb-example", + "app": "nodejs-mongodb-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json index 1c03be28a..10e9382cc 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "rails-pgsql-persistent", "annotations": { - "openshift.io/display-name": "Rails + PostgreSQL (Persistent)", + "openshift.io/display-name": "Rails + PostgreSQL", "description": "An example Rails application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "tags": "quickstart,ruby,rails", "iconClass": "icon-ruby", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-pgsql-persistent" + "template": "rails-pgsql-persistent", + "app": "rails-pgsql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json index 240289d33..8ec2c8ea6 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-postgresql-example" + "template": "rails-postgresql-example", + "app": "rails-postgresql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml new file mode 100644 index 000000000..48d1d4e26 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-backup +spec: + template: + metadata: + name: cloudforms-backup + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml new file mode 100644 index 000000000..92598ce82 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloudforms-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml new file mode 100644 index 000000000..4fe349897 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: "/exports/cfme-pv03" + server: "<your-nfs-host-here>" + persistentVolumeReclaimPolicy: Retain diff --git 
a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml index 250a99b8d..0cdd821b5 100644 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-db-pv metadata: - name: cfme-pv01 -spec: - capacity: - storage: 15Gi - accessModes: + name: cloudforms-db-pv + annotations: + description: PV Template for CFME PostgreSQL DB + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv01 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the CFME DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml deleted file mode 100644 index cba9bbe35..000000000 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: cfme-pv02 -spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/cfme-pv02 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml index c08c21265..527090ae8 100644 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-app-pv metadata: - name: cfme-pv03 -spec: - capacity: - storage: 5Gi - accessModes: + name: cloudforms-app-pv + annotations: + description: PV Template for CFME Server + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv03 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the CFME APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname 
or IP address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml new file mode 100644 index 000000000..7fd4fc2e1 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-restore +spec: + template: + metadata: + name: cloudforms-restore + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + - name: cfme-prod-vol + mountPath: "/restore" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + - name: cfme-prod-vol + persistentVolumeClaim: + claimName: cloudforms-postgresql + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml new file mode 100644 index 000000000..d2ece9298 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml @@ -0,0 +1,38 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: v1 +defaultAddCapabilities: +- SYS_ADMIN +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required SCC for pods that need to run with systemd and the message bus.
+ creationTimestamp: + name: cfme-sysadmin +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +- SYS_CHROOT +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- secret diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml new file mode 100644 index 000000000..9866c29c3 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml @@ -0,0 +1,956 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-ext-db +metadata: + name: cloudforms-ext-db + annotations: + description: CloudForms appliance with persistent storage using an external DB host + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: +
secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: 
"${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. 
+ Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + 
LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: 
ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. + value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for the application. + value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible container + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. 
+ value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. 
+ value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of the httpd DBus API service. + value: httpd-dbus-api +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. 
+ value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml index 3bc6c5813..5c757b6c2 100644 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml @@ -5,17 +5,308 @@ labels: metadata: name: cloudforms annotations: - description: "CloudForms appliance with persistent storage" - tags: "instant-app,cloudforms,cfme" - iconClass: "icon-rails" + description: CloudForms appliance with persistent storage + tags: instant-app,cloudforms,cfme + iconClass: icon-rails objects: - apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 + wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING + #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + 
#------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + #------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + 
MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see 
https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 kind: Service metadata: annotations: - description: "Exposes and load balances CloudForms pods" + description: Exposes and load balances CloudForms pods service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' - name: ${NAME} + name: "${NAME}" spec: clusterIP: None ports: @@ -23,141 +314,97 @@ objects: port: 80 protocol: TCP targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 selector: - name: ${NAME} + name: "${NAME}" - apiVersion: v1 kind: Route metadata: - name: ${NAME} + name: "${HTTPD_SERVICE_NAME}" spec: - host: ${APPLICATION_DOMAIN} + host: "${APPLICATION_DOMAIN}" port: - targetPort: https + targetPort: http tls: - termination: passthrough + termination: edge + insecureEdgeTerminationPolicy: Redirect to: kind: Service - name: ${NAME} -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-app - annotations: - description: "Keeps track of changes in the CloudForms app image" - spec: - dockerImageRepository: "${APPLICATION_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-postgresql - annotations: - description: "Keeps track of changes in the CloudForms postgresql image" - spec: - dockerImageRepository: "${POSTGRESQL_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-memcached - annotations: - description: "Keeps track of changes in the CloudForms memcached image" - spec: - dockerImageRepository: "${MEMCACHED_IMG_NAME}" + name: "${HTTPD_SERVICE_NAME}" - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: "${NAME}-${DATABASE_SERVICE_NAME}" spec: accessModes: - - ReadWriteOnce + - ReadWriteOnce resources: requests: - storage: ${DATABASE_VOLUME_CAPACITY} -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-region" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${APPLICATION_REGION_VOLUME_CAPACITY} + storage: "${DATABASE_VOLUME_CAPACITY}" - apiVersion: apps/v1beta1 - kind: "StatefulSet" + kind: StatefulSet metadata: - name: ${NAME} + name: "${NAME}" annotations: - description: "Defines how to deploy the CloudForms appliance" + description: Defines how to deploy the CloudForms appliance spec: serviceName: "${NAME}" - replicas: 1 + replicas: "${APPLICATION_REPLICA_COUNT}" template: metadata: labels: - name: ${NAME} - name: ${NAME} + name: "${NAME}" + name: "${NAME}" spec: containers: - name: cloudforms - image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 443 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: / - port: 443 - scheme: HTTPS + tcpSocket: + port: 80 initialDelaySeconds: 200 timeoutSeconds: 3 ports: - containerPort: 80 protocol: TCP - - containerPort: 443 - protocol: TCP - securityContext: - privileged: true volumeMounts: - - - name: "${NAME}-server" - mountPath: "/persistent" - - - name: "${NAME}-region" - mountPath: "/persistent-region" + - name: "${NAME}-server" + mountPath: "/persistent" env: - - - name: "APPLICATION_INIT_DELAY" - value: "${APPLICATION_INIT_DELAY}" - - - name: "DATABASE_SERVICE_NAME" - value: "${DATABASE_SERVICE_NAME}" - - - name: "DATABASE_REGION" - value: 
"${DATABASE_REGION}" - - - name: "MEMCACHED_SERVICE_NAME" - value: "${MEMCACHED_SERVICE_NAME}" - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: memory: "${APPLICATION_MEM_REQ}" @@ -168,59 +415,128 @@ objects: preStop: exec: command: - - /opt/rh/cfme-container-scripts/sync-pv-data - volumes: - - - name: "${NAME}-region" - persistentVolumeClaim: - claimName: ${NAME}-region + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 volumeClaimTemplates: - - metadata: - name: "${NAME}-server" - annotations: - # Uncomment this if using dynamic volume provisioning. - # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html - # volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ ReadWriteOnce ] + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: - storage: "${APPLICATION_VOLUME_CAPACITY}" + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" 
+ lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Exposes the memcached server" + description: Exposes the memcached server spec: ports: - - - name: "memcached" - port: 11211 - targetPort: 11211 + - name: memcached + port: 11211 + targetPort: 11211 selector: name: "${MEMCACHED_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Defines how to deploy memcached" + description: Defines how to deploy memcached spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "memcached" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${MEMCACHED_SERVICE_NAME}" @@ -232,74 +548,58 @@ objects: spec: volumes: [] containers: - - - name: "memcached" - image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" - ports: - - - containerPort: 11211 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 5 - tcpSocket: - port: 11211 - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 30 - tcpSocket: - port: 11211 - volumeMounts: [] - env: - - - name: "MEMCACHED_MAX_MEMORY" - value: "${MEMCACHED_MAX_MEMORY}" - - - name: "MEMCACHED_MAX_CONNECTIONS" - value: "${MEMCACHED_MAX_CONNECTIONS}" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - value: "${MEMCACHED_SLAB_PAGE_SIZE}" - resources: - requests: - memory: "${MEMCACHED_MEM_REQ}" - cpu: "${MEMCACHED_CPU_REQ}" - limits: - memory: "${MEMCACHED_MEM_LIMIT}" + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Exposes the database server" + description: Exposes the database server spec: ports: - - - name: "postgresql" - port: 5432 - targetPort: 5432 + - name: postgresql + port: 5432 + targetPort: 5432 selector: name: "${DATABASE_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Defines how to deploy the database" + description: Defines how to deploy the database spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "postgresql" - from: - kind: "ImageStreamTag" - name: 
"cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${DATABASE_SERVICE_NAME}" @@ -310,236 +610,524 @@ objects: name: "${DATABASE_SERVICE_NAME}" spec: volumes: - - - name: "cfme-pgdb-volume" - persistentVolumeClaim: - claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" containers: - - - name: "postgresql" - image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" - ports: - - - containerPort: 5432 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 15 + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: cfme-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: cfme-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: 
"${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: exec: command: - - "/bin/sh" - - "-i" - - "-c" - - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 60 - tcpSocket: - port: 5432 - volumeMounts: - - - name: "cfme-pgdb-volume" - mountPath: "/var/lib/pgsql/data" - env: - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${POSTGRESQL_MEM_REQ}" - cpu: "${POSTGRESQL_CPU_REQ}" - limits: - memory: "${POSTGRESQL_MEM_LIMIT}" - + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd parameters: - - - name: 
"NAME" - displayName: Name - required: true - description: "The name assigned to all of the frontend objects defined in this template." - value: cloudforms - - - name: "DATABASE_SERVICE_NAME" - displayName: "PostgreSQL Service Name" - required: true - description: "The name of the OpenShift Service exposed for the PostgreSQL container." - value: "postgresql" - - - name: "DATABASE_USER" - displayName: "PostgreSQL User" - required: true - description: "PostgreSQL user that will access the database." - value: "root" - - - name: "DATABASE_PASSWORD" - displayName: "PostgreSQL Password" - required: true - description: "Password for the PostgreSQL user." - value: "smartvm" - - - name: "DATABASE_NAME" - required: true - displayName: "PostgreSQL Database Name" - description: "Name of the PostgreSQL database accessed." - value: "vmdb_production" - - - name: "DATABASE_REGION" - required: true - displayName: "Application Database Region" - description: "Database region that will be used for application." - value: "0" - - - name: "MEMCACHED_SERVICE_NAME" - required: true - displayName: "Memcached Service Name" - description: "The name of the OpenShift Service exposed for the Memcached container." - value: "memcached" - - - name: "MEMCACHED_MAX_MEMORY" - displayName: "Memcached Max Memory" - description: "Memcached maximum memory for memcached object storage in MB." - value: "64" - - - name: "MEMCACHED_MAX_CONNECTIONS" - displayName: "Memcached Max Connections" - description: "Memcached maximum number of connections allowed." - value: "1024" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - displayName: "Memcached Slab Page Size" - description: "Memcached size of each slab page." - value: "1m" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - displayName: "PostgreSQL Max Connections" - description: "PostgreSQL maximum number of database connections allowed." - value: "100" - - - name: "POSTGRESQL_SHARED_BUFFERS" - displayName: "PostgreSQL Shared Buffer Amount" - description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "256MB" - - - name: "APPLICATION_CPU_REQ" - displayName: "Application Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." - value: "1000m" - - - name: "POSTGRESQL_CPU_REQ" - displayName: "PostgreSQL Min CPU Requested" - required: true - description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." - value: "500m" - - - name: "MEMCACHED_CPU_REQ" - displayName: "Memcached Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." - value: "200m" - - - name: "APPLICATION_MEM_REQ" - displayName: "Application Min RAM Requested" - required: true - description: "Minimum amount of memory the Application container will need." - value: "6144Mi" - - - name: "POSTGRESQL_MEM_REQ" - displayName: "PostgreSQL Min RAM Requested" - required: true - description: "Minimum amount of memory the PostgreSQL container will need." - value: "1024Mi" - - - name: "MEMCACHED_MEM_REQ" - displayName: "Memcached Min RAM Requested" - required: true - description: "Minimum amount of memory the Memcached container will need." - value: "64Mi" - - - name: "APPLICATION_MEM_LIMIT" - displayName: "Application Max RAM Limit" - required: true - description: "Maximum amount of memory the Application container can consume." 
- value: "16384Mi" - - - name: "POSTGRESQL_MEM_LIMIT" - displayName: "PostgreSQL Max RAM Limit" - required: true - description: "Maximum amount of memory the PostgreSQL container can consume." - value: "8192Mi" - - - name: "MEMCACHED_MEM_LIMIT" - displayName: "Memcached Max RAM Limit" - required: true - description: "Maximum amount of memory the Memcached container can consume." - value: "256Mi" - - - name: "POSTGRESQL_IMG_NAME" - displayName: "PostgreSQL Image Name" - description: "This is the PostgreSQL image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql" - - - name: "POSTGRESQL_IMG_TAG" - displayName: "PostgreSQL Image Tag" - description: "This is the PostgreSQL image tag/version requested to deploy." - value: "latest" - - - name: "MEMCACHED_IMG_NAME" - displayName: "Memcached Image Name" - description: "This is the Memcached image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached" - - - name: "MEMCACHED_IMG_TAG" - displayName: "Memcached Image Tag" - description: "This is the Memcached image tag/version requested to deploy." - value: "latest" - - - name: "APPLICATION_IMG_NAME" - displayName: "Application Image Name" - description: "This is the Application image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app" - - - name: "APPLICATION_IMG_TAG" - displayName: "Application Image Tag" - description: "This is the Application image tag/version requested to deploy." - value: "latest" - - - name: "APPLICATION_DOMAIN" - displayName: "Application Hostname" - description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." - value: "" - - - name: "APPLICATION_INIT_DELAY" - displayName: "Application Init Delay" - required: true - description: "Delay in seconds before we attempt to initialize the application." - value: "15" - - - name: "APPLICATION_VOLUME_CAPACITY" - displayName: "Application Volume Capacity" - required: true - description: "Volume space available for application data." - value: "5Gi" - - - name: "APPLICATION_REGION_VOLUME_CAPACITY" - displayName: "Application Region Volume Capacity" - required: true - description: "Volume space available for region application data." - value: "5Gi" - - - name: "DATABASE_VOLUME_CAPACITY" - displayName: "Database Volume Capacity" - required: true - description: "Volume space available for database." - value: "15Gi" +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. 
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for the application.
+ value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL Database Name
+ required: true
+ description: The database to be used by the Ansible container.
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible Admin User Password
+ required: true
+ description: The password for the Ansible container admin user.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container.
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server.
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for the database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+ description: The name of the httpd DBus API service.
+ value: httpd-dbus-api
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json
index 5e7585eeb..1772dbbcf 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json
@@ -5,16 +5,16 @@
 "name": "mariadb-ephemeral",
 "annotations": {
 "openshift.io/display-name": "MariaDB (Ephemeral)",
- "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "description": "MariaDB database service, without persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mariadb", "tags": "database,mariadb", "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", "openshift.io/provider-display-name": "Red Hat, Inc.", - "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md", "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mariadb-persistent-template" }, @@ -82,7 +82,7 @@ ], "from": { "kind": "ImageStreamTag", - "name": "mariadb:10.1", + "name": "mariadb:${MARIADB_VERSION}", "namespace": "${NAMESPACE}" } } @@ -242,6 +242,13 @@ "description": "Name of the MariaDB database accessed.", "value": "sampledb", "required": true + }, + { + "name": "MARIADB_VERSION", + "displayName": "Version of MariaDB Image", + "description": "Version of MariaDB image to be used (10.0, 10.1, 10.2 or latest).", + "value": "10.2", + "required": true } ] } diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json index 217ef11dd..8424ecbc8 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json @@ -4,17 +4,17 @@ "metadata": { "name": "mariadb-persistent", "annotations": { - "openshift.io/display-name": "MariaDB (Persistent)", - "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "MariaDB", + "description": "MariaDB database service, with persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mariadb", "tags": "database,mariadb", "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", "openshift.io/provider-display-name": "Red Hat, Inc.", - "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md", "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mariadb-persistent-template" }, @@ -99,7 +99,7 @@ ], "from": { "kind": "ImageStreamTag", - "name": "mariadb:10.1", + "name": "mariadb:${MARIADB_VERSION}", "namespace": "${NAMESPACE}" } } @@ -261,6 +261,13 @@ "required": true }, { + "name": "MARIADB_VERSION", + "displayName": "Version of MariaDB Image", + "description": "Version of MariaDB image to be used (10.0, 10.1, 10.2 or latest).", + "value": "10.2", + "required": true + }, + { "name": "VOLUME_CAPACITY", "displayName": "Volume Capacity", "description": "Volume space available for data, e.g. 512Mi, 2Gi.", diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json index 97e4128a4..4e3e64d48 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mongodb-persistent", "annotations": { - "openshift.io/display-name": "MongoDB (Persistent)", + "openshift.io/display-name": "MongoDB", "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. 
You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mongodb", "tags": "database,mongodb", diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json index c0946416d..bed940a37 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json @@ -5,7 +5,7 @@ "name": "mysql-ephemeral", "annotations": { "openshift.io/display-name": "MySQL (Ephemeral)", - "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mysql-database", "tags": "database,mysql", "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mysql-ephemeral-template" }, diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json index 48ac114fd..85adde65d 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json @@ -4,8 +4,8 @@ "metadata": { "name": "mysql-persistent", "annotations": { - "openshift.io/display-name": "MySQL (Persistent)", - "description": "MySQL database service, with persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "MySQL", + "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", "tags": "database,mysql", "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mysql-persistent-template" }, diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json index 7c419f1ae..f29698d0c 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json @@ -5,7 +5,7 @@ "name": "postgresql-ephemeral", "annotations": { "openshift.io/display-name": "PostgreSQL (Ephemeral)", - "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-postgresql", "tags": "database,postgresql", "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.", "labels": { "template": "postgresql-ephemeral-template" }, @@ -127,11 +127,11 @@ } }, "livenessProbe": { + "exec": { + "command": [ "/bin/sh", "-i", "-c", "pg_isready -h 127.0.0.1 -p 5432" ] + }, "timeoutSeconds": 1, - "initialDelaySeconds": 30, - "tcpSocket": { - "port": 5432 - } + "initialDelaySeconds": 30 }, "env": [ { @@ -245,8 +245,8 @@ { "name": "POSTGRESQL_VERSION", "displayName": "Version of PostgreSQL Image", - "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).", - "value": "9.5", + "description": "Version of PostgreSQL image to be used (9.4, 9.5, 9.6 or latest).", + "value": "9.6", "required": true } ] diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json index 8a2d23907..7feeb704a 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json @@ -4,8 +4,8 @@ "metadata": { "name": "postgresql-persistent", "annotations": { - "openshift.io/display-name": "PostgreSQL (Persistent)", - "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "PostgreSQL", + "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-postgresql", "tags": "database,postgresql", "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.", "labels": { "template": "postgresql-persistent-template" }, @@ -144,11 +144,11 @@ } }, "livenessProbe": { + "exec": { + "command": [ "/bin/sh", "-i", "-c", "pg_isready -h 127.0.0.1 -p 5432" ] + }, "timeoutSeconds": 1, - "initialDelaySeconds": 30, - "tcpSocket": { - "port": 5432 - } + "initialDelaySeconds": 30 }, "env": [ { @@ -269,8 +269,8 @@ { "name": "POSTGRESQL_VERSION", "displayName": "Version of PostgreSQL Image", - "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).", - "value": "9.5", + "description": "Version of PostgreSQL image to be used (9.4, 9.5, 9.6 or latest).", + "value": "9.6", "required": true } ] diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json index e0e0a88d5..d1103d3af 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "redis-persistent", "annotations": { - "openshift.io/display-name": "Redis (Persistent)", + "openshift.io/display-name": "Redis", "description": "Redis in-memory data structure store, with persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-redis", "tags": "database,redis", diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json index e7af160d9..924c2884b 100644 --- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json @@ -44,7 +44,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/httpd-24-centos7:latest" + "name": "docker.io/centos/httpd-24-centos7:latest" } } ] @@ -91,7 +91,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/ruby-20-centos7:latest" + "name": "docker.io/openshift/ruby-20-centos7:latest" } }, { @@ -108,7 +108,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/ruby-22-centos7:latest" + "name": "docker.io/centos/ruby-22-centos7:latest" } }, { @@ -125,7 +125,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/ruby-23-centos7:latest" + "name": "docker.io/centos/ruby-23-centos7:latest" } }, { @@ -142,7 +142,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/ruby-24-centos7:latest" + "name": "docker.io/centos/ruby-24-centos7:latest" } } ] @@ -164,7 +164,7 @@ "annotations": { "openshift.io/display-name": "Node.js (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", + "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", "supports":"nodejs", @@ -172,7 +172,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "6" + "name": "8" } }, { @@ -189,7 +189,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/nodejs-010-centos7:latest" + "name": "docker.io/openshift/nodejs-010-centos7:latest" } }, { @@ -206,7 +206,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/nodejs-4-centos7:latest" + "name": "docker.io/centos/nodejs-4-centos7:latest" } }, { @@ -223,7 +223,23 @@ }, "from": { "kind": "DockerImage", - "name": "centos/nodejs-6-centos7:latest" + "name": "docker.io/centos/nodejs-6-centos7:latest" + } + }, + { + "name": "8", + "annotations": { + "openshift.io/display-name": "Node.js 8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 8 applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "8", + "sampleRepo": "https://github.com/openshift/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nodejs-8-centos7:latest" } } ] @@ -270,7 +286,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/perl-516-centos7:latest" + "name": "docker.io/openshift/perl-516-centos7:latest" } }, { @@ -287,7 +303,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/perl-520-centos7:latest" + "name": "docker.io/centos/perl-520-centos7:latest" } }, { @@ -304,7 +320,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/perl-524-centos7:latest" + "name": "docker.io/centos/perl-524-centos7:latest" } } ] @@ -326,7 +342,7 @@ "annotations": { "openshift.io/display-name": "PHP (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", + "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", @@ -334,7 +350,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "7.0" + "name": "7.1" } }, { @@ -351,7 +367,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/php-55-centos7:latest" + "name": "docker.io/openshift/php-55-centos7:latest" } }, { @@ -368,7 +384,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/php-56-centos7:latest" + "name": "docker.io/centos/php-56-centos7:latest" } }, { @@ -385,7 +401,24 @@ }, "from": { "kind": "DockerImage", - "name": "centos/php-70-centos7:latest" + "name": "docker.io/centos/php-70-centos7:latest" + } + }, + { + "name": "7.1", + "annotations": { + "openshift.io/display-name": "PHP 7.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.1,php", + "version": "7.1", + "sampleRepo": "https://github.com/openshift/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/php-71-centos7:latest" } } ] @@ -407,7 +440,7 @@ "annotations": { "openshift.io/display-name": "Python (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Python applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -415,7 +448,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "3.5" + "name": "3.6" } }, { @@ -432,7 +465,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/python-33-centos7:latest" + "name": "docker.io/openshift/python-33-centos7:latest" } }, { @@ -449,7 +482,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-27-centos7:latest" + "name": "docker.io/centos/python-27-centos7:latest" } }, { @@ -466,7 +499,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-34-centos7:latest" + "name": "docker.io/centos/python-34-centos7:latest" } }, { @@ -483,7 +516,24 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-35-centos7:latest" + "name": "docker.io/centos/python-35-centos7:latest" + } + }, + { + "name": "3.6", + "annotations": { + "openshift.io/display-name": "Python 3.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Python 3.6 applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.6,python", + "version": "3.6", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/python-36-centos7:latest" } } ] @@ -530,7 +580,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-81-centos7:latest" + "name": "docker.io/openshift/wildfly-81-centos7:latest" } }, { @@ -547,7 +597,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-90-centos7:latest" + "name": "docker.io/openshift/wildfly-90-centos7:latest" } }, { @@ -564,7 +614,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-100-centos7:latest" + "name": "docker.io/openshift/wildfly-100-centos7:latest" } }, { @@ -581,7 +631,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-101-centos7:latest" + "name": "docker.io/openshift/wildfly-101-centos7:latest" } } ] @@ -624,7 +674,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/mysql-55-centos7:latest" + "name": "docker.io/openshift/mysql-55-centos7:latest" } }, { @@ -639,7 +689,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/mysql-56-centos7:latest" + "name": "docker.io/centos/mysql-56-centos7:latest" } }, { @@ -654,7 +704,88 @@ }, "from": { "kind": "DockerImage", - "name": "centos/mysql-57-centos7:latest" + "name": "docker.io/centos/mysql-57-centos7:latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "nginx", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (nginx)" + } + }, + "spec": { + "tags": [ + { + "name": "1.8", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.8/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.8" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nginx-18-centos7:latest" + } + }, + { + "name": "1.10", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.10", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.10/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.10" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nginx-110-centos7:latest" + } + }, + { + "name": "1.12", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.12", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.12" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nginx-112-centos7:latest" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Nginx available on OpenShift, including major versions updates.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "1.12" } } ] @@ -676,13 +807,13 @@ "annotations": { "openshift.io/display-name": "MariaDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", + "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", - "tags": "mariadb" + "tags": "database,mariadb" }, "from": { "kind": "ImageStreamTag", - "name": "10.1" + "name": "10.2" } }, { @@ -692,12 +823,27 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", - "tags": "mariadb", + "tags": "database,mariadb", "version": "10.1" }, "from": { "kind": "DockerImage", - "name": "centos/mariadb-101-centos7:latest" + "name": "docker.io/centos/mariadb-101-centos7:latest" + } + }, + { + "name": "10.2", + "annotations": { + "openshift.io/display-name": "MariaDB 10.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MariaDB 10.2 database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.", + "iconClass": "icon-mariadb", + "tags": "database,mariadb", + "version": "10.2" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/mariadb-102-centos7:latest" } } ] @@ -719,13 +865,13 @@ "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", + "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", - "tags": "postgresql" + "tags": "database,postgresql" }, "from": { "kind": "ImageStreamTag", - "name": "9.5" + "name": "9.6" } }, { @@ -733,14 +879,14 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.2", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", + "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2/README.md.", "iconClass": "icon-postgresql", "tags": "hidden,postgresql", "version": "9.2" }, "from": { "kind": "DockerImage", - "name": "openshift/postgresql-92-centos7:latest" + "name": "docker.io/openshift/postgresql-92-centos7:latest" } }, { @@ -748,14 +894,14 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.4", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", + "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.4" }, "from": { "kind": "DockerImage", - "name": "centos/postgresql-94-centos7:latest" + "name": "docker.io/centos/postgresql-94-centos7:latest" } }, { @@ -763,14 +909,29 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.5", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.5 database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", + "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.5" }, "from": { "kind": "DockerImage", - "name": "centos/postgresql-95-centos7:latest" + "name": "docker.io/centos/postgresql-95-centos7:latest" + } + }, + { + "name": "9.6", + "annotations": { + "openshift.io/display-name": "PostgreSQL 9.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a PostgreSQL 9.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.", + "iconClass": "icon-postgresql", + "tags": "database,postgresql", + "version": "9.6" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/postgresql-96-centos7:latest" } } ] @@ -792,13 +953,13 @@ "annotations": { "openshift.io/display-name": "MongoDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", + "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", - "tags": "mongodb" + "tags": "database,mongodb" }, "from": { "kind": "ImageStreamTag", - "name": "3.2" + "name": "3.4" } }, { @@ -813,7 +974,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/mongodb-24-centos7:latest" + "name": "docker.io/openshift/mongodb-24-centos7:latest" } }, { @@ -823,12 +984,12 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "2.6" }, "from": { "kind": "DockerImage", - "name": "centos/mongodb-26-centos7:latest" + "name": "docker.io/centos/mongodb-26-centos7:latest" } }, { @@ -838,12 +999,27 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 3.2 database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "3.2" }, "from": { "kind": "DockerImage", - "name": "centos/mongodb-32-centos7:latest" + "name": "docker.io/centos/mongodb-32-centos7:latest" + } + }, + { + "name": "3.4", + "annotations": { + "openshift.io/display-name": "MongoDB 3.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MongoDB 3.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.", + "iconClass": "icon-mongodb", + "tags": "database,mongodb", + "version": "3.4" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/mongodb-34-centos7:latest" } } ] @@ -886,7 +1062,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/redis-32-centos7:latest" + "name": "docker.io/centos/redis-32-centos7:latest" } } ] @@ -929,7 +1105,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/jenkins-1-centos7:latest" + "name": "docker.io/openshift/jenkins-1-centos7:latest" } }, { @@ -944,7 +1120,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/jenkins-2-centos7:latest" + "name": "docker.io/openshift/jenkins-2-centos7:v3.9" } } ] diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json index 2b082fc75..af319beed 100644 --- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json @@ -164,7 +164,7 @@ "annotations": { "openshift.io/display-name": "Node.js (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", + "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", "supports":"nodejs", @@ -172,7 +172,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "6" + "name": "8" } }, { @@ -225,6 +225,22 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest" } + }, + { + "name": "8", + "annotations": { + "openshift.io/display-name": "Node.js 8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 8 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "8", + "sampleRepo": "https://github.com/openshift/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nodejs-8-rhel7:latest" + } } ] } @@ -326,7 +342,7 @@ "annotations": { "openshift.io/display-name": "PHP (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", + "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", @@ -334,7 +350,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "7.0" + "name": "7.1" } }, { @@ -387,6 +403,23 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/php-70-rhel7:latest" } + }, + { + "name": "7.1", + "annotations": { + "openshift.io/display-name": "PHP 7.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.1 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.1,php", + "version": "7.1", + "sampleRepo": "https://github.com/openshift/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/php-71-rhel7:latest" + } } ] } @@ -407,7 +440,7 @@ "annotations": { "openshift.io/display-name": "Python (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -415,7 +448,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "3.5" + "name": "3.6" } }, { @@ -485,6 +518,23 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest" } + }, + { + "name": "3.6", + "annotations": { + "openshift.io/display-name": "Python 3.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.6,python", + "version": "3.6", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest" + } } ] } @@ -566,6 +616,87 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "nginx", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (nginx)" + } + }, + "spec": { + "tags": [ + { + "name": "1.8", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.8/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nginx-18-rhel7:latest" + } + }, + { + "name": "1.10", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.10", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.10/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.10" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nginx-110-rhel7:latest" + } + }, + { + "name": "1.12", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.12", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.12" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nginx-112-rhel7:latest" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Nginx available on OpenShift, including major versions updates.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "1.12" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "mariadb", "annotations": { "openshift.io/display-name": "MariaDB" @@ -578,13 +709,13 @@ "annotations": { "openshift.io/display-name": "MariaDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", + "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", - "tags": "mariadb" + "tags": "database,mariadb" }, "from": { "kind": "ImageStreamTag", - "name": "10.1" + "name": "10.2" } }, { @@ -594,13 +725,28 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", - "tags": "mariadb", + "tags": "database,mariadb", "version": "10.1" }, "from": { "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/mariadb-101-rhel7:latest" } + }, + { + "name": "10.2", + "annotations": { + "openshift.io/display-name": "MariaDB 10.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MariaDB 10.2 database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.", + "iconClass": "icon-mariadb", + "tags": "database,mariadb", + "version": "10.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/mariadb-102-rhel7:latest" + } } ] } @@ -621,13 +767,13 @@ "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", + "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", - "tags": "postgresql" + "tags": "database,postgresql" }, "from": { "kind": "ImageStreamTag", - "name": "9.5" + "name": "9.6" } }, { @@ -635,7 +781,7 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.2", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", + "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2/README.md.", "iconClass": "icon-postgresql", "tags": "hidden,postgresql", "version": "9.2" @@ -650,9 +796,9 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.4", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", + "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.4" }, "from": { @@ -665,15 +811,30 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.5", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", + "description": "Provides a PostgreSQL 9.5 database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.5" }, "from": { "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/postgresql-95-rhel7:latest" } + }, + { + "name": "9.6", + "annotations": { + "openshift.io/display-name": "PostgreSQL 9.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a PostgreSQL 9.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.", + "iconClass": "icon-postgresql", + "tags": "database,postgresql", + "version": "9.6" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/postgresql-96-rhel7:latest" + } } ] } } @@ -694,13 +855,13 @@ "annotations": { "openshift.io/display-name": "MongoDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", + "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" }, "from": { "kind": "ImageStreamTag", - "name": "3.2" + "name": "3.4" } }, { @@ -725,7 +886,7 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "2.6" }, "from": { @@ -740,13 +901,28 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "3.2" }, "from": { "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/mongodb-32-rhel7:latest" } + }, + { + "name": "3.4", + "annotations": { + "openshift.io/display-name": "MongoDB 3.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MongoDB 3.4 database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.", + "iconClass": "icon-mongodb", + "tags": "database,mongodb", + "version": "3.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/mongodb-34-rhel7:latest" + } } ] } @@ -846,7 +1022,7 @@ }, "from": { "kind": "DockerImage", - "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest" + "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.9" } } ] diff --git a/roles/openshift_examples/files/examples/v3.9/latest b/roles/openshift_examples/files/examples/v3.9/latest deleted file mode 120000 index b9bc2fdcb..000000000 --- a/roles/openshift_examples/files/examples/v3.9/latest +++ /dev/null @@ -1 +0,0 @@ -latest
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md index 6d2ccbf7f..710d5f58d 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md @@ -18,6 +18,7 @@ instantiating them. * [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex). * [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex). * [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. For more information see the [source repository](https://github.com/openshift/httpd-ex). +* [Nginx](https://raw.githubusercontent.com/sclorg/nginx-ex/master/openshift/templates/nginx.json) - Provides a basic Nginx static content application. For more information see the [source repository](https://github.com/sclorg/nginx-ex). * [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex). * [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex). * [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex). diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json index 86ddc184a..8888f19d0 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "cakephp-mysql-persistent", "annotations": { - "openshift.io/display-name": "CakePHP + MySQL (Persistent)", + "openshift.io/display-name": "CakePHP + MySQL", "description": "An example CakePHP application with a MySQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.", "tags": "quickstart,php,cakephp", "iconClass": "icon-php", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-persistent" + "template": "cakephp-mysql-persistent", + "app": "cakephp-mysql-persistent" }, "objects": [ { @@ -208,6 +209,7 @@ "readinessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 3, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 @@ -216,6 +218,7 @@ "livenessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 30, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json index 3c964bd6a..2bf7acd8c 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-example" + "template": "cakephp-mysql-example", + "app": "cakephp-mysql-example" }, "objects": [ { @@ -208,6 +209,7 @@ "readinessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 3, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 @@ -216,6 +218,7 @@ "livenessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 30, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json index 0a10c5fbc..b29f8ba40 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "dancer-mysql-persistent", "annotations": { - "openshift.io/display-name": "Dancer + MySQL (Persistent)", + "openshift.io/display-name": "Dancer + MySQL", "description": "An example Dancer application with a MySQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "tags": "quickstart,perl,dancer", "iconClass": "icon-perl", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-persistent" + "template": "dancer-mysql-persistent", + "app": "dancer-mysql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json index 6122d5436..e76353764 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-example" + "template": "dancer-mysql-example", + "app": "dancer-mysql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json index f3b5838fa..7a0ab213a 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "django-psql-persistent", "annotations": { - "openshift.io/display-name": "Django + PostgreSQL (Persistent)", + "openshift.io/display-name": "Django + PostgreSQL", "description": "An example Django application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "tags": "quickstart,python,django", "iconClass": "icon-python", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-persistent" + "template": "django-psql-persistent", + "app": "django-psql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json index b21295df2..be3fc740c 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-example" + "template": "django-psql-example", + "app": "django-psql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json index 3771280bf..67ae3c751 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", "labels": { - "template": "httpd-example" + "template": "httpd-example", + "app": "httpd-example" }, "objects": [ { @@ -198,12 +199,7 @@ } }, "env": [ - ], - "resources": { - "limits": { - "memory": "${MEMORY_LIMIT}" - } - } + ] } ] } diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json index 28b4b9d81..87ae6ed14 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json @@ -15,6 +15,10 @@ } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. 
The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "labels": { + "app": "jenkins-ephemeral", + "template": "jenkins-ephemeral-template" + }, "objects": [ { "kind": "Route", @@ -275,10 +279,7 @@ "name": "JENKINS_IMAGE_STREAM_TAG", "displayName": "Jenkins ImageStreamTag", "description": "Name of the ImageStreamTag to be used for the Jenkins image.", - "value": "jenkins:latest" + "value": "jenkins:2" } - ], - "labels": { - "template": "jenkins-ephemeral-template" - } + ] } diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json index 4915bb12c..95d15b55f 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "jenkins-persistent", "annotations": { - "openshift.io/display-name": "Jenkins (Persistent)", + "openshift.io/display-name": "Jenkins", "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins", @@ -15,6 +15,10 @@ } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "labels": { + "app": "jenkins-persistent", + "template": "jenkins-persistent-template" + }, "objects": [ { "kind": "Route", @@ -299,10 +303,7 @@ "name": "JENKINS_IMAGE_STREAM_TAG", "displayName": "Jenkins ImageStreamTag", "description": "Name of the ImageStreamTag to be used for the Jenkins image.", - "value": "jenkins:latest" + "value": "jenkins:2" } - ], - "labels": { - "template": "jenkins-persistent-template" - } + ] }
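Most of the quickstart and Jenkins templates in this diff now carry an "app" label next to the existing "template" label, so everything a template instantiates can be selected in a single query. A minimal sketch of such a query as an Ansible task; the "my-project" namespace is a hypothetical value used only for illustration:

```
# "my-project" is an illustrative namespace, not part of this diff.
- name: List objects created from the persistent Jenkins template
  command: oc get all -l app=jenkins-persistent -n my-project
  register: jenkins_objects
  changed_when: false
```

diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nginx.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nginx.json new file mode 100644 index 000000000..84aa1f469 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nginx.json @@ -0,0 +1,283 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "nginx-example", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy", + "description": "An example Nginx HTTP server and a reverse proxy (nginx) application that serves static content. 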
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/nginx-ex/blob/master/README.md.", + "tags": "quickstart,nginx", + "iconClass": "icon-nginx", + "openshift.io/long-description": "This template defines resources needed to develop a static application served by Nginx HTTP server and a reverse proxy (nginx), including a build configuration and application deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/nginx-ex", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/nginx-ex/blob/master/README.md.", + "labels": { + "template": "nginx-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "${NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "${NAME}" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Defines how to build the application", + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "${NAMESPACE}", + "name": "nginx:${NGINX_VERSION}" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${NAME}:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "ConfigChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Defines how to deploy the application server", + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "nginx-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${NAME}" + }, + "template": { + "metadata": { + "name": "${NAME}", + "labels": { + "name": "${NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "nginx-example", + "image": " ", + "ports": [ + { + "containerPort": 8080 + } + ], + "readinessProbe": { + "timeoutSeconds": 3, + 
"initialDelaySeconds": 3, + "httpGet": { + "path": "/", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 30, + "httpGet": { + "path": "/", + "port": 8080 + } + }, + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "env": [ + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + } + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "NAME", + "displayName": "Name", + "description": "The name assigned to all of the frontend objects defined in this template.", + "required": true, + "value": "nginx-example" + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "required": true, + "value": "openshift" + }, + { + "name": "NGINX_VERSION", + "displayName": "NGINX Version", + "description": "Version of NGINX image to be used (1.12 by default).", + "required": true, + "value": "1.12" + }, + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "required": true, + "value": "512Mi" + }, + { + "name": "SOURCE_REPOSITORY_URL", + "displayName": "Git Repository URL", + "description": "The URL of the repository with your application source code.", + "required": true, + "value": "https://github.com/sclorg/nginx-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "displayName": "Git Reference", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." + }, + { + "name": "CONTEXT_DIR", + "displayName": "Context Directory", + "description": "Set this to the relative path to your project if it is not in the root of your repository." + }, + { + "name": "APPLICATION_DOMAIN", + "displayName": "Application Hostname", + "description": "The exposed hostname that will route to the nginx service, if left blank a value will be defaulted.", + "value": "" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "displayName": "GitHub Webhook Secret", + "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "GENERIC_WEBHOOK_SECRET", + "displayName": "Generic Webhook Secret", + "description": "A secret string used to configure the Generic webhook.", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json index 7f2a5d804..787f51361 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "nodejs-mongo-persistent", "annotations": { - "openshift.io/display-name": "Node.js + MongoDB (Persistent)", + "openshift.io/display-name": "Node.js + MongoDB", "description": "An example Node.js application with a MongoDB database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "tags": "quickstart,nodejs", "iconClass": "icon-nodejs", diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json index b3afae46e..0fcc540ab 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongodb-example" + "template": "nodejs-mongodb-example", + "app": "nodejs-mongodb-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json index 1c03be28a..9f40f250b 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "rails-pgsql-persistent", "annotations": { - "openshift.io/display-name": "Rails + PostgreSQL (Persistent)", + "openshift.io/display-name": "Rails + PostgreSQL", "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "tags": "quickstart,ruby,rails", "iconClass": "icon-ruby", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-pgsql-persistent" + "template": "rails-pgsql-persistent", + "app": "rails-pgsql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json index 240289d33..77d218aa5 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-postgresql-example" + "template": "rails-postgresql-example", + "app": "rails-postgresql-example" }, "objects": [ { diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml index f3fe2dcbe..9f46a4683 100644 --- a/roles/openshift_examples/meta/main.yml +++ b/roles/openshift_examples/meta/main.yml @@ -11,4 +11,6 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: 
+- role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 356317431..7787da4f0 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -13,18 +13,23 @@ # use it either due to changes introduced in Ansible 2.x. - name: Create local temp dir for OpenShift examples copy local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - become: False register: copy_examples_mktemp run_once: True +- name: Chmod local temp dir for OpenShift examples copy + local_action: command chmod 777 "{{ copy_examples_mktemp.stdout }}" + run_once: True + - name: Create tar of OpenShift examples local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" . args: # Disables the following warning: # Consider using unarchive module rather than running tar warn: no - become: False - register: copy_examples_tar + +- name: Chmod local temp tar of OpenShift examples + local_action: command chmod 744 "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" + run_once: True - name: Create the remote OpenShift examples directory file: @@ -38,7 +43,6 @@ dest: "{{ examples_base }}/" - name: Cleanup the OpenShift Examples temp dir - become: False local_action: file dest="{{ copy_examples_mktemp.stdout }}" state=absent # Done copying examples @@ -53,7 +57,7 @@ # RHEL and Centos image streams are mutually exclusive - name: Import RHEL streams command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} when: openshift_examples_load_rhel | bool with_items: - "{{ rhel_image_streams }}" @@ -63,7 +67,7 @@ - name: Import Centos Image streams command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} when: openshift_examples_load_centos | bool with_items: - "{{ centos_image_streams }}" @@ -73,7 +77,7 @@ - name: Import db templates command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }} when: openshift_examples_load_db_templates | bool register: oex_import_db_templates failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0" @@ -90,7 +94,7 @@ - "{{ quickstarts_base }}/django.json" - name: Remove defunct quickstart templates from openshift namespace - command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" + command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" with_items: - 
nodejs-example - cakephp-example @@ -102,7 +106,7 @@ - name: Import quickstart-templates command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }} when: openshift_examples_load_quickstarts | bool register: oex_import_quickstarts failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" @@ -116,7 +120,7 @@ - "{{ xpaas_templates_base }}/sso70-basic.json" - name: Remove old xPaas templates from openshift namespace - command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" + command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" with_items: - sso70-basic register: oex_delete_old_xpaas_templates @@ -125,7 +129,7 @@ - name: Import xPaas image streams command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }} when: openshift_examples_load_xpaas | bool register: oex_import_xpaas_streams failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0" @@ -133,7 +137,7 @@ - name: Import xPaas templates command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }} when: openshift_examples_load_xpaas | bool register: oex_import_xpaas_templates failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0" diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index 3ac55894f..6532d7fe2 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -1,20 +1,20 @@ --- - when: - - not openshift.common.is_atomic | bool + - not openshift_is_atomic | bool - r_openshift_excluder_install_ran is not defined block: - name: Install docker excluder - yum package: - name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" + name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" state: "{{ r_openshift_excluder_docker_package_state }}" when: - r_openshift_excluder_enable_docker_excluder | bool - ansible_pkg_mgr == "yum" register: result - until: result | success + until: result is succeeded # For DNF we do not need the "*" and if we add it, it causes an error because @@ -23,23 +23,23 @@ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432 - 
name: Install docker excluder - dnf package: - name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" state: "{{ r_openshift_excluder_docker_package_state }}" when: - r_openshift_excluder_enable_docker_excluder | bool - ansible_pkg_mgr == "dnf" register: result - until: result | success + until: result is succeeded - name: Install openshift excluder - yum package: - name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" + name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" state: "{{ r_openshift_excluder_package_state }}" when: - r_openshift_excluder_enable_openshift_excluder | bool - ansible_pkg_mgr == "yum" register: result - until: result | success + until: result is succeeded # For DNF we do not need the "*" and if we add it, it causes an error because # it's not a valid pkg_spec @@ -47,13 +47,13 @@ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432 - name: Install openshift excluder - dnf package: - name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" state: "{{ r_openshift_excluder_package_state }}" when: - r_openshift_excluder_enable_openshift_excluder | bool - ansible_pkg_mgr == "dnf" register: result - until: result | success + until: result is succeeded - set_fact: r_openshift_excluder_install_ran: True diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml index c35639c1b..22a3fcd3b 100644 --- a/roles/openshift_excluder/tasks/verify_excluder.yml +++ b/roles/openshift_excluder/tasks/verify_excluder.yml @@ -3,7 +3,7 @@ # - excluder - name: Get available excluder version repoquery: - name: "{{ excluder }}" + name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}" ignore_excluders: true register: repoquery_out @@ -29,4 +29,4 @@ msg: "Available {{ excluder }} version {{ excluder_version }} is higher than the upgrade target version" when: - excluder_version != '' - - excluder_version.split('.')[0:2] | join('.') | version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) + - excluder_version.split('.')[0:2] | join('.') is version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
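The `| success` and `| version_compare` filter-style tests being replaced here were deprecated in Ansible 2.5 in favor of `is`-based test syntax. A minimal before/after sketch of the migration; the trivial command is illustrative only:

```
# Deprecated filter-style tests, as removed throughout this diff:
- command: /bin/true
  register: result
  until: result | success
  retries: 3

# Ansible 2.5+ test syntax, as introduced here:
- command: /bin/true
  register: result
  until: result is succeeded
  retries: 3

# The same rewrite applies to version checks:
- debug:
    msg: "available version is newer than the upgrade target"
  when: "'3.9' is version_compare('3.7', '>', strict=True)"
```

diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md index c9c7b378c..402c3dc3e 100644 --- a/roles/openshift_expand_partition/README.md +++ b/roles/openshift_expand_partition/README.md @@ -45,7 +45,6 @@ space on /dev/xvda, and the file system will be expanded to fill the new partition space. - hosts: mynodes - become: no remote_user: root gather_facts: no roles: @@ -68,7 +67,6 @@ partition space. 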
* Create an ansible playbook, say `expandvar.yaml`: ``` - hosts: mynodes - become: no remote_user: root gather_facts: no roles: diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index b7acb0c5a..b38ebdfb4 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -1,16 +1,16 @@ --- - name: Ensure growpart is installed package: name=cloud-utils-growpart state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded - name: Determine if growpart is installed command: "rpm -q cloud-utils-growpart" register: has_growpart - failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout + failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout changed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Grow the partitions command: "growpart {{oep_drive}} {{oep_partition}}" diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 53a3bc87e..a223ffba6 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -1,8 +1,13 @@ --- +openshift_client_binary: "{{ (openshift_is_containerized | bool) | ternary('/usr/local/bin/oc', 'oc') }}" + openshift_cli_image_dict: origin: 'openshift/origin' openshift-enterprise: 'openshift3/ose' +repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}" +repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}" + openshift_hosted_images_dict: origin: 'openshift/origin-${component}:${version}' openshift-enterprise: 'openshift3/ose-${component}:${version}' @@ -94,11 +99,6 @@ openshift_prometheus_alertbuffer_storage_access_modes: openshift_prometheus_alertbuffer_storage_create_pv: True openshift_prometheus_alertbuffer_storage_create_pvc: False - -openshift_router_selector: "region=infra" -openshift_hosted_router_selector: "{{ openshift_router_selector }}" -openshift_hosted_registry_selector: "{{ openshift_router_selector }}" - openshift_service_type_dict: origin: origin openshift-enterprise: atomic-openshift diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 520c00340..452cc4ef6 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -15,8 +15,10 @@ import os import yaml import struct import socket +import ipaddress from distutils.util import strtobool from distutils.version import LooseVersion +from ansible.module_utils.six import u from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser @@ -69,22 +71,6 @@ def migrate_common_facts(facts): return facts -def migrate_node_facts(facts): - """ Migrate facts from various roles into node """ - params = { - 'common': ('dns_ip'), - } - if 'node' not in facts: - facts['node'] = {} - # pylint: disable=consider-iterating-dictionary - for role in params.keys(): - if role in facts: - for param in params[role]: - if param in facts[role]: - facts['node'][param] = facts[role].pop(param) - return 
facts - - def migrate_admission_plugin_facts(facts): """ Apply migrations for admission plugin facts """ if 'master' in facts: @@ -104,7 +90,6 @@ def migrate_local_facts(facts): """ Apply migrations of local facts """ migrated_facts = copy.deepcopy(facts) migrated_facts = migrate_common_facts(migrated_facts) - migrated_facts = migrate_node_facts(migrated_facts) migrated_facts = migrate_admission_plugin_facts(migrated_facts) return migrated_facts @@ -458,7 +443,6 @@ def set_url_facts_if_unset(facts): etcd_urls = [] if etcd_hosts != '': facts['master']['etcd_port'] = ports['etcd'] - facts['master']['embedded_etcd'] = False for host in etcd_hosts: etcd_urls.append(format_url(use_ssl['etcd'], host, ports['etcd'])) @@ -537,8 +521,7 @@ def set_aggregate_facts(facts): def set_deployment_facts_if_unset(facts): """ Set Facts that vary based on deployment_type. This currently - includes master.registry_url, node.registry_url, - node.storage_plugin_deps + includes master.registry_url Args: facts (dict): existing facts @@ -546,29 +529,17 @@ def set_deployment_facts_if_unset(facts): dict: the facts dict updated with the generated deployment_type facts """ - # disabled to avoid breaking up facts related to deployment type into - # multiple methods for now. - # pylint: disable=too-many-statements, too-many-branches - for role in ('master', 'node'): - if role in facts: - deployment_type = facts['common']['deployment_type'] - if 'registry_url' not in facts[role]: - registry_url = 'openshift/origin-${component}:${version}' - if deployment_type == 'openshift-enterprise': - registry_url = 'openshift3/ose-${component}:${version}' - facts[role]['registry_url'] = registry_url - if 'master' in facts: deployment_type = facts['common']['deployment_type'] openshift_features = ['Builder', 'S2IBuilder', 'WebConsole'] if 'disabled_features' not in facts['master']: if facts['common']['deployment_subtype'] == 'registry': facts['master']['disabled_features'] = openshift_features - - if 'node' in facts: - deployment_type = facts['common']['deployment_type'] - if 'storage_plugin_deps' not in facts['node']: - facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] + if 'registry_url' not in facts['master']: + registry_url = 'openshift/origin-${component}:${version}' + if deployment_type == 'openshift-enterprise': + registry_url = 'openshift3/ose-${component}:${version}' + facts['master']['registry_url'] = registry_url return facts @@ -687,26 +658,6 @@ def set_nodename(facts): return facts -def migrate_oauth_template_facts(facts): - """ - Migrate an old oauth template fact to a newer format if it's present. - - The legacy 'oauth_template' fact was just a filename, and assumed you were - setting the 'login' template. - - The new pluralized 'oauth_templates' fact is a dict mapping the template - name to a filename. - - Simplify the code after this by merging the old fact into the new. 
- """ - if 'master' in facts and 'oauth_template' in facts['master']: - if 'oauth_templates' not in facts['master']: - facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']} - elif 'login' not in facts['master']['oauth_templates']: - facts['master']['oauth_templates']['login'] = facts['master']['oauth_template'] - return facts - - def format_url(use_ssl, hostname, port, path=''): """ Format url based on ssl flag, hostname, port and path @@ -793,62 +744,6 @@ def get_current_config(facts): return current_config -def build_kubelet_args(facts): - """Build node kubelet_args - -In the node-config.yaml file, kubeletArgument sub-keys have their -values provided as a list. Hence the gratuitous use of ['foo'] below. - """ - cloud_cfg_path = os.path.join( - facts['common']['config_base'], - 'cloudprovider') - - # We only have to do this stuff on hosts that are nodes - if 'node' in facts: - # Any changes to the kubeletArguments parameter are stored - # here first. - kubelet_args = {} - - if 'cloudprovider' in facts: - # EVERY cloud is special <3 - if 'kind' in facts['cloudprovider']: - if facts['cloudprovider']['kind'] == 'aws': - kubelet_args['cloud-provider'] = ['aws'] - kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf'] - if facts['cloudprovider']['kind'] == 'openstack': - kubelet_args['cloud-provider'] = ['openstack'] - kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] - if facts['cloudprovider']['kind'] == 'gce': - kubelet_args['cloud-provider'] = ['gce'] - kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] - - # Automatically add node-labels to the kubeletArguments - # parameter. See BZ1359848 for additional details. - # - # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848 - if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict): - # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns - # into ['foo=bar', 'a=b'] - # - # On the openshift_node_labels inventory variable we loop - # over each key-value tuple (from .items()) and join the - # key to the value with an '=' character, this produces a - # list. - # - # map() seems to be returning an itertools.imap object - # instead of a list. We cast it to a list ourselves. - # pylint: disable=unnecessary-lambda - labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items())) - if labels_str != '': - kubelet_args['node-labels'] = labels_str - - # If we've added items to the kubelet_args dict then we need - # to merge the new items back into the main facts object. - if kubelet_args != {}: - facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, []) - return facts - - def build_controller_args(facts): """ Build master controller_args """ cloud_cfg_path = os.path.join(facts['common']['config_base'], @@ -974,7 +869,7 @@ def get_openshift_version(facts): if os.path.isfile('/usr/bin/openshift'): _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405 version = parse_openshift_version(output) - elif 'common' in facts and 'is_containerized' in facts['common']: + else: version = get_container_openshift_version(facts) # Handle containerized masters that have not yet been configured as a node. 
@@ -1253,6 +1148,8 @@ def set_proxy_facts(facts): if 'no_proxy_internal_hostnames' in common: common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) # We always add local dns domain and ourselves no matter what + kube_svc_ip = str(ipaddress.ip_network(u(common['portal_net']))[1]) + common['no_proxy'].append(kube_svc_ip) common['no_proxy'].append('.' + common['dns_domain']) common['no_proxy'].append('.svc') common['no_proxy'].append(common['hostname']) @@ -1365,72 +1262,7 @@ def set_container_facts_if_unset(facts): dict: the facts dict updated with the generated containerization facts """ - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'openshift-enterprise': - master_image = 'openshift3/ose' - node_image = 'openshift3/node' - ovs_image = 'openshift3/openvswitch' - pod_image = 'openshift3/ose-pod' - router_image = 'openshift3/ose-haproxy-router' - registry_image = 'openshift3/ose-docker-registry' - deployer_image = 'openshift3/ose-deployer' - else: - master_image = 'openshift/origin' - node_image = 'openshift/node' - ovs_image = 'openshift/openvswitch' - pod_image = 'openshift/origin-pod' - router_image = 'openshift/origin-haproxy-router' - registry_image = 'openshift/origin-docker-registry' - deployer_image = 'openshift/origin-deployer' - - facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') - - if 'is_containerized' not in facts['common']: - facts['common']['is_containerized'] = facts['common']['is_atomic'] - if 'pod_image' not in facts['common']: - facts['common']['pod_image'] = pod_image - if 'router_image' not in facts['common']: - facts['common']['router_image'] = router_image - if 'registry_image' not in facts['common']: - facts['common']['registry_image'] = registry_image - if 'deployer_image' not in facts['common']: - facts['common']['deployer_image'] = deployer_image - if 'master' in facts and 'master_image' not in facts['master']: - facts['master']['master_image'] = master_image - facts['master']['master_system_image'] = master_image - if 'node' in facts: - if 'node_image' not in facts['node']: - facts['node']['node_image'] = node_image - facts['node']['node_system_image'] = node_image - if 'ovs_image' not in facts['node']: - facts['node']['ovs_image'] = ovs_image - facts['node']['ovs_system_image'] = ovs_image - - if safe_get_bool(facts['common']['is_containerized']): - facts['common']['client_binary'] = '/usr/local/bin/oc' - - return facts - -def set_installed_variant_rpm_facts(facts): - """ Set RPM facts of installed variant - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with installed_variant_rpms - """ - installed_rpms = [] - for base_rpm in ['openshift', 'atomic-openshift', 'origin']: - optional_rpms = ['master', 'node', 'clients', 'sdn-ovs'] - variant_rpms = [base_rpm] + \ - ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \ - ['tuned-profiles-%s-node' % base_rpm] - for rpm in variant_rpms: - exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405 - if exit_code == 0: - installed_rpms.append(rpm) - - facts['common']['installed_variant_rpms'] = installed_rpms return facts @@ -1539,14 +1371,12 @@ class OpenShiftFacts(object): facts = merge_facts(facts, local_facts, additive_facts_to_overwrite) - facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = set_deployment_facts_if_unset(facts) facts = 
set_sdn_facts_if_unset(facts, self.system_facts) facts = set_container_facts_if_unset(facts) - facts = build_kubelet_args(facts) facts = build_controller_args(facts) facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) @@ -1554,8 +1384,6 @@ class OpenShiftFacts(object): facts = set_proxy_facts(facts) facts = set_builddefaults_facts(facts) facts = set_buildoverrides_facts(facts) - if not safe_get_bool(facts['common']['is_containerized']): - facts = set_installed_variant_rpm_facts(facts) facts = set_nodename(facts) return dict(openshift=facts) @@ -1583,7 +1411,6 @@ class OpenShiftFacts(object): hostname=hostname, public_hostname=hostname, portal_net='172.30.0.0/16', - client_binary='oc', dns_domain='cluster.local', config_base='/etc/origin') @@ -1595,7 +1422,7 @@ class OpenShiftFacts(object): console_port='8443', etcd_use_ssl=True, etcd_hosts='', etcd_port='4001', portal_net='172.30.0.0/16', - embedded_etcd=True, embedded_kube=True, + embedded_kube=True, embedded_dns=True, bind_addr='0.0.0.0', session_max_seconds=3600, @@ -1607,12 +1434,6 @@ class OpenShiftFacts(object): dynamic_provisioning_enabled=True, max_requests_inflight=500) - if 'node' in roles: - defaults['node'] = dict(labels={}, annotations={}, - iptables_sync_period='30s', - local_quota_per_fsgroup="", - set_node_ip=False) - if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) @@ -1645,6 +1466,11 @@ class OpenShiftFacts(object): if metadata: metadata['project']['attributes'].pop('sshKeys', None) metadata['instance'].pop('serviceAccounts', None) + elif bios_vendor == 'Amazon EC2': + # Adds support for Amazon EC2 C5 instance types + provider = 'aws' + metadata_url = 'http://169.254.169.254/latest/meta-data/' + metadata = get_provider_metadata(metadata_url) elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): provider = 'aws' metadata_url = 'http://169.254.169.254/latest/meta-data/' diff --git a/roles/openshift_gcp/files/bootstrap-script.sh b/roles/openshift_gcp/files/bootstrap-script.sh new file mode 100644 index 000000000..0c3f1999b --- /dev/null +++ b/roles/openshift_gcp/files/bootstrap-script.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# This script is a startup script for bootstrapping a GCP node +# from a config stored in the project metadata. It loops until +# it finds the script and then starts the origin-node service. +# TODO: generalize + +set -o errexit +set -o nounset +set -o pipefail + +if [[ "$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap" -H "Metadata-Flavor: Google" )" != "true" ]]; then + echo "info: Bootstrap is not enabled for this instance, skipping" 1>&2 + exit 0 +fi + +if ! id=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-id" -H "Metadata-Flavor: Google" ); then + echo "error: Unable to get cluster-id for instance from cluster metadata" 1>&2 + exit 1 +fi + +if ! node_group=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/node-group" -H "Metadata-Flavor: Google" ); then + echo "error: Unable to get node-group for instance from cluster metadata" 1>&2 + exit 1 +fi + +if ! 
config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then + while true; do + if config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/project/attributes/${id}-bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then + break + fi + echo "info: waiting for ${id}-bootstrap-config to become available in cluster metadata ..." 1>&2 + sleep 5 + done +fi + +echo "Got bootstrap config from metadata" +mkdir -p /etc/origin/node +echo -n "${config}" > /etc/origin/node/bootstrap.kubeconfig +echo "BOOTSTRAP_CONFIG_NAME=node-config-${node_group}" >> /etc/sysconfig/origin-node +systemctl enable origin-node +systemctl start origin-node diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.service b/roles/openshift_gcp/files/openshift-bootstrap-update.service new file mode 100644 index 000000000..c65b1b34e --- /dev/null +++ b/roles/openshift_gcp/files/openshift-bootstrap-update.service @@ -0,0 +1,7 @@ +[Unit] +Description=Update the OpenShift node bootstrap configuration + +[Service] +Type=oneshot +ExecStart=/usr/bin/openshift-bootstrap-update +User=root diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.timer b/roles/openshift_gcp/files/openshift-bootstrap-update.timer new file mode 100644 index 000000000..1a517b33e --- /dev/null +++ b/roles/openshift_gcp/files/openshift-bootstrap-update.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Update the OpenShift node bootstrap credentials hourly + +[Timer] +OnBootSec=30s +OnCalendar=hourly +Persistent=true + +[Install] +WantedBy=timers.target
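+
+# Persistent=true stores the last trigger time on disk, so hourly runs missed while
+# the instance was down are made up as soon as the timer is next activated;
+# OnBootSec adds an extra trigger 30 seconds after every boot.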
\ No newline at end of file diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp/files/partition.conf index b87e5e0b6..76e65ab9c 100644 --- a/roles/openshift_gcp_image_prep/files/partition.conf +++ b/roles/openshift_gcp/files/partition.conf @@ -1,3 +1,3 @@ [Service] ExecStartPost=-/usr/bin/growpart /dev/sda 1 -ExecStartPost=-/sbin/xfs_growfs / +ExecStartPost=-/sbin/xfs_growfs /
\ No newline at end of file
diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_gcp/meta/main.yml
index dd2de07bc..5e428f8de 100644
--- a/roles/openshift_hosted_facts/meta/main.yml
+++ b/roles/openshift_gcp/meta/main.yml
@@ -1,15 +1,17 @@
 ---
 galaxy_info:
-  author: Andrew Butcher
-  description: OpenShift Hosted Facts
+  author: Clayton Coleman
+  description:
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 1.9
+  min_ansible_version: 1.8
   platforms:
   - name: EL
     versions:
     - 7
   categories:
   - cloud
+  - system
 dependencies:
-- role: openshift_facts
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_gcp/tasks/add_custom_repositories.yml b/roles/openshift_gcp/tasks/add_custom_repositories.yml
new file mode 100644
index 000000000..04718f78e
--- /dev/null
+++ b/roles/openshift_gcp/tasks/add_custom_repositories.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy custom repository client certificates
+  copy:
+    src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
+    dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
+  when: item.1.sslclientcert | default(false)
+  with_indexed_items: "{{ provision_custom_repositories }}"
+- name: Copy custom repository client keys
+  copy:
+    src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
+    dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
+  when: item.1.sslclientkey | default(false)
+  with_indexed_items: "{{ provision_custom_repositories }}"
+
+- name: Create any custom repos that are defined
+  template:
+    src: yum_repo.j2
+    dest: /etc/yum.repos.d/provision_custom_repositories.repo
+  when: provision_custom_repositories | length > 0
+  notify: refresh cache
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
index fee5ab618..2c6e2790a 100644
--- a/roles/openshift_gcp_image_prep/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
@@ -1,18 +1,10 @@
----
 # GCE instances are starting with xfs AND barrier=1, which is only for extfs.
+---
 - name: Remove barrier=1 from XFS fstab entries
-  lineinfile:
-    path: /etc/fstab
-    regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
-    line: '\1xfs\2 \4'
-    backrefs: yes
+  command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
 
 - name: Ensure the root filesystem has XFS group quota turned on
-  lineinfile:
-    path: /boot/grub2/grub.cfg
-    regexp: '^(.*)linux16 (.*)$'
-    line: '\1linux16 \2 rootflags=gquota'
-    backrefs: yes
+  command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
 
 - name: Ensure the root partition grows on startup
   copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_gcp/tasks/configure_master_bootstrap.yml b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
new file mode 100644
index 000000000..591cb593c
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
@@ -0,0 +1,36 @@
+#
+# These tasks configure the instance to periodically regenerate the node bootstrap
+# kubeconfig and publish it to the project metadata, keeping that metadata in sync
+# with the cluster's configuration. We then invoke a CSR approve on any nodes that
+# are waiting to join the cluster.
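+# The hourly timer installed below runs /usr/bin/openshift-bootstrap-update, which
+# republishes the kubeconfig that files/bootstrap-script.sh polls for on new instances.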
+#
+---
+- name: Copy bootstrap update timer unit
+  copy:
+    src: openshift-bootstrap-update.timer
+    dest: /etc/systemd/system/openshift-bootstrap-update.timer
+    owner: root
+    group: root
+    mode: 0664
+
+- name: Copy bootstrap update service unit
+  copy:
+    src: openshift-bootstrap-update.service
+    dest: /etc/systemd/system/openshift-bootstrap-update.service
+    owner: root
+    group: root
+    mode: 0664
+
+- name: Create bootstrap update script
+  template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
+
+- name: Start bootstrap update timer
+  systemd:
+    name: "openshift-bootstrap-update.timer"
+    state: started
+
+- name: Bootstrap all nodes that were identified with bootstrap metadata
+  run_once: true
+  oc_adm_csr:
+    nodes: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
+    timeout: 60
diff --git a/roles/openshift_gcp/tasks/configure_master_healthcheck.yml b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
new file mode 100644
index 000000000..aa9655977
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
@@ -0,0 +1,19 @@
+---
+- name: refresh yum cache
+  command: yum clean all
+  args:
+    warn: no
+  when: ansible_os_family == "RedHat"
+
+- name: install haproxy
+  package: name=haproxy state=present
+  register: result
+  until: '"failed" not in result'
+  retries: 10
+  delay: 10
+
+- name: configure haproxy
+  template: src=master_healthcheck.j2 dest=/etc/haproxy/haproxy.cfg
+
+- name: start and enable haproxy service
+  service: name=haproxy state=started enabled=yes
diff --git a/roles/openshift_gcp/tasks/dynamic_inventory.yml b/roles/openshift_gcp/tasks/dynamic_inventory.yml
new file mode 100644
index 000000000..1637da945
--- /dev/null
+++ b/roles/openshift_gcp/tasks/dynamic_inventory.yml
@@ -0,0 +1,5 @@
+---
+- name: Extract PEM from service account file
+  copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+- name: Templatize environment script
+  template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
diff --git a/roles/openshift_gcp/tasks/frequent_log_rotation.yml b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
new file mode 100644
index 000000000..0b4b27f84
--- /dev/null
+++ b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
@@ -0,0 +1,18 @@
+---
+- name: Rotate logs daily
+  replace:
+    dest: /etc/logrotate.conf
+    regexp: '^weekly|monthly|yearly$'
+    replace: daily
+- name: Rotate at a smaller size of log
+  lineinfile:
+    dest: /etc/logrotate.conf
+    state: present
+    regexp: '^size'
+    line: size 10M
+- name: Limit total size of log files
+  lineinfile:
+    dest: /etc/logrotate.conf
+    state: present
+    regexp: '^maxsize'
+    line: maxsize 20M
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yml
index ad205ba33..fb147bc78 100644
--- a/roles/openshift_gcp/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/main.yml
@@ -17,7 +17,7 @@
 - name: Provision GCP DNS domain
   command: /tmp/openshift_gcp_provision_dns.sh
   args:
-    chdir: "{{ playbook_dir }}/files"
+    chdir: "{{ files_dir }}"
   register: dns_provision
   when:
   - state | default('present') == 'present'
@@ -33,7 +33,7 @@
 - name: Provision GCP resources
   command: /tmp/openshift_gcp_provision.sh
   args:
-    chdir: "{{ playbook_dir }}/files"
+    chdir: "{{ files_dir }}"
   when:
   - state | default('present') == 'present'
diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml 
b/roles/openshift_gcp/tasks/node_cloud_config.yml new file mode 100644 index 000000000..4e982f497 --- /dev/null +++ b/roles/openshift_gcp/tasks/node_cloud_config.yml @@ -0,0 +1,12 @@ +--- +- name: ensure the /etc/origin folder exists + file: name=/etc/origin state=directory + +- name: configure gce cloud config options + ini_file: dest=/etc/origin/cloudprovider/gce.conf section=Global option={{ item.key }} value={{ item.value }} state=present create=yes + with_items: + - { key: 'project-id', value: '{{ openshift_gcp_project }}' } + - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } + - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } + - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } + - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_gcp/tasks/publish_image.yml b/roles/openshift_gcp/tasks/publish_image.yml new file mode 100644 index 000000000..db8a7ca69 --- /dev/null +++ b/roles/openshift_gcp/tasks/publish_image.yml @@ -0,0 +1,32 @@ +--- +- name: Require openshift_gcp_image + fail: + msg: "A source image name or family is required for image publishing. Please ensure `openshift_gcp_image` is defined." + when: openshift_gcp_image is undefined + +- name: Require openshift_gcp_target_image + fail: + msg: "A target image name or family is required for image publishing. Please ensure `openshift_gcp_target_image` is defined." + when: openshift_gcp_target_image is undefined + +- block: + - name: Retrieve images in the {{ openshift_gcp_target_image }} family + command: > + gcloud --project "{{ openshift_gcp_project }}" compute images list + "--filter=family={{ openshift_gcp_target_image }}" + --format=json --sort-by ~creationTimestamp + register: images + - name: Prune oldest images + command: > + gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}" + with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}" + when: openshift_gcp_keep_images is defined + +- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }} + command: > + gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}" + beta compute images create + "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" + --family "{{ openshift_gcp_target_image }}" + --source-image-family "{{ openshift_gcp_image }}" + --source-image-project "{{ openshift_gcp_project }}" diff --git a/roles/openshift_gcp/tasks/setup_scale_group_facts.yml b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml new file mode 100644 index 000000000..0fda43123 --- /dev/null +++ b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml @@ -0,0 +1,44 @@ +--- +- name: Add masters to requisite groups + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: masters, etcd + with_items: "{{ groups['tag_ocp-master'] }}" + +- name: Add a master to the primary masters group + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: primary_master + with_items: "{{ groups['tag_ocp-master'].0 }}" + +- name: Add non-bootstrapping master node instances to node group + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: nodes + openshift_node_labels: + role: infra + with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}" + +- name: Add infra node instances to node group + add_host: + name: "{{ hostvars[item].gce_name }}" + 
groups: nodes + openshift_node_labels: + role: infra + with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}" + +- name: Add node instances to node group + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: nodes + openshift_node_labels: + role: app + with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}" + +- name: Add bootstrap node instances + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: bootstrap_nodes + openshift_node_bootstrap: True + with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}" + when: not (openshift_node_bootstrap | default(False)) diff --git a/roles/openshift_gcp/templates/inventory.j2.sh b/roles/openshift_gcp/templates/inventory.j2.sh new file mode 100644 index 000000000..dcaffb578 --- /dev/null +++ b/roles/openshift_gcp/templates/inventory.j2.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +export GCE_PROJECT="{{ openshift_gcp_project }}" +export GCE_ZONE="{{ openshift_gcp_zone }}" +export GCE_EMAIL="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" +export GCE_PEM_FILE_PATH="/tmp/gce.pem" +export INVENTORY_IP_TYPE="{{ inventory_ip_type }}" +export GCE_TAGGED_INSTANCES="{{ openshift_gcp_prefix }}ocp"
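dynamic_inventory.yml (earlier in this diff) and the inventory.j2.sh template above both read credentials out of the GCP service-account JSON keyfile: the task writes the private key to the path exported as GCE_PEM_FILE_PATH, and the script exports the account email. A standalone sketch of the same extraction, assuming a standard service-account keyfile with client_email and private_key fields:

```python
import json
import os
import stat

# Hypothetical path; the role receives this via
# openshift_gcp_iam_service_account_keyfile.
keyfile = 'service-account.json'

with open(keyfile) as f:
    creds = json.load(f)

# Write the PEM with owner-only permissions, mirroring mode=0600
# in dynamic_inventory.yml.
with open('/tmp/gce.pem', 'w') as pem:
    pem.write(creds['private_key'])
os.chmod('/tmp/gce.pem', stat.S_IRUSR | stat.S_IWUSR)

print('export GCE_EMAIL="%s"' % creds['client_email'])
print('export GCE_PEM_FILE_PATH="/tmp/gce.pem"')
```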
\ No newline at end of file diff --git a/roles/openshift_gcp/templates/master_healthcheck.j2 b/roles/openshift_gcp/templates/master_healthcheck.j2 new file mode 100644 index 000000000..189e578c5 --- /dev/null +++ b/roles/openshift_gcp/templates/master_healthcheck.j2 @@ -0,0 +1,68 @@ +#--------------------------------------------------------------------- +# Example configuration for a possible web application. See the +# full configuration options online. +# +# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt +# +#--------------------------------------------------------------------- + +#--------------------------------------------------------------------- +# Global settings +#--------------------------------------------------------------------- +global + # to have these messages end up in /var/log/haproxy.log you will + # need to: + # + # 1) configure syslog to accept network log events. This is done + # by adding the '-r' option to the SYSLOGD_OPTIONS in + # /etc/sysconfig/syslog + # + # 2) configure local2 events to go to the /var/log/haproxy.log + # file. A line like the following can be added to + # /etc/sysconfig/syslog + # + # local2.* /var/log/haproxy.log + # + log 127.0.0.1 local2 + + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + + # turn on stats unix socket + stats socket /var/lib/haproxy/stats + +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 + +#--------------------------------------------------------------------- +# main frontend which proxys to the backends +#--------------------------------------------------------------------- +frontend http-proxy *:8080 + acl url_healthz path_beg -i /healthz + use_backend ocp if url_healthz + +backend ocp + server ocp localhost:{{ internal_console_port }} ssl verify none diff --git a/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 new file mode 100644 index 000000000..5b0563724 --- /dev/null +++ b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 @@ -0,0 +1,7 @@ +#!/bin/bash + +set -euo pipefail + +oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig +gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig' +rm -f /root/bootstrap.kubeconfig diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index 4d150bc74..794985322 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -9,15 +9,26 @@ if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then ssh-add "{{ openshift_gcp_ssh_private_key }}" || true fi - # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there - pub_key=$(cut -d ' ' 
-f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + # Check if the public key is in the project metadata, and if not, add it there + if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then + pub_file="{{ openshift_gcp_ssh_private_key }}.pub" + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + else + keyfile="${HOME}/.ssh/google_compute_engine" + pub_file="${keyfile}.pub" + mkdir -p "${HOME}/.ssh" + cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}" + chmod 0600 "${keyfile}" + ssh-keygen -y -f "${keyfile}" > "${pub_file}" + pub_key=$(cut -d ' ' -f 2 < "${pub_file}") + fi key_tmp_file='/tmp/ocp-gce-keys' if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" fi echo -n 'cloud-user:' >> "$key_tmp_file" - cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + cat "${pub_file}" >> "$key_tmp_file" gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" rm -f "$key_tmp_file" fi diff --git a/roles/openshift_gcp/templates/yum_repo.j2 b/roles/openshift_gcp/templates/yum_repo.j2 new file mode 100644 index 000000000..77919ea75 --- /dev/null +++ b/roles/openshift_gcp/templates/yum_repo.j2 @@ -0,0 +1,20 @@ +{% for repo in provision_custom_repositories %} +[{{ repo.id | default(repo.name) }}] +name={{ repo.name | default(repo.id) }} +baseurl={{ repo.baseurl }} +{% set enable_repo = repo.enabled | default(1) %} +enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} +{% set enable_gpg_check = repo.gpgcheck | default(1) %} +gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} +{% if 'sslclientcert' in repo %} +sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }} +{% endif %} +{% if 'sslclientkey' in repo %} +sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }} +{% endif %} +{% for key, value in repo.iteritems() %} +{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %} +{{ key }}={{ value }} +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/openshift_grafana/defaults/main.yml b/roles/openshift_grafana/defaults/main.yml new file mode 100644 index 000000000..7fd7a085d --- /dev/null +++ b/roles/openshift_grafana/defaults/main.yml @@ -0,0 +1,12 @@ +--- +gf_body_tmp: + name: grafana_name + type: prometheus + typeLogoUrl: '' + access: proxy + url: prometheus_url + basicAuth: false + withCredentials: false + jsonData: + tlsSkipVerify: true + token: satoken diff --git a/roles/openshift_grafana/files/grafana-ocp-oauth.yml b/roles/openshift_grafana/files/grafana-ocp-oauth.yml new file mode 100644 index 000000000..82fa89004 --- /dev/null +++ b/roles/openshift_grafana/files/grafana-ocp-oauth.yml @@ -0,0 +1,661 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: grafana-ocp + annotations: + "openshift.io/display-name": Grafana ocp + description: | + Grafana server with patched Prometheus datasource. 
+ iconClass: icon-cogs + tags: "metrics,monitoring,grafana,prometheus" +parameters: +- description: The location of the proxy image + name: IMAGE_GF + value: mrsiano/grafana-ocp:latest +- description: The location of the proxy image + name: IMAGE_PROXY + value: openshift/oauth-proxy:v1.0.0 +- description: External URL for the grafana route + name: ROUTE_URL + value: "" +- description: The namespace to instantiate heapster under. Defaults to 'grafana'. + name: NAMESPACE + value: grafana +- description: The session secret for the proxy + name: SESSION_SECRET + generate: expression + from: "[a-zA-Z0-9]{43}" +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + annotations: + serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana-ocp"}}' +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding + metadata: + name: gf-cluster-reader + roleRef: + name: cluster-reader + subjects: + - kind: ServiceAccount + name: grafana-ocp + namespace: "${NAMESPACE}" +- apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + spec: + host: "${ROUTE_URL}" + to: + name: grafana-ocp + tls: + termination: Reencrypt +- apiVersion: v1 + kind: Service + metadata: + name: grafana-ocp + annotations: + prometheus.io/scrape: "true" + prometheus.io/scheme: https + service.alpha.openshift.io/serving-cert-secret-name: gf-tls + namespace: "${NAMESPACE}" + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + ports: + - name: grafana-ocp + port: 443 + protocol: TCP + targetPort: 8443 + selector: + app: grafana-ocp +- apiVersion: v1 + kind: Secret + metadata: + name: gf-proxy + namespace: "${NAMESPACE}" + stringData: + session_secret: "${SESSION_SECRET}=" +# Deploy Prometheus behind an oauth proxy +- apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + labels: + app: grafana-ocp + name: grafana-ocp + namespace: "${NAMESPACE}" + spec: + replicas: 1 + selector: + matchLabels: + app: grafana-ocp + template: + metadata: + labels: + app: grafana-ocp + name: grafana-ocp-app + spec: + serviceAccountName: grafana-ocp + containers: + - name: oauth-proxy + image: ${IMAGE_PROXY} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8443 + name: web + args: + - -https-address=:8443 + - -http-address= + - -email-domain=* + - -client-id=system:serviceaccount:${NAMESPACE}:grafana-ocp + - -upstream=http://localhost:3000 + - -provider=openshift +# - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "grafana-ocp", "namespace": "${NAMESPACE}"}}' + - '-openshift-sar={"namespace": "${NAMESPACE}", "verb": "list", "resource": "services"}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards + volumeMounts: + - mountPath: /etc/tls/private + name: gf-tls + - mountPath: /etc/proxy/secrets + name: secrets + + - name: grafana-ocp + image: ${IMAGE_GF} + ports: + - name: grafana-http + containerPort: 3000 + volumeMounts: + - mountPath: "/root/go/src/github.com/grafana/grafana/data" + name: gf-data + - mountPath: "/root/go/src/github.com/grafana/grafana/conf" + name: gfconfig + - mountPath: /etc/tls/private + name: gf-tls + - 
mountPath: /etc/proxy/secrets + name: secrets + command: + - "./bin/grafana-server" + + volumes: + - name: gfconfig + configMap: + name: gf-config + - name: secrets + secret: + secretName: gf-proxy + - name: gf-tls + secret: + secretName: gf-tls + - emptyDir: {} + name: gf-data +- apiVersion: v1 + kind: ConfigMap + metadata: + name: gf-config + namespace: "${NAMESPACE}" + data: + defaults.ini: |- + ##################### Grafana Configuration Defaults ##################### + # + # Do not modify this file in grafana installs + # + + # possible values : production, development + app_mode = production + + # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty + instance_name = ${HOSTNAME} + + #################################### Paths ############################### + [paths] + # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) + # + data = data + # + # Directory where grafana can store logs + # + logs = data/log + # + # Directory where grafana will automatically scan and look for plugins + # + plugins = data/plugins + + #################################### Server ############################## + [server] + # Protocol (http, https, socket) + protocol = http + + # The ip address to bind to, empty will bind to all interfaces + http_addr = + + # The http port to use + http_port = 3000 + + # The public facing domain name used to access grafana from a browser + domain = localhost + + # Redirect to correct domain if host header does not match domain + # Prevents DNS rebinding attacks + enforce_domain = false + + # The full public facing url + root_url = %(protocol)s://%(domain)s:%(http_port)s/ + + # Log web requests + router_logging = false + + # the path relative working path + static_root_path = public + + # enable gzip + enable_gzip = false + + # https certs & key file + cert_file = /etc/tls/private/tls.crt + cert_key = /etc/tls/private/tls.key + + # Unix socket path + socket = /tmp/grafana.sock + + #################################### Database ############################ + [database] + # You can configure the database connection by specifying type, host, name, user and password + # as separate properties or as on string using the url property. + + # Either "mysql", "postgres" or "sqlite3", it's your choice + type = sqlite3 + host = 127.0.0.1:3306 + name = grafana + user = root + # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;""" + password = + # Use either URL or the previous fields to configure the database + # Example: mysql://user:secret@host:port/database + url = + + # Max idle conn setting default is 2 + max_idle_conn = 2 + + # Max conn setting default is 0 (mean not set) + max_open_conn = + + # For "postgres", use either "disable", "require" or "verify-full" + # For "mysql", use either "true", "false", or "skip-verify". + ssl_mode = disable + + ca_cert_path = + client_key_path = + client_cert_path = + server_cert_name = + + # For "sqlite3" only, path relative to data_path setting + path = grafana.db + + #################################### Session ############################# + [session] + # Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file" + provider = file + + # Provider config options + # memory: not have any config yet + # file: session dir path, is relative to grafana data_path + # redis: config like redis server e.g. 
`addr=127.0.0.1:6379,pool_size=100,db=grafana` + # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable + # mysql: go-sql-driver/mysql dsn config string, examples: + # `user:password@tcp(127.0.0.1:3306)/database_name` + # `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name` + # memcache: 127.0.0.1:11211 + + + provider_config = sessions + + # Session cookie name + cookie_name = grafana_sess + + # If you use session in https only, default is false + cookie_secure = false + + # Session life time, default is 86400 + session_life_time = 86400 + gc_interval_time = 86400 + + #################################### Data proxy ########################### + [dataproxy] + + # This enables data proxy logging, default is false + logging = false + + #################################### Analytics ########################### + [analytics] + # Server reporting, sends usage counters to stats.grafana.org every 24 hours. + # No ip addresses are being tracked, only simple counters to track + # running instances, dashboard and error counts. It is very helpful to us. + # Change this option to false to disable reporting. + reporting_enabled = true + + # Set to false to disable all checks to https://grafana.com + # for new versions (grafana itself and plugins), check is used + # in some UI views to notify that grafana or plugin update exists + # This option does not cause any auto updates, nor send any information + # only a GET request to https://grafana.com to get latest versions + check_for_updates = true + + # Google Analytics universal tracking code, only enabled if you specify an id here + google_analytics_ua_id = + + # Google Tag Manager ID, only enabled if you specify an id here + google_tag_manager_id = + + #################################### Security ############################ + [security] + # default admin user, created on startup + admin_user = admin + + # default admin password, can be changed before first start of grafana, or in profile settings + admin_password = admin + + # used for signing + secret_key = SW2YcwTIb9zpOOhoPsMm + + # Auto-login remember days + login_remember_days = 7 + cookie_username = grafana_user + cookie_remember_name = grafana_remember + + # disable gravatar profile images + disable_gravatar = false + + # data source proxy whitelist (ip_or_domain:port separated by spaces) + data_source_proxy_whitelist = + + [snapshots] + # snapshot sharing options + external_enabled = true + external_snapshot_url = https://snapshots-origin.raintank.io + external_snapshot_name = Publish to snapshot.raintank.io + + # remove expired snapshot + snapshot_remove_expired = true + + # remove snapshots after 90 days + snapshot_TTL_days = 90 + + #################################### Users #################################### + [users] + # disable user signup / registration + allow_sign_up = true + + # Allow non admin users to create organizations + allow_org_create = true + + # Set to true to automatically assign new users to the default organization (id 1) + auto_assign_org = true + + # Default role new users will be automatically assigned (if auto_assign_org above is set to true) + auto_assign_org_role = Admin + + # Require email validation before sign up completes + verify_email_enabled = false + + # Background text for the user field on the login page + login_hint = email or username + + # Default UI theme ("dark" or "light") + default_theme = dark + + # External user management + external_manage_link_url = + external_manage_link_name = + external_manage_info = + + 
[auth] + # Set to true to disable (hide) the login form, useful if you use OAuth + disable_login_form = true + + # Set to true to disable the signout link in the side menu. useful if you use auth.proxy + disable_signout_menu = true + + #################################### Anonymous Auth ###################### + [auth.anonymous] + # enable anonymous access + enabled = true + + # specify organization name that should be used for unauthenticated users + org_name = Main Org. + + # specify role for unauthenticated users + org_role = Admin + + #################################### Github Auth ######################### + [auth.github] + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + auth_url = https://github.com/login/oauth/authorize + token_url = https://github.com/login/oauth/access_token + api_url = https://api.github.com/user + team_ids = + allowed_organizations = + + #################################### Google Auth ######################### + [auth.google] + enabled = false + allow_sign_up = true + client_id = some_client_id + client_secret = some_client_secret + scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email + auth_url = https://accounts.google.com/o/oauth2/auth + token_url = https://accounts.google.com/o/oauth2/token + api_url = https://www.googleapis.com/oauth2/v1/userinfo + allowed_domains = + hosted_domain = + + #################################### Grafana.com Auth #################### + # legacy key names (so they work in env variables) + [auth.grafananet] + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + allowed_organizations = + + [auth.grafana_com] + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + allowed_organizations = + + #################################### Generic OAuth ####################### + [auth.generic_oauth] + name = OAuth + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + auth_url = + token_url = + api_url = + team_ids = + allowed_organizations = + + #################################### Basic Auth ########################## + [auth.basic] + enabled = false + + #################################### Auth Proxy ########################## + [auth.proxy] + enabled = true + header_name = X-WEBAUTH-USER + header_property = username + auto_sign_up = true + ldap_sync_ttl = 60 + whitelist = + + #################################### Auth LDAP ########################### + [auth.ldap] + enabled = false + config_file = /etc/grafana/ldap.toml + allow_sign_up = true + + #################################### SMTP / Emailing ##################### + [smtp] + enabled = false + host = localhost:25 + user = + # If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;""" + password = + cert_file = + key_file = + skip_verify = false + from_address = admin@grafana.localhost + from_name = Grafana + ehlo_identity = + + [emails] + welcome_email_on_sign_up = false + templates_pattern = emails/*.html + + #################################### Logging ########################## + [log] + # Either "console", "file", "syslog". Default is console and file + # Use space to separate multiple modes, e.g. 
"console file" + mode = console file + + # Either "debug", "info", "warn", "error", "critical", default is "info" + level = error + + # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug + filters = + + # For "console" mode only + [log.console] + level = + + # log line format, valid options are text, console and json + format = console + + # For "file" mode only + [log.file] + level = + + # log line format, valid options are text, console and json + format = text + + # This enables automated log rotate(switch of following options), default is true + log_rotate = true + + # Max line number of single file, default is 1000000 + max_lines = 1000000 + + # Max size shift of single file, default is 28 means 1 << 28, 256MB + max_size_shift = 28 + + # Segment log daily, default is true + daily_rotate = true + + # Expired days of log file(delete after max days), default is 7 + max_days = 7 + + [log.syslog] + level = + + # log line format, valid options are text, console and json + format = text + + # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. + network = + address = + + # Syslog facility. user, daemon and local0 through local7 are valid. + facility = + + # Syslog tag. By default, the process' argv[0] is used. + tag = + + + #################################### AMQP Event Publisher ################ + [event_publisher] + enabled = false + rabbitmq_url = amqp://localhost/ + exchange = grafana_events + + #################################### Dashboard JSON files ################ + [dashboards.json] + enabled = false + path = /var/lib/grafana/dashboards + + #################################### Usage Quotas ######################## + [quota] + enabled = false + + #### set quotas to -1 to make unlimited. #### + # limit number of users per Org. + org_user = 10 + + # limit number of dashboards per Org. + org_dashboard = 100 + + # limit number of data_sources per Org. + org_data_source = 10 + + # limit number of api_keys per Org. + org_api_key = 10 + + # limit number of orgs a user can create. + user_org = 10 + + # Global limit of users. + global_user = -1 + + # global limit of orgs. + global_org = -1 + + # global limit of dashboards + global_dashboard = -1 + + # global limit of api_keys + global_api_key = -1 + + # global limit on number of logged in users. + global_session = -1 + + #################################### Alerting ############################ + [alerting] + # Disable alerting engine & UI features + enabled = true + # Makes it possible to turn off alert rule execution but alerting UI is visible + execute_alerts = true + + #################################### Internal Grafana Metrics ############ + # Metrics available at HTTP API Url /api/metrics + [metrics] + enabled = true + interval_seconds = 10 + + # Send internal Grafana metrics to graphite + [metrics.graphite] + # Enable by setting the address setting (ex localhost:2003) + address = + prefix = prod.grafana.%(instance_name)s. + + [grafana_net] + url = https://grafana.com + + [grafana_com] + url = https://grafana.com + + #################################### Distributed tracing ############ + [tracing.jaeger] + # jaeger destination (ex localhost:6831) + address = + # tag that will always be included in when creating new spans. 
ex (tag1:value1,tag2:value2) + always_included_tag = + # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote + sampler_type = const + # jaeger samplerconfig param + # for "const" sampler, 0 or 1 for always false/true respectively + # for "probabilistic" sampler, a probability between 0 and 1 + # for "rateLimiting" sampler, the number of spans per second + # for "remote" sampler, param is the same as for "probabilistic" + # and indicates the initial sampling rate before the actual one + # is received from the mothership + sampler_param = 1 + + #################################### External Image Storage ############## + [external_image_storage] + # You can choose between (s3, webdav, gcs) + provider = + + [external_image_storage.s3] + bucket_url = + bucket = + region = + path = + access_key = + secret_key = + + [external_image_storage.webdav] + url = + username = + password = + public_url = + + [external_image_storage.gcs] + key_file = + bucket = diff --git a/roles/openshift_grafana/files/grafana-ocp.yml b/roles/openshift_grafana/files/grafana-ocp.yml new file mode 100644 index 000000000..bc7b4b286 --- /dev/null +++ b/roles/openshift_grafana/files/grafana-ocp.yml @@ -0,0 +1,76 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: grafana-ocp + annotations: + "openshift.io/display-name": Grafana ocp + description: | + Grafana server with patched Prometheus datasource. + iconClass: icon-cogs + tags: "metrics,monitoring,grafana,prometheus" +parameters: +- description: External URL for the grafana route + name: ROUTE_URL + value: "" +- description: The namespace to instantiate heapster under. Defaults to 'grafana'. + name: NAMESPACE + value: grafana +objects: +- apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + spec: + host: "${ROUTE_URL}" + to: + name: grafana-ocp +- apiVersion: v1 + kind: Service + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + selector: + name: grafana-ocp + ports: + - port: 8082 + protocol: TCP + targetPort: grafana-http +- apiVersion: v1 + kind: ReplicationController + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + selector: + name: grafana-ocp + replicas: 1 + template: + version: v1 + metadata: + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + volumes: + - name: data + emptyDir: {} + containers: + - image: "mrsiano/grafana-ocp:latest" + name: grafana-ocp + ports: + - name: grafana-http + containerPort: 3000 + volumeMounts: + - name: data + mountPath: "/root/go/src/github.com/grafana/grafana/data" + command: + - "./bin/grafana-server" diff --git a/roles/openshift_grafana/files/openshift-cluster-monitoring.json b/roles/openshift_grafana/files/openshift-cluster-monitoring.json new file mode 100644 index 000000000..f59ca997f --- /dev/null +++ b/roles/openshift_grafana/files/openshift-cluster-monitoring.json @@ -0,0 +1,5138 @@ +{ + "dashboard": { + "description": "Monitors Openshift cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. 
Uses cAdvisor metrics only.", + "editable": true, + "gnetId": 315, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "rows": [ + { + "collapse": false, + "height": "200px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "height": "200px", + "id": 32, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network I/O pressure", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Network I/O pressure", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "180px", + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + 
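The top-level {"dashboard": ...} wrapper means this file is shaped as a payload for Grafana's dashboard HTTP API rather than as a bare dashboard definition. A sketch of loading it that way, where the Grafana route and the admin password variable are both assumptions, and the play is assumed to run from the repository root:

---
- hosts: localhost
  gather_facts: no
  tasks:
    - name: POST the dashboard JSON to Grafana's /api/dashboards/db endpoint
      uri:
        url: "https://grafana-ocp.example.com/api/dashboards/db"  # hypothetical route
        method: POST
        user: admin
        password: "{{ grafana_admin_password }}"  # assumed variable
        force_basic_auth: yes
        body_format: json
        # lookup('file', ...) reads the JSON; from_json turns it into the dict uri will serialize
        body: "{{ lookup('file', 'roles/openshift_grafana/files/openshift-cluster-monitoring.json') | from_json }}"

Adding "overwrite": true next to the "dashboard" key makes repeated imports idempotent.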
"interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Cluster memory usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "180px", + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Cluster CPU usage ", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "180px", + "id": 7, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Cluster filesystem usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + 
"cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "20%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 10, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 11, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 12, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 13, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + 
"title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 14, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Total usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 33, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) ", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "overall cpu usage", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Cluster CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "height": "", + "id": 17, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name) * 100", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pods CPU usage ", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": "% Usage", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Pods CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "height": "", + "id": 24, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Containers Cores Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Containers CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "height": "", + "id": 23, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "System services CPU usage ", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "System services CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 411, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 34, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_memory_usage_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes Memory usage ", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
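Grouping the system-services panels by id works because cAdvisor sets the id label to the cgroup path, so systemd units appear under /system.slice/... . Narrowing the selector to that subtree drops container cgroups from the result; a sketch, with the Prometheus host again a placeholder:

---
- hosts: localhost
  gather_facts: no
  vars:
    promql: sum(irate(container_cpu_usage_seconds_total{id=~"/system.slice/.*"}[2m])) by (id)
  tasks:
    - name: Query CPU usage for systemd units only
      uri:
        url: "https://prometheus.example.com/api/v1/query?query={{ promql | urlencode }}"
        return_content: yes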
"logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "All processes CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pods memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Pods memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 26, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_rss{systemd_service_name=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ systemd_service_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "System services memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": 
"bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "System services memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + }, + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "container_memory_usage:sort_desc", + "refId": "B", + "step": 10 + }, + { + "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Containers memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Containers memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "500px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 28, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": 
"current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "All processes memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 30, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}", + "metric": "network", + "refId": "B", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}", + "metric": "network", + "refId": "D", + "step": 1 + }, + { + "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": 
"1s", + "intervalFactor": 1, + "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", + "refId": "C", + "step": 1 + }, + { + "expr": "sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "E", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "F", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Containers network I/O ", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Containers network I/O", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 277, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> {{ pod_name }}", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- {{ pod_name }}", + "metric": "network", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pods network I/O ", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Pods network I/O", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "500px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 29, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "instant": true, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> {{ id }}", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- {{ id }}", + "metric": "network", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes network I/O ", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "All processes network I/O", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 35, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_build_total) by (phase,reason)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ phase }} | {{ reason }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_build_total", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, 
+ "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 54, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(openshift_build_active_time_seconds{phase=\"running\"} offset 10m)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of builds that have been running for more than 10 minutes (600 seconds).", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 55, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(openshift_build_active_time_seconds{phase=\"pending\"} offset 10m)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of build that have been waiting at least 10 minutes (600 seconds) to start.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 56, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_build_total{phase=\"Failed\"})", 
+ "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of failed builds, regardless of the failure reason.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 57, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_build_total{phase=\"Failed\",reason=\"FetchSourceFailed\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ instance }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of failed builds because of problems retrieving source from the associated Git repository.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_build_total{phase=\"Complete\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of successfully completed builds.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 0, + "id": 59, + "legend": { + "alignAsTable": true, + "avg": 
true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_build_total{phase=\"Failed\"} offset 5m", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ reason }}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the failed builds totals, per failure reason, from 5 minutes ago.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OpenShift Builds", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 36, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_sdn_pod_setup_latency_sum)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_pod_setup_latency_sum", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 41, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_sdn_pod_teardown_latency{quantile=\"0.9\"}) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_pod_teardown_latency", + "tooltip": { + "shared": true, + "sort": 0, 
+ "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 50, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10, (sum by (pod_name) (irate(container_network_receive_bytes_total{pod_name!=\"\"}[5m]))))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Top 10 pods doing the most receive network traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 37, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_sdn_pod_ips", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ instance }} | {{ role }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_pod_ips", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 39, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": 
false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 42, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_sdn_arp_cache_entries", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ role }} | {{ instance }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_arp_cache_entries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 40, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_sdn_arp_cache_entries", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_arp_cache_entries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + 
"repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OpenShift SDN", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 44, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(kubelet_pleg_relist_latency_microseconds{kubernetes_io_hostname=~\"$Node\",quantile=\"0.9\"}[2m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ role }} | {{ instance }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "kubelet_pleg_relist", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 51, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}[2m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ operation_type }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 52, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": 
[], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "kubelet_docker_operations_timeout", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ operation_type }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns a running count (not a rate) of docker operations that have timed out since the kubelet was started.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 53, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "kubelet_docker_operations_errors", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ operation_type }}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns a running count (not a rate) of docker operations that have failed since the kubelet was started.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Kubelet", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 46, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(scrape_samples_scraped[2m])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{ kubernetes_name }} | {{ instance }} ", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "scrape_samples_scraped", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 68, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum without (cpu) (irate(container_cpu_usage_seconds_total{container_name=\"prometheus\"}[5m])))", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU per instance of Prometheus container.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Prometheus", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 48, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb!~\"GET|LIST|WATCH\"}[2m]))) > 0", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ resource }} || {{ verb }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Number of mutating API requests being made to the control plane.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 49, + "legend": { + "alignAsTable": true, + "avg": true, + 
"current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb=~\"GET|LIST|WATCH\"}[2m]))) > 0", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ resource }} || {{ pod }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Number of non-mutating API requests being made to the control plane.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 74, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "endpoint_queue_latency", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": " quantile {{ quantile }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "endpoint_queue_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "API Server", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "etcd_disk_wal_fsync_duration_seconds_count", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, 
+ "title": "etcd_disk_wal_fsync_duration_seconds_count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "etcd", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 62, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(changes(container_start_time_seconds[10m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "The number of containers that start or restart over the last ten minutes.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Changes in your cluster", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 63, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(machine_cpu_cores)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total number of cores in the cluster.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "${DS_PR}", + "fill": 1, + "id": 64, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(sort_desc(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total number of consumed cores.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 65, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (kubernetes_io_hostname,type) (irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU consumed per node in the cluster.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (cpu,id,pod_name,container_name) (irate(container_cpu_usage_seconds_total{role=\"infra\"}[5m])))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU consumption per system service or container on the infrastructure nodes.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 67, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (namespace) (irate(container_cpu_usage_seconds_total[5m])))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU consumed per namespace on the cluster.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 47, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[3m])) / sum(machine_cpu_cores)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Percentage of total cluster CPU in use", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 69, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss) / sum(machine_memory_bytes)", + "format": "time_series", + 
"intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Percentage of total cluster memory in use", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 70, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (kubernetes_io_hostname) (irate(container_cpu_usage_seconds_total{id=~\"/system.slice/(docker|etcd).service\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Aggregate CPU usage (seconds total) of etcd+docker", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "System and container CPU", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 71, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "Kubernetes Storage Metrics via Prometheus", + "type": "absolute", + "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g" + } + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "volumes_queue_latency", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "volumes_queue_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] 
+ }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 72, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "Kubernetes Storage Metrics via Prometheus", + "type": "absolute", + "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g" + } + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(cloudprovider_gce_api_request_duration_seconds_count[2m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ request }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "cloudprovider_gce_api_request_duration_seconds_count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 73, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "Kubernetes Storage Metrics via Prometheus", + "type": "absolute", + "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g" + } + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate(storage_operation_duration_seconds_sum{kubernetes_io_hostname=~\"$Node\"}[2m])) by (operation_name,kubernetes_io_hostname)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ operation_name }} || {{ kubernetes_io_hostname }}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "storage_operation_duration_seconds_sum", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OpenShift Volumes", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes", + "openshift" + ], + "templating": { + "list": [ + {
"allValue": ".*", + "current": {}, + "datasource": "${DS_PR}", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Node", + "options": [], + "query": "label_values(kubernetes_io_hostname)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "1s", + "2m", + "20s", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "openshift cluster monitoring", + "version": 6 + } +} diff --git a/roles/openshift_grafana/meta/main.yml b/roles/openshift_grafana/meta/main.yml new file mode 100644 index 000000000..8dea6f197 --- /dev/null +++ b/roles/openshift_grafana/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Eldad Marciano + description: Setup grafana pod + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.3 + platforms: + - name: EL + versions: + - 7 + categories: + - metrics diff --git a/roles/openshift_grafana/tasks/gf-permissions.yml b/roles/openshift_grafana/tasks/gf-permissions.yml new file mode 100644 index 000000000..9d3c741ee --- /dev/null +++ b/roles/openshift_grafana/tasks/gf-permissions.yml @@ -0,0 +1,12 @@ +--- +- name: Create gf user on htpasswd + command: htpasswd -c /etc/origin/master/htpasswd gfadmin + +- name: Make sure master config use HTPasswdPasswordIdentityProvider + command: "sed -ie 's|AllowAllPasswordIdentityProvider|HTPasswdPasswordIdentityProvider\n file: /etc/origin/master/htpasswd|' /etc/origin/master/master-config.yaml" + +- name: Grant permission for gfuser + command: oc adm policy add-cluster-role-to-user cluster-reader gfadmin + +- name: Restart mater api + command: systemctl restart atomic-openshift-master-api.service diff --git a/roles/openshift_grafana/tasks/main.yml b/roles/openshift_grafana/tasks/main.yml new file mode 100644 index 000000000..6a06d40a9 --- /dev/null +++ b/roles/openshift_grafana/tasks/main.yml @@ -0,0 +1,122 @@ +--- +- name: Create grafana namespace + oc_project: + state: present + name: grafana + +- name: Configure Grafana Permissions + include_tasks: tasks/gf-permissions.yml + when: gf_oauth | default(false) | bool == true + +# TODO: we should grab this yaml file from openshift/origin +- name: Templatize grafana yaml + template: src=grafana-ocp.yaml dest=/tmp/grafana-ocp.yaml + register: + cl_file: /tmp/grafana-ocp.yaml + when: gf_oauth | default(false) | bool == false + +# TODO: we should grab this yaml file from openshift/origin +- name: Templatize grafana yaml + template: src=grafana-ocp-oauth.yaml dest=/tmp/grafana-ocp-oauth.yaml + register: + cl_file: /tmp/grafana-ocp-oauth.yaml + when: gf_oauth | default(false) | bool == true + +- name: Process the grafana file + oc_process: + namespace: grafana + template_name: "{{ cl_file }}" + create: True + when: gf_oauth | default(false) | bool == true + +- name: Wait to grafana be running + command: oc rollout status deployment/grafana-ocp + +- name: oc adm policy add-role-to-user view -z grafana-ocp -n {{ gf_prometheus_namespace }} + oc_adm_policy_user: + user: grafana-ocp + resource_kind: cluster-role + resource_name: view + state: present + role_namespace: "{{ gf_prometheus_namespace }}" + +- name: Get grafana route + oc_obj: + kind: route + name: grafana + namespace: grafana + 
+
+- name: Get prometheus route
+  oc_obj:
+    kind: route
+    name: prometheus
+    namespace: "{{ gf_prometheus_namespace }}"
+  register: prometheus
+
+- name: Extract the route hostnames
+  set_fact:
+    grafana_host: "{{ route.results.results[0].spec.host }}"
+    prometheus_host: "{{ prometheus.results.results[0].spec.host }}"
+
+- name: Get the prometheus SA
+  oc_serviceaccount_secret:
+    state: list
+    service_account: prometheus
+    namespace: "{{ gf_prometheus_namespace }}"
+  register: sa
+
+- name: Get the management SA bearer token
+  set_fact:
+    management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+  oc_secret:
+    state: list
+    name: "{{ management_token }}"
+    namespace: "{{ gf_prometheus_namespace }}"
+    no_log: True
+  register: sa_secret
+
+- name: Get the SA bearer token for prometheus
+  set_fact:
+    token: "{{ sa_secret.results.encoded.token }}"
+
+- name: Convert the datasource body to json
+  set_fact:
+    ds_json: "{{ gf_body_tmp | to_json }}"
+
+- name: Set protocol type
+  set_fact:
+    protocol: "{{ 'https' if gf_oauth | default(false) | bool else 'http' }}"
+
+- name: Add gf datasource
+  uri:
+    url: "{{ protocol }}://{{ grafana_host }}/api/datasources"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ ds_json | regex_replace('grafana_name', gf_datasource_name) | regex_replace('prometheus_url', 'https://' ~ prometheus_host) | regex_replace('satoken', token) }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_ds
+
+- name: Regex setup ds name
+  replace:
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '\$\{DS_PR\}'
+    replace: '{{ gf_datasource_name }}'
+    backup: yes
+  delegate_to: localhost
+
+- name: Add new dashboard
+  uri:
+    url: "{{ protocol }}://{{ grafana_host }}/api/dashboards/db"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_dashboard
+
+- name: Regex json tear down
+  replace:
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '{{ gf_datasource_name }}'
+    replace: '${DS_PR}'
+    backup: yes
+  delegate_to: localhost
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index dcaf87eca..c83adb26d 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -175,6 +175,8 @@ def format_failure(failure):
     play = failure['play']
     task = failure['task']
     msg = failure['msg']
+    if not isinstance(msg, string_types):
+        msg = str(msg)
     checks = failure['checks']
     fields = (
         (u'Hosts', host),
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index bc8e7bdcf..b8a59ee14 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -1,3 +1,4 @@
 ---
 dependencies:
 - role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index b7b16e0ea..b9c41d1b4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -5,6 +5,7 @@ Health checks for OpenShift clusters.
 import json
 import operator
 import os
+import re
 import time
 import collections
@@ -95,6 +96,13 @@ class OpenShiftCheck(object):
         # These are intended to be a sequential record of what the check observed and determined.
self.logs = [] + def template_var(self, var_to_template): + """Return a templated variable if self._templar is not None, else + just return the variable as-is""" + if self._templar is not None: + return self._templar.template(var_to_template) + return var_to_template + @abstractproperty def name(self): """The name of this check, usually derived from the class name.""" @@ -302,28 +310,38 @@ class OpenShiftCheck(object): name_list = name_list.split(',') return [name.strip() for name in name_list if name.strip()] - @staticmethod - def get_major_minor_version(openshift_image_tag): + def get_major_minor_version(self, openshift_image_tag=None): """Parse and return the deployed version of OpenShift as a tuple.""" - if openshift_image_tag and openshift_image_tag[0] == 'v': - openshift_image_tag = openshift_image_tag[1:] - # map major release versions across releases - # to a common major version - openshift_major_release_version = { - "1": "3", - } + version = openshift_image_tag or self.get_var("openshift_image_tag") + components = [int(component) for component in re.findall(r'\d+', version)] - components = openshift_image_tag.split(".") - if not components or len(components) < 2: + if len(components) < 2: msg = "An invalid version of OpenShift was found for this host: {}" - raise OpenShiftCheckException(msg.format(openshift_image_tag)) + raise OpenShiftCheckException(msg.format(version)) + + # map major release version across releases to OCP major version + components[0] = {1: 3}.get(components[0], components[0]) + + return tuple(int(x) for x in components[:2]) + + def get_required_version(self, name, version_map): + """Return the correct required version(s) for the current (or nearest) OpenShift version.""" + openshift_version = self.get_major_minor_version() + + earliest = min(version_map) + latest = max(version_map) + if openshift_version < earliest: + return version_map[earliest] + if openshift_version > latest: + return version_map[latest] - if components[0] in openshift_major_release_version: - components[0] = openshift_major_release_version[components[0]] + required_version = version_map.get(openshift_version) + if not required_version: + msg = "There is no recommended version of {} for the current version of OpenShift ({})" + raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version))) - components = tuple(int(x) for x in components[:2]) - return components + return required_version def find_ansible_mount(self, path): """Return the mount point for path from ansible_mounts.""" diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 87e6146d4..6e30a8610 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck): 'oo_etcd_to_config': 20 * 10**9, }, # Used to copy client binaries into, - # see roles/openshift_cli/library/openshift_container_binary_sync.py. + # see roles/lib_utils/library/openshift_container_binary_sync.py. 
'/usr/local/bin': { 'oo_masters_to_config': 1 * 10**9, 'oo_nodes_to_config': 1 * 10**9, diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 4f91f6bb3..145b82491 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -40,7 +40,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # to look for images available remotely without waiting to pull them. dependencies = ["python-docker-py", "skopeo"] # command for checking if remote registries have an image, without docker pull - skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}" + skopeo_command = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}" skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>" def __init__(self, *args, **kwargs): @@ -56,7 +56,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # ordered list of registries (according to inventory vars) that docker will try for unscoped images regs = self.ensure_list("openshift_docker_additional_registries") # currently one of these registries is added whether the user wants it or not. - deployment_type = self.get_var("openshift_deployment_type") + deployment_type = self.get_var("openshift_deployment_type", default="") if deployment_type == "origin" and "docker.io" not in regs: regs.append("docker.io") elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs: @@ -64,7 +64,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): self.registries["configured"] = regs # for the oreg_url registry there may be credentials specified - components = self.get_var("oreg_url", default="").split('/') + oreg_url = self.get_var("oreg_url", default="") + oreg_url = self.template_var(oreg_url) + components = oreg_url.split('/') self.registries["oreg"] = "" if len(components) < 3 else components[0] # Retrieve and template registry credentials, if provided @@ -72,14 +74,22 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): oreg_auth_user = self.get_var('oreg_auth_user', default='') oreg_auth_password = self.get_var('oreg_auth_password', default='') if oreg_auth_user != '' and oreg_auth_password != '': - if self._templar is not None: - oreg_auth_user = self._templar.template(oreg_auth_user) - oreg_auth_password = self._templar.template(oreg_auth_password) - self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password)) + oreg_auth_user = self.template_var(oreg_auth_user) + oreg_auth_password = self.template_var(oreg_auth_password) + self.skopeo_command_creds = quote("--creds={}:{}".format(oreg_auth_user, oreg_auth_password)) # record whether we could reach a registry or not (and remember results) self.reachable_registries = {} + # take note of any proxy settings needed + proxies = [] + for var in ['http_proxy', 'https_proxy', 'no_proxy']: + # ansible vars are openshift_http_proxy, openshift_https_proxy, openshift_no_proxy + value = self.get_var("openshift_" + var, default=None) + if value: + proxies.append(var.upper() + "=" + quote(self.template_var(value))) + self.skopeo_proxy_vars = " ".join(proxies) + def is_active(self): """Skip hosts with unsupported deployment types.""" 
deployment_type = self.get_var("openshift_deployment_type") @@ -153,6 +163,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # template for images that run on top of OpenShift image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") image_url = self.get_var("oreg_url", default="") or image_url + image_url = self.template_var(image_url) if 'oo_nodes_to_config' in host_groups: for suffix in NODE_IMAGE_SUFFIXES: required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag)) @@ -160,16 +171,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): required.add(self._registry_console_image(image_tag, image_info)) # images for containerized components - if self.get_var("openshift", "common", "is_containerized"): - components = set() + def add_var_or_default_img(var_name, comp_name): + """Returns: default image from comp_name, overridden by var_name in task_vars""" + default = "{}/{}:{}".format(image_info["namespace"], comp_name, image_tag) + required.add(self.template_var(self.get_var(var_name, default=default))) + + if self.get_var("openshift_is_containerized", convert=bool): if 'oo_nodes_to_config' in host_groups: - components.update(["node", "openvswitch"]) + add_var_or_default_img("osn_image", "node") + add_var_or_default_img("osn_ovs_image", "openvswitch") if 'oo_masters_to_config' in host_groups: # name is "origin" or "ose" - components.add(image_info["name"]) - for component in components: - required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag)) - if 'oo_etcd_to_config' in host_groups: # special case, note it is the same for origin/enterprise - required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag + add_var_or_default_img("osm_image", image_info["name"]) + if 'oo_etcd_to_config' in host_groups: + # special case, note default is the same for origin/enterprise and has no image tag + etcd_img = self.get_var("osm_etcd_image", default="registry.access.redhat.com/rhel7/etcd") + required.add(self.template_var(etcd_img)) return required @@ -247,11 +263,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): if not self.reachable_registries[registry]: continue # do not keep trying unreachable registries - args = dict(registry=registry, image=image) - args["tls"] = "false" if registry in self.registries["insecure"] else "true" - args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else "" + args = dict( + proxyvars=self.skopeo_proxy_vars, + tls="false" if registry in self.registries["insecure"] else "true", + creds=self.skopeo_command_creds if registry == self.registries["oreg"] else "", + registry=quote(registry), + image=quote(image), + ) - result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)}) + result = self.execute_module_with_retries("command", { + "_uses_shell": True, + "_raw_params": self.skopeo_command.format(**args), + }) if result.get("rc", 0) == 0 and not result.get("failed"): return True if result.get("rc") == 124: # RC 124 == timed out; mark unreachable @@ -261,6 +284,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): def connect_to_registry(self, registry): """Use ansible wait_for module to test connectivity from host to registry. 
Returns bool.""" + if self.skopeo_proxy_vars != "": + # assume we can't connect directly; just waive the test + return True + # test a simple TCP connection host, _, port = registry.partition(":") port = port or 443 diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py index 8b20ccb49..b56d2092b 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py @@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck): return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version def run(self): - is_containerized = self.get_var("openshift", "common", "is_containerized") - unit = "etcd_container" if is_containerized else "etcd" + openshift_is_containerized = self.get_var("openshift_is_containerized") + unit = "etcd_container" if openshift_is_containerized else "etcd" log_matchers = [{ "start_regexp": r"Starting Etcd Server", diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py index 986a01f38..7f8c6ebdc 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py +++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py @@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck): """ errors = [] for pod_name in pods_by_name.keys(): - df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name) + df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name) disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json') lines = disk_output.splitlines() # expecting one header looking like 'IUse% Use%' and one body line diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py index 3b1cf8baa..16ec3a7f6 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py +++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py @@ -5,12 +5,11 @@ Module for performing checks on a Kibana logging deployment import json import ssl -try: - from urllib2 import HTTPError, URLError - import urllib2 -except ImportError: - from urllib.error import HTTPError, URLError - import urllib.request as urllib2 +# pylint can't find the package when its installed in virtualenv +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six.moves.urllib import request +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException @@ -65,7 +64,7 @@ class Kibana(LoggingCheck): # Verify that the url is returning a valid response try: # We only care if the url connects and responds - return_code = urllib2.urlopen(url, context=ctx).getcode() + return_code = request.urlopen(url, context=ctx).getcode() except HTTPError as httperr: return httperr.reason except URLError as urlerr: diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index cfbdea303..567162be1 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -10,8 +10,8 @@ class NotContainerizedMixin(object): def 
is_active(self): """Only run on non-containerized hosts.""" - is_containerized = self.get_var("openshift", "common", "is_containerized") - return super(NotContainerizedMixin, self).is_active() and not is_containerized + openshift_is_containerized = self.get_var("openshift_is_containerized") + return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized class DockerHostMixin(object): @@ -23,7 +23,7 @@ class DockerHostMixin(object): """Only run on hosts that depend on Docker.""" group_names = set(self.get_var("group_names", default=[])) needs_docker = set(["oo_nodes_to_config"]) - if self.get_var("openshift.common.is_containerized"): + if self.get_var("openshift_is_containerized"): needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"]) return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker)) @@ -33,7 +33,7 @@ class DockerHostMixin(object): (which would not be able to install but should already have them). Returns: msg, failed """ - if self.get_var("openshift", "common", "is_atomic"): + if self.get_var("openshift_is_atomic"): return "", False # NOTE: we would use the "package" module but it's actually an action plugin diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py index 0cad19842..58a2692bd 100644 --- a/roles/openshift_health_checker/openshift_checks/ovs_version.py +++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py @@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp currently installed version of OpenShift. """ -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin @@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): tags = ["health"] openshift_to_ovs_version = { - "3.7": ["2.6", "2.7", "2.8"], - "3.6": ["2.6", "2.7", "2.8"], - "3.5": ["2.6", "2.7"], - "3.4": "2.4", + (3, 4): "2.4", + (3, 5): ["2.6", "2.7"], + (3, 6): ["2.6", "2.7", "2.8"], + (3, 7): ["2.6", "2.7", "2.8"], + (3, 8): ["2.6", "2.7", "2.8"], + (3, 9): ["2.6", "2.7", "2.8"], } def is_active(self): @@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): return self.execute_module("rpm_version", args) def get_required_ovs_version(self): - """Return the correct Open vSwitch version for the current OpenShift version""" - openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag")) - - if openshift_version_tuple < (3, 5): - return self.openshift_to_ovs_version["3.4"] - - openshift_version = ".".join(str(x) for x in openshift_version_tuple) - ovs_version = self.openshift_to_ovs_version.get(openshift_version) - if ovs_version: - return self.openshift_to_ovs_version[openshift_version] - - msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(openshift_version)) + """Return the correct Open vSwitch version(s) for the current OpenShift version.""" + return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index f3a628e28..28aee8b35 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ 
b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -1,8 +1,6 @@ """Check that available RPM packages match the required versions.""" -import re - -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin @@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): (3, 5): ["2.6", "2.7"], (3, 6): ["2.6", "2.7", "2.8"], (3, 7): ["2.6", "2.7", "2.8"], + (3, 8): ["2.6", "2.7", "2.8"], + (3, 9): ["2.6", "2.7", "2.8"], } openshift_to_docker_version = { @@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): (3, 4): "1.12", (3, 5): "1.12", (3, 6): "1.12", - } - - # map major OpenShift release versions across releases to a common major version - map_major_release_version = { - 1: 3, + (3, 7): "1.12", + (3, 8): "1.12", + (3, 9): ["1.12", "1.13"], } def is_active(self): @@ -83,48 +81,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): def get_required_ovs_version(self): """Return the correct Open vSwitch version(s) for the current OpenShift version.""" - openshift_version = self.get_openshift_version_tuple() - - earliest = min(self.openshift_to_ovs_version) - latest = max(self.openshift_to_ovs_version) - if openshift_version < earliest: - return self.openshift_to_ovs_version[earliest] - if openshift_version > latest: - return self.openshift_to_ovs_version[latest] - - ovs_version = self.openshift_to_ovs_version.get(openshift_version) - if not ovs_version: - msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version))) - - return ovs_version + return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) def get_required_docker_version(self): """Return the correct Docker version(s) for the current OpenShift version.""" - openshift_version = self.get_openshift_version_tuple() - - earliest = min(self.openshift_to_docker_version) - latest = max(self.openshift_to_docker_version) - if openshift_version < earliest: - return self.openshift_to_docker_version[earliest] - if openshift_version > latest: - return self.openshift_to_docker_version[latest] - - docker_version = self.openshift_to_docker_version.get(openshift_version) - if not docker_version: - msg = "There is no recommended version of Docker for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version))) - - return docker_version - - def get_openshift_version_tuple(self): - """Return received image tag as a normalized (X, Y) minor version tuple.""" - version = self.get_var("openshift_image_tag") - comps = [int(component) for component in re.findall(r'\d+', version)] - - if len(comps) < 2: - msg = "An invalid version of OpenShift was found for this host: {}" - raise OpenShiftCheckException(msg.format(version)) - - comps[0] = self.map_major_release_version.get(comps[0], comps[0]) - return tuple(comps[0:2]) + return self.get_required_version("Docker", self.openshift_to_docker_version) diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index fc333dfd4..d31f263dd 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -6,13 +6,8 @@ from 
openshift_checks.docker_image_availability import DockerImageAvailability, @pytest.fixture() def task_vars(): return dict( - openshift=dict( - common=dict( - is_containerized=False, - is_atomic=False, - ), - docker=dict(), - ), + openshift_is_atomic=False, + openshift_is_containerized=False, openshift_service_type='origin', openshift_deployment_type='origin', openshift_image_tag='', @@ -20,7 +15,7 @@ def task_vars(): ) -@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [ +@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [ ("invalid", True, [], False), ("", True, [], False), ("origin", False, [], False), @@ -30,20 +25,20 @@ def task_vars(): ("origin", True, ["nfs"], False), ("openshift-enterprise", True, ["lb"], False), ]) -def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active): +def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active): task_vars['openshift_deployment_type'] = deployment_type - task_vars['openshift']['common']['is_containerized'] = is_containerized + task_vars['openshift_is_containerized'] = openshift_is_containerized task_vars['group_names'] = group_names assert DockerImageAvailability(None, task_vars).is_active() == expect_active -@pytest.mark.parametrize("is_containerized,is_atomic", [ +@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [ (True, True), (False, False), (True, False), (False, True), ]) -def test_all_images_available_locally(task_vars, is_containerized, is_atomic): +def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic): def execute_module(module_name, module_args, *_): if module_name == "yum": return {} @@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic): 'images': [module_args['name']], } - task_vars['openshift']['common']['is_containerized'] = is_containerized - task_vars['openshift']['common']['is_atomic'] = is_atomic + task_vars['openshift_is_containerized'] = openshift_is_containerized + task_vars['openshift_is_atomic'] = openshift_is_atomic result = DockerImageAvailability(execute_module, task_vars).run() assert not result.get('failed', False) @@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo assert expect_registries_reached == check.reachable_registries -@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [ +@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [ ( # standard set of stuff required on nodes "origin", False, ['oo_nodes_to_config'], "", set([ @@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo ), ]) -def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected): +def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected): task_vars = dict( - openshift=dict( - common=dict( - is_containerized=is_containerized, - is_atomic=False, - ), - ), + openshift_is_containerized=openshift_is_containerized, + openshift_is_atomic=False, openshift_deployment_type=deployment_type, group_names=groups, oreg_url=oreg_url, @@ -285,15 +276,40 @@ def test_registry_console_image(task_vars, expected): assert expected == DockerImageAvailability(task_vars=task_vars)._registry_console_image(tag, info) -def test_containerized_etcd(): - 
task_vars = dict( - openshift=dict( - common=dict( - is_containerized=True, - ), +@pytest.mark.parametrize("task_vars, expected", [ + ( + dict( + group_names=['oo_nodes_to_config'], + osn_ovs_image='spam/ovs', + openshift_image_tag="veggs", + ), + set([ + 'spam/ovs', 'openshift/node:veggs', 'cockpit/kubernetes:latest', + 'openshift/origin-haproxy-router:veggs', 'openshift/origin-deployer:veggs', + 'openshift/origin-docker-registry:veggs', 'openshift/origin-pod:veggs', + ]), + ), ( + dict( + group_names=['oo_masters_to_config'], ), + set(['openshift/origin:latest']), + ), ( + dict( + group_names=['oo_etcd_to_config'], + ), + set(['registry.access.redhat.com/rhel7/etcd']), + ), ( + dict( + group_names=['oo_etcd_to_config'], + osm_etcd_image='spam/etcd', + ), + set(['spam/etcd']), + ), +]) +def test_containerized(task_vars, expected): + task_vars.update(dict( + openshift_is_containerized=True, openshift_deployment_type="origin", - group_names=['oo_etcd_to_config'], - ) - expected = set(['registry.access.redhat.com/rhel7/etcd']) + )) + assert expected == DockerImageAvailability(task_vars=task_vars).required_images() diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py index 8fa68c378..33a5dd90a 100644 --- a/roles/openshift_health_checker/test/docker_storage_test.py +++ b/roles/openshift_health_checker/test/docker_storage_test.py @@ -4,21 +4,21 @@ from openshift_checks import OpenShiftCheckException from openshift_checks.docker_storage import DockerStorage -@pytest.mark.parametrize('is_containerized, group_names, is_active', [ +@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [ (False, ["oo_masters_to_config", "oo_etcd_to_config"], False), (False, ["oo_masters_to_config", "oo_nodes_to_config"], True), (True, ["oo_etcd_to_config"], True), ]) -def test_is_active(is_containerized, group_names, is_active): +def test_is_active(openshift_is_containerized, group_names, is_active): task_vars = dict( - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, group_names=group_names, ) assert DockerStorage(None, task_vars).is_active() == is_active def non_atomic_task_vars(): - return {"openshift": {"common": {"is_atomic": False}}} + return {"openshift_is_atomic": False} @pytest.mark.parametrize('docker_info, failed, expect_msg', [ diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py index a29dc166b..583c4c8dd 100644 --- a/roles/openshift_health_checker/test/etcd_traffic_test.py +++ b/roles/openshift_health_checker/test/etcd_traffic_test.py @@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words) task_vars = dict( group_names=group_names, - openshift=dict( - common=dict(is_containerized=False), - ), + openshift_is_containerized=False, openshift_service_type="origin" ) @@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words) assert result.get("failed", False) == failed -@pytest.mark.parametrize('is_containerized,expected_unit_value', [ +@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [ (False, "etcd"), (True, "etcd_container"), ]) -def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value): +def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value): task_vars = dict( - openshift=dict( - 
common=dict(is_containerized=is_containerized), - ) + openshift_is_containerized=openshift_is_containerized ) def execute_module(module_name, args, *_): diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py index 04a5e89c4..750d4b9e9 100644 --- a/roles/openshift_health_checker/test/kibana_test.py +++ b/roles/openshift_health_checker/test/kibana_test.py @@ -1,12 +1,10 @@ import pytest import json -try: - import urllib2 - from urllib2 import HTTPError, URLError -except ImportError: - from urllib.error import HTTPError, URLError - import urllib.request as urllib2 +# pylint can't find the package when it's installed in a virtualenv +from ansible.module_utils.six.moves.urllib import request # pylint: disable=import-error +# pylint: disable=import-error +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException @@ -202,7 +200,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch): if type(lib_result) is int: return _http_return(lib_result) raise lib_result - monkeypatch.setattr(urllib2, 'urlopen', urlopen) + monkeypatch.setattr(request, 'urlopen', urlopen) check = Kibana() check._get_kibana_url = lambda: 'url' diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py index b1a41ca3c..b5d6f2e95 100644 --- a/roles/openshift_health_checker/test/mixins_test.py +++ b/roles/openshift_health_checker/test/mixins_test.py @@ -10,8 +10,8 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck): @pytest.mark.parametrize('task_vars,expected', [ - (dict(openshift=dict(common=dict(is_containerized=False))), True), - (dict(openshift=dict(common=dict(is_containerized=True))), False), + (dict(openshift_is_containerized=False), True), + (dict(openshift_is_containerized=True), False), ]) def test_is_active(task_vars, expected): assert NotContainerizedCheck(None, task_vars).is_active() == expected @@ -20,4 +20,4 @@ def test_is_active_missing_task_vars(): with pytest.raises(OpenShiftCheckException) as excinfo: NotContainerizedCheck().is_active() - assert 'is_containerized' in str(excinfo.value) + assert 'openshift_is_containerized' in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index dd98ff4d8..80c7a0541 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -1,26 +1,7 @@ import pytest -from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException - - -def test_openshift_version_not_supported(): - def execute_module(*_): - return {} - - openshift_release = '111.7.0' - - task_vars = dict( - openshift=dict(common=dict()), - openshift_release=openshift_release, - openshift_image_tag='v' + openshift_release, - openshift_deployment_type='origin', - openshift_service_type='origin' - ) - - with pytest.raises(OpenShiftCheckException) as excinfo: - OvsVersion(execute_module, task_vars).run() - - assert "no recommended version of Open vSwitch" in str(excinfo.value) +from openshift_checks.ovs_version import OvsVersion +from openshift_checks import OpenShiftCheckException def test_invalid_openshift_release_format(): @@ -70,7 +51,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): assert result is return_value
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [ +@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [ (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs (['oo_masters_to_config'], True, False), @@ -82,9 +63,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): (['lb'], False, False), (['nfs'], False, False), ]) -def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): +def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active): task_vars = dict( group_names=group_names, - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, ) assert OvsVersion(None, task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index a1e6e0879..52740093d 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -3,16 +3,16 @@ import pytest from openshift_checks.package_availability import PackageAvailability -@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [ +@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [ ('yum', False, True), ('yum', True, False), ('dnf', True, False), ('dnf', False, False), ]) -def test_is_active(pkg_mgr, is_containerized, is_active): +def test_is_active(pkg_mgr, openshift_is_containerized, is_active): task_vars = dict( ansible_pkg_mgr=pkg_mgr, - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, ) assert PackageAvailability(None, task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index ea8e02b97..868b4bd12 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -1,6 +1,7 @@ import pytest -from openshift_checks.package_version import PackageVersion, OpenShiftCheckException +from openshift_checks.package_version import PackageVersion +from openshift_checks import OpenShiftCheckException def task_vars_for(openshift_release, deployment_type): @@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type): def test_openshift_version_not_supported(): check = PackageVersion(None, task_vars_for("1.2.3", 'origin')) - check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict + check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict with pytest.raises(OpenShiftCheckException) as excinfo: check.get_required_ovs_version() @@ -99,7 +100,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc assert result == return_value -@pytest.mark.parametrize('group_names,is_containerized,is_active', [ +@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [ (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs (['oo_masters_to_config'], True, False), @@ -111,9 +112,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc (['lb'], False, False), (['nfs'], False, False), ]) -def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): 
+def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active): task_vars = dict( group_names=group_names, - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, ) assert PackageVersion(None, task_vars).is_active() == is_active diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index a1c2c3956..3cf5d97c5 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -43,9 +43,9 @@ variables also control configuration behavior: **NOTE:** Configuring a value for `openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry` -host group is not allowed. Specifying a `glusterfs_registry` host group -indicates that a new GlusterFS cluster should be configured, whereas -specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting +host group is not allowed. Specifying a `glusterfs_registry` host group +indicates that a new GlusterFS cluster should be configured, whereas +specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting to use a pre-configured GlusterFS cluster for the registry storage. _ @@ -53,7 +53,6 @@ _ Dependencies ------------ -* openshift_hosted_facts * openshift_persistent_volumes Example Playbook diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index b6501d288..610de4f91 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -69,7 +69,7 @@ r_openshift_hosted_router_os_firewall_allow: [] ############ openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" -penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" +openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" openshift_hosted_registry_routecertificates: {} openshift_hosted_registry_routetermination: "passthrough" @@ -109,3 +109,5 @@ openshift_push_via_dns: False # NOTE: settting openshift_docker_hosted_registry_insecure may affect other roles openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}" openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}" + +openshift_hosted_registry_storage_azure_blob_realm: core.windows.net diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index 1d70ef7eb..ace2d15b0 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -12,6 +12,6 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_hosted_facts +- role: openshift_facts - role: lib_openshift -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml index d306adf42..57f59f872 100644 --- a/roles/openshift_hosted/tasks/main.yml +++ b/roles/openshift_hosted/tasks/main.yml @@ -1,6 +1,6 @@ --- -# This role is intended to be used with include_role. -# include_role: +# This role is intended to be used with import_role. 
+# import_role: # name: openshift_hosted # tasks_from: "{{ item }}" # with_items: diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 429f0c514..bc4d81eb7 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -1,10 +1,4 @@ --- -- name: Create temp directory for doing work in - command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX - register: mktempHosted - changed_when: False - check_mode: no - - name: setup firewall import_tasks: firewall.yml vars: @@ -49,7 +43,7 @@ - name: Update registry environment variables when pushing via dns set_fact: - openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}" + openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'REGISTRY_OPENSHIFT_SERVER_ADDR':'docker-registry.default.svc:5000'}) }}" when: openshift_push_via_dns | bool - name: Update registry proxy settings for dc/docker-registry @@ -132,25 +126,10 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" +# TODO(michaelgugino) remove this set fact. It is currently necessary due to +# custom module not properly templating variables. - name: setup registry list set_fact: r_openshift_hosted_registry_list: - name: "{{ openshift_hosted_registry_name }}" namespace: "{{ openshift_hosted_registry_namespace }}" - -- name: Wait for pod (Registry) - include_tasks: wait_for_pod.yml - vars: - l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" - l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" - -- include_tasks: storage/glusterfs.yml - when: - - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap - -- name: Delete temp directory - file: - name: "{{ mktempHosted.stdout }}" - state: absent - changed_when: False - check_mode: no diff --git a/roles/openshift_hosted/tasks/registry_storage.yml b/roles/openshift_hosted/tasks/registry_storage.yml new file mode 100644 index 000000000..aa66a7867 --- /dev/null +++ b/roles/openshift_hosted/tasks/registry_storage.yml @@ -0,0 +1,4 @@ +--- +- include_tasks: storage/glusterfs.yml + when: + - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 4e9219477..c2be00d19 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -18,6 +18,7 @@ - name: set_fact replicas set_fact: + # get_router_replicas is a custom filter in role lib_utils replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router @@ -25,10 +26,10 @@ backup: True dest: "/etc/origin/master/{{ item | basename }}" src: "{{ item }}" - with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | - oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" + with_items: "{{ openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | + lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} or - ( openshift_hosted_routers | oo_collect(attribute='certificate') | 
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 ) + ( openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 ) # This is for when we desire a cluster signed cert @@ -55,7 +56,7 @@ when: - openshift_hosted_router_create_certificate | bool - openshift_hosted_router_certificate == {} - - openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0 + - openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0 - name: Create the router service account(s) oc_serviceaccount: @@ -98,9 +99,3 @@ ports: "{{ item.ports }}" stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" - -- name: Wait for pod (Routers) - include_tasks: wait_for_pod.yml - vars: - l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" - l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index 18b2edcc6..b39c44b01 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -17,7 +17,7 @@ until: - "registry_pods.results.results[0]['items'] | count > 0" # There must be as many matching pods with 'Ready' status True as there are expected replicas - - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int" + - "registry_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int" delay: 10 retries: "{{ (600 / 10) | int }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index bd7181c17..fef945d51 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -1,4 +1,10 @@ --- +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX + register: mktempHosted + changed_when: False + check_mode: no + - name: Generate GlusterFS registry endpoints template: src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2" @@ -10,7 +16,14 @@ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - name: Create GlusterFS registry service and endpoint - command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" + command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" with_items: - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" + +- name: Delete temp directory + file: + name: "{{ mktempHosted.stdout }}" + state: absent + changed_when: False + check_mode: no diff --git a/roles/openshift_hosted/tasks/storage/registry_config.j2 
b/roles/openshift_hosted/tasks/storage/registry_config.j2 deleted file mode 120000 index f3e82ad4f..000000000 --- a/roles/openshift_hosted/tasks/storage/registry_config.j2 +++ /dev/null @@ -1 +0,0 @@ -../../../templates/registry_config.j2
\ No newline at end of file diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml index 056c79334..a14b0febc 100644 --- a/roles/openshift_hosted/tasks/wait_for_pod.yml +++ b/roles/openshift_hosted/tasks/wait_for_pod.yml @@ -3,17 +3,17 @@ block: - name: Ensure OpenShift pod correctly rolls out (best-effort today) command: | - {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ + {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \ --namespace {{ item.namespace | default('default') }} \ --config {{ openshift_master_config_dir }}/admin.kubeconfig async: 600 - poll: 15 + poll: 5 with_items: "{{ l_openshift_hosted_wfp_items }}" failed_when: false - name: Determine the latest version of the OpenShift pod deployment command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ + {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \ --namespace {{ item.namespace }} \ --config {{ openshift_master_config_dir }}/admin.kubeconfig \ -o jsonpath='{ .status.latestVersion }' @@ -22,14 +22,14 @@ - name: Poll for OpenShift pod deployment success command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ + {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ --namespace {{ item.0.namespace }} \ --config {{ openshift_master_config_dir }}/admin.kubeconfig \ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' register: openshift_hosted_wfp_rc_phase until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout" - delay: 15 - retries: 40 + delay: 5 + retries: 60 failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout" with_together: - "{{ l_openshift_hosted_wfp_items }}" diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml index f4fd15089..48d62c8df 100644 --- a/roles/openshift_hosted_templates/defaults/main.yml +++ b/roles/openshift_hosted_templates/defaults/main.yml @@ -1,5 +1,5 @@ --- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" +hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted" hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}" content_version: "{{ openshift.common.examples_content_version }}" diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index cc3159a32..0786e2d2f 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. 
for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml index 9f2e6125d..ccea54aaf 100644 --- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml index f04ce06d3..15ad4e9af 100644 --- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml index c178cf432..7acefa0f0 100644 --- a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. 
for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index 4027f524b..d7cc1e288 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -11,4 +11,6 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml index 89b92dfcc..34d39f3a5 100644 --- a/roles/openshift_hosted_templates/tasks/main.yml +++ b/roles/openshift_hosted_templates/tasks/main.yml @@ -1,20 +1,25 @@ --- - name: Create local temp dir for OpenShift hosted templates copy local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - become: False register: copy_hosted_templates_mktemp run_once: True # AUDIT:changed_when: not set here because this task actually # creates something +- name: Chmod local temp dir for OpenShift examples copy + local_action: command chmod 777 "{{ copy_hosted_templates_mktemp.stdout }}" + run_once: True + - name: Create tar of OpenShift examples local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" . args: # Disables the following warning: # Consider using unarchive module rather than running tar warn: no - become: False - register: copy_hosted_templates_tar + +- name: Chmod local tar of OpenShift examples + local_action: command chmod 744 "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" + run_once: True - name: Create remote OpenShift hosted templates directory file: @@ -28,7 +33,6 @@ dest: "{{ hosted_base }}/" - name: Cleanup the OpenShift hosted templates temp dir - become: False local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent - name: Modify registry paths if registry_url is not registry.access.redhat.com @@ -52,7 +56,7 @@ - name: Create or update hosted templates command: > - {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }} + {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command }} -f {{ hosted_base }} --config={{ openshift_hosted_templates_kubeconfig }} -n openshift diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index f9c16ba40..d8c45fb33 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -2,6 +2,12 @@ r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +openshift_router_image_default_dict: + origin: 'openshift/origin-haproxy-router' + openshift-enterprise: 'openshift3/ose-haproxy-router' +openshift_router_image_default: "{{ openshift_router_image_default_dict[openshift_deployment_type] }}" +openshift_router_image: "{{ openshift_router_image_default }}" + haproxy_frontends: - name: main binds: @@ -26,7 +32,7 @@ r_openshift_loadbalancer_os_firewall_allow: port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" cond: "{{ r_openshift_lb_use_nuage | bool }}" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 
'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" # NOTE # r_openshift_lb_use_nuage_default may be defined external to this role. diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml index 72298b599..3b5b45c5f 100644 --- a/roles/openshift_loadbalancer/meta/main.yml +++ b/roles/openshift_loadbalancer/meta/main.yml @@ -10,5 +10,5 @@ galaxy_info: versions: - 7 dependencies: -- role: lib_os_firewall +- role: lib_utils - role: openshift_facts diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 79c5793d9..4a11029ab 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -4,33 +4,33 @@ - name: Install haproxy package: name=haproxy state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded - name: Pull haproxy image command: > - docker pull {{ openshift.common.router_image }}:{{ openshift_image_tag }} - when: openshift.common.is_containerized | bool + docker pull {{ openshift_router_image }}:{{ openshift_image_tag }} + when: openshift_is_containerized | bool - name: Create config directory for haproxy file: path: /etc/haproxy state: directory - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Create the systemd unit files template: src: "haproxy.docker.service.j2" dest: "/etc/systemd/system/haproxy.service" - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool notify: restart haproxy - name: Configure systemd service directory for haproxy file: path: /etc/systemd/system/haproxy.service.d state: directory - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool # Work around ini_file create option in 2.2 which defaults to no - name: Create limits.conf file @@ -41,7 +41,7 @@ owner: root group: root changed_when: false - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - name: Configure the nofile limits for haproxy ini_file: @@ -50,7 +50,7 @@ option: LimitNOFILE value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}" notify: restart haproxy - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - name: Configure haproxy template: @@ -70,4 +70,4 @@ register: start_result - set_fact: - haproxy_start_result_changed: "{{ start_result | changed }}" + haproxy_start_result_changed: "{{ start_result is changed }}" diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 index 24fd635ec..823f012af 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 @@ -3,7 +3,7 @@ global maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }} log /dev/log local0 info -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin {% else %} chroot /var/lib/haproxy @@ -38,7 +38,8 @@ defaults timeout check 10s maxconn {{ openshift_loadbalancer_default_maxconn | default(20000) }} -listen stats :9000 +listen stats + bind :9000 mode http stats enable stats uri / diff --git 
a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 0343a7eb0..90111449c 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -5,7 +5,7 @@ PartOf={{ openshift_docker_service_name }}.service [Service] ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer -ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg +ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift_router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop openshift_loadbalancer LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }} diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 27cfc17d6..a192bd67e 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster: clients will use to connect to mux, and will be used in the TLS server cert subject. - `openshift_logging_mux_port`: 24284 +- `openshift_logging_mux_external_address`: The IP address that mux will listen + on for connections from *external* clients. Default is the default ipv4 + interface as reported by the `ansible_default_ipv4` fact. 
- `openshift_logging_mux_cpu_request`: 100m - `openshift_logging_mux_memory_limit`: 512Mi - `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index e1a5ea726..247c7e4df 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key): raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key)) -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - def serviceaccount_name(qualified_sa): ''' Returns the simple name from a fully qualified name ''' return qualified_sa.split(":")[-1] @@ -102,6 +94,28 @@ def serviceaccount_namespace(qualified_sa, default=None): return seg[-1] +def flatten_dict(data, parent_key=None): + """ This filter plugin will flatten a dict and its sublists into a single dict + """ + if not isinstance(data, dict): + raise RuntimeError("flatten_dict failed, expects to flatten a dict") + + merged = dict() + + for key in data: + if parent_key is not None: + insert_key = '.'.join((parent_key, key)) + else: + insert_key = key + + if isinstance(data[key], dict): + merged.update(flatten_dict(data[key], insert_key)) + else: + merged[insert_key] = data[key] + + return merged + + # pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' @@ -112,10 +126,10 @@ class FilterModule(object): return { 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, - 'map_from_pairs': map_from_pairs, 'min_cpu': min_cpu, 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, 'serviceaccount_namespace': serviceaccount_namespace, - 'walk': walk + 'walk': walk, + "flatten_dict": flatten_dict }
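A quick worked example of the new filter, assuming flatten_dict as defined in the plugin above is in scope (the sample settings dict is made up):

sample = {
    "index": {
        "number_of_shards": 1,
        "translog": {"flush_threshold_size": "256mb"},
    },
    "cluster.name": "logging-es",
}

# Nested dicts collapse into dotted keys; non-dict values pass through.
print(flatten_dict(sample))
# {'index.number_of_shards': 1,
#  'index.translog.flush_threshold_size': '256mb',
#  'cluster.name': 'logging-es'}

Flattening presumably lets the role compare or look up nested Elasticsearch settings by dotted key instead of walking nested dicts.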
diff --git a/roles/openshift_logging/library/logging_patch.py b/roles/openshift_logging/library/logging_patch.py new file mode 100644 index 000000000..d2c0bc456 --- /dev/null +++ b/roles/openshift_logging/library/logging_patch.py @@ -0,0 +1,112 @@ +#!/usr/bin/python + +""" Ansible module to help with creating context patch file with whitelisting for logging """ + +import difflib +import re + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: logging_patch + +short_description: This will create a context patch file while giving ability + to whitelist some lines (excluding them from comparison) + +description: + - "To create configmap patches for logging" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- logging_patch: + original_file: "{{ tempdir }}/current.yml" + new_file: "{{ configmap_new_file }}" + whitelist: "{{ configmap_protected_lines | default([]) }}" + +''' + + +def account_for_whitelist(file_contents, white_list=None): + """ This method will remove lines that contain whitelist values from the content + of the file so that we aren't building a patch based on that line + + Usage: + + for file_contents: + + index: + number_of_shards: {{ es_number_of_shards | default ('1') }} + number_of_replicas: {{ es_number_of_replicas | default ('0') }} + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + + + and white_list: + + ['number_of_shards', 'number_of_replicas'] + + + We would end up with: + + index: + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + + """ + + for line in white_list or []:  # guard against a None whitelist + file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents) + + return file_contents + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined. If it is then we use that value for the original key """ + + module = AnsibleModule( + argument_spec=dict( + original_file=dict(type='str', required=True), + new_file=dict(type='str', required=True), + whitelist=dict(required=False, type='list', default=[]) + ), + supports_check_mode=True + ) + + original_fh = open(module.params['original_file'], "r") + original_contents = original_fh.read() + original_fh.close() + + original_contents = account_for_whitelist(original_contents, module.params['whitelist']) + + new_fh = open(module.params['new_file'], "r") + new_contents = new_fh.read() + new_fh.close() + + new_contents = account_for_whitelist(new_contents, module.params['whitelist']) + + uni_diff = difflib.unified_diff(new_contents.splitlines(), + original_contents.splitlines(), + lineterm='') + + return module.exit_json(changed=False, # noqa: F405 + raw_patch="\n".join(uni_diff)) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 98d0d1c4f..37ffb0204 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -204,6 +204,14 @@ class OpenshiftLoggingFacts(OCBaseCommand): if comp is not None: self.add_facts_for(comp, "services", name, dict()) + # pylint: disable=too-many-arguments + def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None): + '''Extracts facts in logging namespace from configmap''' + if yaml_file is not None: + config_facts = yaml.load(yaml_file) + self.facts[comp][kind][name][config_key] = config_facts + self.facts[comp][kind][name]["raw"] = yaml_file + def facts_for_configmaps(self, namespace): ''' Gathers facts for configmaps in logging namespace ''' self.default_keys_for("configmaps") @@ -214,7 +222,10 @@ class OpenshiftLoggingFacts(OCBaseCommand): name = item["metadata"]["name"] comp = self.comp(name) if comp is not None: - self.add_facts_for(comp, "configmaps", name, item["data"]) + self.add_facts_for(comp, "configmaps", name, dict(item["data"])) + if comp in ["elasticsearch", "elasticsearch_ops"]: + for config_key in item["data"]: + self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key]) def facts_for_oauthclients(self, namespace): ''' Gathers facts for oauthclients used with logging ''' @@ -265,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand): return for item in role["subjects"]: comp = self.comp(item["name"]) - if comp is not None and namespace == item["namespace"]: + if comp is not None and namespace == item.get("namespace"): self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict()) # this needs to end up nested under the service account... 
@@ -277,7 +288,7 @@ class OpenshiftLoggingFacts(OCBaseCommand): return for item in role["subjects"]: comp = self.comp(item["name"]) - if comp is not None and namespace == item["namespace"]: + if comp is not None and namespace == item.get("namespace"): self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict()) # pylint: disable=no-self-use, too-many-return-statements diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml index 9c480f73a..01ed4918f 100644 --- a/roles/openshift_logging/meta/main.yaml +++ b/roles/openshift_logging/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index fcb4c94d3..6fdba6580 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -1,17 +1,21 @@ --- -- oc_obj: - state: list - kind: project - name: "{{ item }}" - with_items: "{{ __default_logging_ops_projects }}" +- command: > + {{ openshift_client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }} register: __logging_ops_projects - name: Annotate Operations Projects oc_edit: kind: ns - name: "{{ item.item }}" + name: "{{ project }}" separator: '#' content: metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}" - with_items: "{{ __logging_ops_projects.results }}" - when: item.results.stderr is not defined + metadata#annotations#openshift.io/logging.data.prefix: ".operations" + with_items: "{{ __logging_ops_projects.stdout.split(' ') }}" + loop_control: + loop_var: project + when: + - __logging_ops_projects.stderr | length == 0 + - openshift_logging_use_ops | default(false) | bool diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index ffed956a4..ced7397b5 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -107,8 +107,37 @@ - logging-fluentd - logging-mux +# remove annotations added by logging +- command: > + {{ openshift_client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + get namespaces -o name {{ __default_logging_ops_projects | join(' ') }} + register: __logging_ops_projects + +- name: Remove Annotation of Operations Projects + command: > + {{ openshift_client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + annotate {{ project }} openshift.io/logging.ui.hostname- + with_items: "{{ __logging_ops_projects.stdout_lines }}" + loop_control: + loop_var: project + when: + - __logging_ops_projects.stderr | length == 0 + ## EventRouter -- include_role: +- import_role: name: openshift_logging_eventrouter when: not openshift_logging_install_eventrouter | default(false) | bool + +# Update console config in openshift-web-console namespace +- name: Remove Kibana route information from the web console config + include_role: + name: openshift_web_console + tasks_from: update_console_config.yml + vars: + console_config_edits: + - key: clusterInfo#loggingPublicURL + value: "" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/generate_certs.yaml 
b/roles/openshift_logging/tasks/generate_certs.yaml index 082c0128f..a40449bf6 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -17,9 +17,9 @@ - name: Generate certificates command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert + {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt - --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test + --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test --overwrite=false check_mode: no when: - not ca_key_file.stat.exists @@ -139,10 +139,10 @@ # TODO: make idempotent - name: Generate proxy session - set_fact: session_secret={{ 200 | oo_random_word}} + set_fact: session_secret={{ 200 | lib_utils_oo_random_word}} check_mode: no # TODO: make idempotent - name: Generate oauth client secret - set_fact: oauth_secret={{ 64 | oo_random_word}} + set_fact: oauth_secret={{ 64 | lib_utils_oo_random_word}} check_mode: no diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml index d6ac88dcc..6e3204589 100644 --- a/roles/openshift_logging/tasks/generate_jks.yaml +++ b/roles/openshift_logging/tasks/generate_jks.yaml @@ -24,25 +24,21 @@ local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r" when: elasticsearch_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r" when: logging_es_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r" when: system_admin_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r" when: truststore_jks.stat.exists changed_when: False - become: no - name: pulling down signing items from host fetch: @@ -61,12 +57,10 @@ vars: - top_dir: "{{local_tmp.stdout}}" when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - become: no - name: Run JKS generation script local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}} check_mode: no - become: no when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Pushing locally generated JKS certs to remote host... 
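The proxy session and oauth client secrets generated above come from the renamed lib_utils_oo_random_word filter. A rough sketch of what such a filter plausibly does; the character set and exact behavior are assumptions, not taken from this diff:

import random
import string

def lib_utils_oo_random_word(length, chars=string.ascii_lowercase + string.digits):
    """Return `length` random characters, e.g. for session/oauth secrets.
    A hardened variant would draw from the secrets module instead of random."""
    return ''.join(random.choice(chars) for _ in range(length))

session_secret = lib_utils_oo_random_word(200)
oauth_secret = lib_utils_oo_random_word(64)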
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index bb8ebec6b..e4883bfa0 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -1,9 +1,12 @@ --- - name: Gather OpenShift Logging Facts openshift_logging_facts: - oc_bin: "{{openshift.common.client_binary}}" + oc_bin: "{{openshift_client_binary}}" openshift_logging_namespace: "{{openshift_logging_namespace}}" +## This is include vs import because we need access to group/inventory variables +- include_tasks: set_defaults_from_current.yml + - name: Set logging project oc_project: state: present @@ -84,14 +87,14 @@ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}" openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}" _es_containers: "{{ outer_item.0.containers}}" _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}" - "{{ openshift_logging_facts.elasticsearch.pvcs }}" - "{{ es_indices }}" loop_control: @@ -111,7 +114,7 @@ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}" with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} loop_control: @@ -148,7 +151,7 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" @@ -166,7 +169,7 @@ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ 
openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}" - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" - "{{ es_ops_indices }}" loop_control: @@ -190,7 +193,7 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" @@ -210,7 +213,7 @@ ## Kibana -- include_role: +- import_role: name: openshift_logging_kibana vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -223,7 +226,7 @@ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" -- include_role: +- import_role: name: openshift_logging_kibana vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -253,7 +256,7 @@ - include_tasks: annotate_ops_projects.yaml ## Curator -- include_role: +- import_role: name: openshift_logging_curator vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -263,7 +266,7 @@ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" -- include_role: +- import_role: name: openshift_logging_curator vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -281,7 +284,7 @@ - openshift_logging_use_ops | bool ## Mux -- include_role: +- import_role: name: openshift_logging_mux vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -294,7 +297,7 @@ ## Fluentd -- include_role: +- import_role: name: openshift_logging_fluentd vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -305,10 +308,22 @@ ## EventRouter -- include_role: +- import_role: name: openshift_logging_eventrouter when: openshift_logging_install_eventrouter | default(false) | bool - include_tasks: update_master_config.yaml + when: not openshift.common.version_gte_3_9 + +# Update asset config in openshift-web-console namespace +- name: Add Kibana route information to web console asset config + include_role: + name: openshift_web_console + tasks_from: update_console_config.yml + vars: + console_config_edits: + - key: clusterInfo#loggingPublicURL + value: "https://{{ openshift_logging_kibana_hostname }}" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 91db457d1..60cc399fa 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -17,7 +17,11 @@ register: local_tmp changed_when: False check_mode: no - become: no + +- name: Chmod local temp directory for doing work in + local_action: command chmod 777 "{{ local_tmp.stdout }}" + changed_when: False + check_mode: no - include_tasks: install_logging.yaml when: diff --git 
a/roles/openshift_logging/tasks/patch_configmap_file.yaml b/roles/openshift_logging/tasks/patch_configmap_file.yaml new file mode 100644 index 000000000..30087fe6a --- /dev/null +++ b/roles/openshift_logging/tasks/patch_configmap_file.yaml @@ -0,0 +1,35 @@ +--- +## The purpose of this task file is to get a patch that is based on the diff +## between configmap_current_file and configmap_new_file. The module +## logging_patch takes the paths of two files to compare and also a list of +## variables whose lines we exclude from the diffs. +## We then patch the new configmap file so that we can build a configmap +## using that file later. We then use oc apply to idempotently modify any +## existing configmap. + +## The following variables are expected to be provided when including this task: +# __configmap_output -- This is provided to us from patch_configmap_files.yaml +# it is a dict of the configmap where configmap_current_file exists +# configmap_current_file -- The name of the data file in the __configmap_output +# configmap_new_file -- The path to the file that we intend to oc apply later +# we apply our generated patch to this file. +# configmap_protected_lines -- The list of variables to exclude from the diff + +- copy: + content: "{{ __configmap_output.results.results[0]['data'][configmap_current_file] }}" + dest: "{{ tempdir }}/current.yml" + +- logging_patch: + original_file: "{{ tempdir }}/current.yml" + new_file: "{{ configmap_new_file }}" + whitelist: "{{ configmap_protected_lines | default([]) }}" + register: patch_output + +- copy: + content: "{{ patch_output.raw_patch }}\n" + dest: "{{ tempdir }}/patch.patch" + when: patch_output.raw_patch | length > 0 + +- command: > + patch --force --quiet -u "{{ configmap_new_file }}" "{{ tempdir }}/patch.patch" + when: patch_output.raw_patch | length > 0 diff --git a/roles/openshift_logging/tasks/patch_configmap_files.yaml b/roles/openshift_logging/tasks/patch_configmap_files.yaml new file mode 100644 index 000000000..74a9cc287 --- /dev/null +++ b/roles/openshift_logging/tasks/patch_configmap_files.yaml @@ -0,0 +1,31 @@ +--- +## The purpose of this task file is to take in a list of configmap files provided +## in the variable configmap_file_names, which correspond to the data sections +## within a configmap. We iterate over each of these files and create a patch +## from the diff between current_file and new_file to try to maintain any custom +## changes that a user may have made to a currently deployed configmap while +## trying to idempotently update with any role-provided files.
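The comments above describe the whole flow; the logging_patch module is the piece that builds the whitelist-filtered diff. A rough Python model of that idea, as an illustration only (the real module must also rebuild hunk headers so patch(1) can apply its output to the new file):

```python
# Diff the live configmap data against the role's new file, dropping any
# change that names a protected variable. Surviving lines are the user's
# custom edits, which the task file above re-applies to the new file.
import difflib


def whitelist_filtered_changes(current_lines, new_lines, whitelist):
    # Diff new -> current, i.e. "what would turn the role's file back into
    # the user's live configuration".
    diff = difflib.unified_diff(new_lines, current_lines,
                                fromfile='new.yml', tofile='current.yml')
    return [line for line in diff
            if not any(token in line for token in whitelist)]


current = ['index.number_of_shards: 3\n', 'custom.user.setting: tuned\n']
new = ['index.number_of_shards: 1\n', 'custom.user.setting: default\n']
for line in whitelist_filtered_changes(current, new, ['number_of_shards']):
    print(line, end='')
# The user's custom.user.setting edit survives; number_of_shards stays
# role-controlled because it is whitelisted out of the diff.
```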
+ +## The following variables are expected to be provided when including this task: +# configmap_name -- This is the name of the configmap that the files exist in +# configmap_namespace -- The namespace that the configmap lives in +# configmap_file_names -- This is expected to be passed in as a list of dicts +# current_file -- The name of the data entry within the configmap +# new_file -- The file path to the file we are comparing to current_file +# protected_lines -- List of variables whose lines will be excluded when creating a diff + +- oc_configmap: + name: "{{ configmap_name }}" + state: list + namespace: "{{ configmap_namespace }}" + register: __configmap_output + +- when: __configmap_output.results.stderr is undefined + include_tasks: patch_configmap_file.yaml + vars: + configmap_current_file: "{{ configmap_files.current_file }}" + configmap_new_file: "{{ configmap_files.new_file }}" + configmap_protected_lines: "{{ configmap_files.protected_lines | default([]) }}" + with_items: "{{ configmap_file_names }}" + loop_control: + loop_var: configmap_files diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml index 00de0ca06..d28d1d160 100644 --- a/roles/openshift_logging/tasks/procure_server_certs.yaml +++ b/roles/openshift_logging/tasks/procure_server_certs.yaml @@ -27,10 +27,10 @@ - name: Creating signed server cert and key for {{ cert_info.procure_component }} command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert + {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key - --signer-serial={{generated_certs_dir}}/ca.serial.txt + --signer-serial={{generated_certs_dir}}/ca.serial.txt --overwrite=false check_mode: no when: - cert_info.hostnames is defined diff --git a/roles/openshift_logging/tasks/set_defaults_from_current.yml b/roles/openshift_logging/tasks/set_defaults_from_current.yml new file mode 100644 index 000000000..dde362abe --- /dev/null +++ b/roles/openshift_logging/tasks/set_defaults_from_current.yml @@ -0,0 +1,34 @@ +--- + +## We are pulling default values from configmaps if they exist already +## Using conditional_set_fact allows us to set the value of a variable based on +## the value of another one, if it is already defined. Else we don't set the +## left hand side (it stays undefined as well). + +## conditional_set_fact allows us to specify a fact source, so first we try to +## set variables in the logging-elasticsearch & logging-elasticsearch-ops configmaps +## afterwards we set the value of the variable based on the value in the inventory +## but fall back to using the value from a configmap as a default. If neither is set +## then the variable remains undefined and the role default will be used.
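Before the tasks themselves, a rough model of the conditional_set_fact semantics described above, including the 'primary | fallback' notation the third task uses (assumed behavior inferred from these comments; the real action plugin ships with this repo):

```python
# Illustrative model: for each target variable, walk the source-key chain
# and take the first key that is defined in the fact source; if none is
# defined, the target simply stays unset.
def conditional_set_fact(facts, var_map):
    result = {}
    for target, sources in var_map.items():
        for source in (s.strip() for s in sources.split('|')):
            if facts.get(source) is not None:
                result[target] = facts[source]
                break
    return result


# Inventory value wins; otherwise fall back to the configmap-derived fact;
# otherwise the variable stays unset and the role default applies.
hostvars = {'__openshift_logging_es_number_of_shards': '3'}
print(conditional_set_fact(hostvars, {
    'openshift_logging_es_number_of_shards':
        'openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards',
}))
# -> {'openshift_logging_es_number_of_shards': '3'}
```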
+ +- conditional_set_fact: + facts: "{{ openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch']['elasticsearch.yml'] | flatten_dict }}" + vars: + __openshift_logging_es_number_of_shards: index.number_of_shards + __openshift_logging_es_number_of_replicas: index.number_of_replicas + when: openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch'] is defined + +- conditional_set_fact: + facts: "{{ openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops']['elasticsearch.yml'] | flatten_dict }}" + vars: + __openshift_logging_es_ops_number_of_shards: index.number_of_shards + __openshift_logging_es_ops_number_of_replicas: index.number_of_replicas + when: openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops'] is defined + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + openshift_logging_es_number_of_shards: openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards + openshift_logging_es_number_of_replicas: openshift_logging_es_number_of_replicas | __openshift_logging_es_number_of_replicas + openshift_logging_es_ops_number_of_shards: openshift_logging_es_ops_number_of_shards | __openshift_logging_es_ops_number_of_shards + openshift_logging_es_ops_number_of_replicas: openshift_logging_es_ops_number_of_replicas | __openshift_logging_es_ops_number_of_replicas diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index b96b8e29d..c0f42ba97 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -1,4 +1,5 @@ --- +# TODO: Remove when asset config is removed from master-config.yaml - name: Adding Kibana route information to loggingPublicURL modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml index d4635aab0..9f7c6341c 100644 --- a/roles/openshift_logging_curator/meta/main.yaml +++ b/roles/openshift_logging_curator/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index e7ef5ff22..cc68998f5 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -2,7 +2,7 @@ - name: Set default image variables based on deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -54,14 +54,17 @@ - copy: src: curator.yml dest: "{{ tempdir }}/curator.yml" - when: curator_config_contents is undefined changed_when: no -- copy: - content: "{{ curator_config_contents }}" - dest: "{{ tempdir }}/curator.yml" - when: curator_config_contents is defined - changed_when: no +- import_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-curator" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "config.yaml" + new_file: "{{ tempdir }}/curator.yml" - name: Set Curator configmap oc_configmap: diff --git a/roles/openshift_logging_curator/vars/main.yml 
b/roles/openshift_logging_curator/vars/main.yml index 95bf462d1..df5299a83 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_6" -__allowed_curator_versions: ["3_5", "3_6", "3_7"] +__latest_curator_version: "3_9" +__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_elasticsearch/handlers/main.yml b/roles/openshift_logging_elasticsearch/handlers/main.yml new file mode 100644 index 000000000..fa56897d0 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/handlers/main.yml @@ -0,0 +1,13 @@ +--- +- name: "Restarting logging-{{ _cluster_component }} cluster" + listen: "restart elasticsearch" + include_tasks: restart_cluster.yml + with_items: "{{ _restart_logging_components }}" + loop_control: + loop_var: _cluster_component + when: not logging_elasticsearch_rollout_override | bool + +## Stop this from running more than once +- set_fact: + logging_elasticsearch_rollout_override: True + listen: "restart elasticsearch" diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml index 6a9a6539c..e93d6b73e 100644 --- a/roles/openshift_logging_elasticsearch/meta/main.yaml +++ b/roles/openshift_logging_elasticsearch/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml index c53a06019..c55e7c5ea 100644 --- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -15,3 +15,5 @@ - fail: msg: Invalid version specified for Elasticsearch when: es_version not in __allowed_es_versions + +- include_tasks: get_es_version.yml diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml new file mode 100644 index 000000000..16de6f252 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml @@ -0,0 +1,42 @@ +--- +- command: > + oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} + register: _cluster_pods + +- name: "Getting ES version for logging-es cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/' + register: _curl_output + when: _cluster_pods.stdout_lines | count > 0 + +- command: > + oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} + register: _ops_cluster_pods + +- name: "Getting ES version for logging-es-ops cluster" + command: > + oc exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/' + register: _ops_curl_output + when: _ops_cluster_pods.stdout_lines | count > 0 + +- set_fact: + _es_output: "{{ _curl_output.stdout | from_json }}" + when: _curl_output.stdout is defined + +- set_fact: + _es_ops_output: "{{ _ops_curl_output.stdout | from_json }}" + when: _ops_curl_output.stdout is defined + 
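The set_fact tasks that close out this file pull version.number from the curl output registered above. A short sketch of that guarded extraction, using the JSON shape Elasticsearch returns from GET / (the values here are illustrative):

```python
# Mirror of the guarded set_fact chain: only derive the installed version
# when every level of version.number is actually present in the response.
import json

raw = json.dumps({
    "name": "logging-es-data-master-0",
    "cluster_name": "logging-es",
    "version": {"number": "2.4.4"},
})

body = json.loads(raw)
installed = body.get("version", {}).get("number")
if installed is not None:
    # main.yaml below compares major versions against __es_version to decide
    # whether a full (rather than rolling) cluster restart is needed.
    needs_full_restart = int(installed.split(".")[0]) < int("2.4.4".split(".")[0])
    print(installed, needs_full_restart)
```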
+- set_fact: + _es_installed_version: "{{ _es_output.version.number }}" + when: + - _es_output is defined + - _es_output.version is defined + - _es_output.version.number is defined + +- set_fact: + _es_ops_installed_version: "{{ _es_ops_output.version.number }}" + when: + - _es_ops_output is defined + - _es_ops_output.version is defined + - _es_ops_output.version.number is defined diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 8f2050043..ff5ad1045 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -15,10 +15,10 @@ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -32,6 +32,18 @@ - include_tasks: determine_version.yaml +- set_fact: + full_restart_cluster: True + when: + - _es_installed_version is defined + - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int + +- set_fact: + full_restart_cluster: True + when: + - _es_ops_installed_version is defined + - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int + # allow passing in a tempdir - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX @@ -111,7 +123,7 @@ - name: Create logging-metrics-reader-role command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n "{{ openshift_logging_elasticsearch_namespace }}" create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml" @@ -168,33 +180,33 @@ when: es_logging_contents is undefined changed_when: no -- set_fact: - __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}" - __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}" - - template: src: elasticsearch.yml.j2 dest: "{{ tempdir }}/elasticsearch.yml" vars: allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}" + es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}" when: es_config_contents is undefined changed_when: no -- copy: - content: "{{ es_logging_contents }}" - dest: "{{ tempdir }}/elasticsearch-logging.yml" - when: es_logging_contents is defined - changed_when: no - -- copy: - content: "{{ es_config_contents }}" - dest: "{{ tempdir }}/elasticsearch.yml" - when: es_config_contents is defined - changed_when: no +# create diff between 
the current configmap files and the files generated by this role +# NOTE: include_role must be used instead of import_role because +# this task file is looped over from another role. +- include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-elasticsearch" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "elasticsearch.yml" + new_file: "{{ tempdir }}/elasticsearch.yml" + protected_lines: ["number_of_shards", "number_of_replicas"] + - current_file: "logging.yml" + new_file: "{{ tempdir }}/elasticsearch-logging.yml" - name: Set ES configmap oc_configmap: @@ -204,7 +216,21 @@ from_file: elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" + register: es_config_creation + notify: "restart elasticsearch" +- when: es_config_creation.changed | bool + block: + - set_fact: + _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}" + + - shell: > + oc get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2 + register: _es_dcs + + - set_fact: + _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}" + when: _es_dcs.stdout != "" # secret - name: Set ES secret @@ -338,7 +364,7 @@ delete_after: true - set_fact: - es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" + es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" when: openshift_logging_elasticsearch_deployment_name == "" - set_fact: @@ -375,6 +401,13 @@ files: - "{{ tempdir }}/templates/logging-es-dc.yml" delete_after: true + register: es_dc_creation + notify: "restart elasticsearch" + +- set_fact: + _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}" + _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [es_deploy_name] | unique }}" + when: es_dc_creation.changed | bool - name: Retrieving the cert to use when generating secrets for the {{ es_component }} component slurp: diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml new file mode 100644 index 000000000..879459cf6 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml @@ -0,0 +1,113 @@ +--- +## get all pods for the cluster +- command: > + oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} + register: _cluster_pods + +### Check for cluster state before making changes -- if it's red then we don't want to continue +- name: "Checking current health for {{ _es_node }} cluster" + shell: > + oc exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health + register: _pod_status + when: _cluster_pods.stdout_lines | count > 0 + +- when: + - _pod_status.stdout is defined + - (_pod_status.stdout | from_json)['status'] in ['red'] + block: + - name: Set Logging message to manually restart + run_once: true + set_stats: + data: + installer_phase_logging: + message: "Cluster logging-{{ _cluster_component }} was in a
red state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling'}} cluster restart." + + - debug: msg="Cluster logging-{{ _cluster_component }} was in a red state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling'}} cluster restart." + +- when: _pod_status.stdout is undefined or (_pod_status.stdout | from_json)['status'] in ['green', 'yellow'] + block: + # Disable external communication for {{ _cluster_component }} + - name: Disable external communication for logging-{{ _cluster_component }} + oc_service: + state: present + name: "logging-{{ _cluster_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + selector: + component: "{{ _cluster_component }}" + provider: openshift + connection: blocked + labels: + logging-infra: 'support' + ports: + - port: 9200 + targetPort: "restapi" + when: + - full_restart_cluster | bool + + - name: "Disable shard balancing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }' + register: _disable_output + changed_when: "'\"acknowledged\":true' in _disable_output.stdout" + when: _cluster_pods.stdout_lines | count > 0 + + # Flush ES + - name: "Flushing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced' + register: _flush_output + changed_when: "'\"acknowledged\":true' in _flush_output.stdout" + when: + - _cluster_pods.stdout_lines | count > 0 + - full_restart_cluster | bool + + - command: > + oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + register: _cluster_dcs + + ## restart all dcs for full restart + - name: "Restart ES node {{ _es_node }}" + include_tasks: restart_es_node.yml + with_items: "{{ _cluster_dcs }}" + loop_control: + loop_var: _es_node + when: + - full_restart_cluster | bool + + ## restart the node if its dc is in the list of nodes to restart?
+ - name: "Restart ES node {{ _es_node }}" + include_tasks: restart_es_node.yml + with_items: "{{ _restart_logging_nodes }}" + loop_control: + loop_var: _es_node + when: + - not full_restart_cluster | bool + - _es_node in _cluster_dcs.stdout + + ## we may need a new first pod to run against -- fetch them all again + - command: > + oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} + register: _cluster_pods + + - name: "Enable shard balancing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }' + register: _enable_output + changed_when: "'\"acknowledged\":true' in _enable_output.stdout" + + # Reenable external communication for {{ _cluster_component }} + - name: Reenable external communication for logging-{{ _cluster_component }} + oc_service: + state: present + name: "logging-{{ _cluster_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + selector: + component: "{{ _cluster_component }}" + provider: openshift + labels: + logging-infra: 'support' + ports: + - port: 9200 + targetPort: "restapi" + when: + - full_restart_cluster | bool diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml new file mode 100644 index 000000000..fe15e40fd --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml @@ -0,0 +1,37 @@ +--- +- name: "Rolling out new pod(s) for {{ _es_node }}" + command: > + oc rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} + +- name: "Waiting for {{ _es_node }} to finish scaling up" + oc_obj: + state: list + name: "{{ _es_node }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + kind: dc + register: _dc_output + until: + - _dc_output.results.results[0].status is defined + - _dc_output.results.results[0].status.readyReplicas is defined + - _dc_output.results.results[0].status.readyReplicas > 0 + - _dc_output.results.results[0].status.updatedReplicas is defined + - _dc_output.results.results[0].status.updatedReplicas > 0 + retries: 60 + delay: 30 + +- name: Gettings name(s) of replica pod(s) + command: > + oc get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + register: _pods + +- name: "Waiting for ES to be ready for {{ _es_node }}" + shell: > + oc exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health + with_items: "{{ _pods.stdout.split(' ') }}" + loop_control: + loop_var: _pod + register: _pod_status + until: (_pod_status.stdout | from_json)['status'] in ['green', 'yellow'] + retries: 60 + delay: 5 + changed_when: false diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index bf04094a3..4b189f255 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -17,6 +17,7 @@ spec: logging-infra: "{{logging_component}}" strategy: type: Recreate + triggers: [] template: metadata: name: "{{deploy_name}}" @@ -49,7 +50,7 @@ spec: - 
-provider=openshift - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token - - -cookie-secret={{ 16 | oo_random_word | b64encode }} + - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} - -upstream=https://localhost:9200 - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 09e2ee4d0..122231031 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,9 +1,12 @@ --- -__latest_es_version: "3_6" -__allowed_es_versions: ["3_5", "3_6", "3_7"] +__latest_es_version: "3_9" +__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] +__es_version: "2.4.4" + +__es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key" # TODO: integrate these openshift_master_config_dir: "{{ openshift.common.config_base }}/master" @@ -12,3 +15,4 @@ es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}" es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" +full_restart_cluster: False diff --git a/roles/openshift_node_facts/meta/main.yml b/roles/openshift_logging_eventrouter/meta/main.yaml index 59bf680ce..711bb8f22 100644 --- a/roles/openshift_node_facts/meta/main.yml +++ b/roles/openshift_logging_eventrouter/meta/main.yaml @@ -1,10 +1,10 @@ --- galaxy_info: - author: Andrew Butcher - description: OpenShift Node Facts + author: OpenShift Red Hat + description: OpenShift Aggregated Logging Eventrouter company: Red Hat, Inc. 
license: Apache License, Version 2.0 - min_ansible_version: 1.9 + min_ansible_version: 2.2 platforms: - name: EL versions: @@ -12,4 +12,6 @@ galaxy_info: categories: - cloud dependencies: +- role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml index 96b181d61..31780a343 100644 --- a/roles/openshift_logging_eventrouter/tasks/main.yaml +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 9b58e4456..87b4204b5 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshi openshift_logging_fluentd_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" openshift_logging_fluentd_cpu_limit: null openshift_logging_fluentd_cpu_request: 100m diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml index 89c98204f..62f076780 100644 --- a/roles/openshift_logging_fluentd/meta/main.yaml +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml index e92a35f27..2721438f0 100644 --- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -4,7 +4,7 @@ name: "{{ node }}" kind: node state: add - labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" + labels: "{{ openshift_logging_fluentd_nodeselector | lib_utils_oo_dict_to_list_of_dict }}" # wait half a second between labels - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 87eedfb4b..79ebbca08 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -34,10 +34,10 @@ msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -108,38 +108,28 @@ dest: "{{ tempdir }}/fluent.conf" vars: 
deploy_type: "{{ openshift_logging_fluentd_deployment_type }}" - when: fluentd_config_contents is undefined - changed_when: no - copy: src: fluentd-throttle-config.yaml dest: "{{ tempdir }}/fluentd-throttle-config.yaml" - when: fluentd_throttle_contents is undefined - changed_when: no - copy: src: secure-forward.conf dest: "{{ tempdir }}/secure-forward.conf" - when: fluentd_secureforward_contents is undefined - changed_when: no - -- copy: - content: "{{ fluentd_config_contents }}" - dest: "{{ tempdir }}/fluent.conf" - when: fluentd_config_contents is defined - changed_when: no -- copy: - content: "{{ fluentd_throttle_contents }}" - dest: "{{ tempdir }}/fluentd-throttle-config.yaml" - when: fluentd_throttle_contents is defined - changed_when: no - -- copy: - content: "{{ fluentd_secureforward_contents }}" - dest: "{{ tempdir }}/secure-forward.conf" - when: fluentd_secureforward_contents is defined - changed_when: no +- import_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-fluentd" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "fluent.conf" + new_file: "{{ tempdir }}/fluent.conf" + - current_file: "throttle-config.yaml" + new_file: "{{ tempdir }}/fluentd-throttle-config.yaml" + - current_file: "secure-forward.conf" + new_file: "{{ tempdir }}/secure-forward.conf" - name: Set Fluentd configmap oc_configmap: @@ -182,8 +172,8 @@ app_port: "{{ openshift_logging_fluentd_app_port }}" ops_host: "{{ openshift_logging_fluentd_ops_host }}" ops_port: "{{ openshift_logging_fluentd_ops_port }}" - fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" - fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" + fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}" + fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}" fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}" fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}" diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 10283316c..c6256cf49 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -130,7 +130,7 @@ spec: containerName: "{{ daemonset_container_name }}" resource: limits.memory - name: "FILE_BUFFER_LIMIT" - value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256i') }}" + value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}" {% if openshift_logging_mux_client_mode is defined and ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index 92a426952..b60da814f 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_6" -__allowed_fluentd_versions: ["3_5", "3_6", "3_7"] +__latest_fluentd_version: "3_9" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] 
__allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml index d97586a37..d9d76dfe0 100644 --- a/roles/openshift_logging_kibana/meta/main.yaml +++ b/roles/openshift_logging_kibana/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 77bf8042a..3c3bd902e 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -1,9 +1,9 @@ --- # fail if we don't have an endpoint for ES to connect to? -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -69,7 +69,7 @@ # gen session_secret if necessary - name: Generate session secret copy: - content: "{{ 200 | oo_random_word }}" + content: "{{ 200 | lib_utils_oo_random_word }}" dest: "{{ generated_certs_dir }}/session_secret" when: - not session_secret_file.stat.exists @@ -77,7 +77,7 @@ # gen oauth_secret if necessary - name: Generate oauth secret copy: - content: "{{ 64 | oo_random_word }}" + content: "{{ 64 | lib_utils_oo_random_word }}" dest: "{{ generated_certs_dir }}/oauth_secret" when: - not oauth_secret_file.stat.exists diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index 241877a02..fed926a3b 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_6" -__allowed_kibana_versions: ["3_5", "3_6", "3_7"] +__latest_kibana_version: "3_9" +__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 1e6c501bf..e87c8d33e 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub openshift_logging_mux_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" openshift_logging_mux_cpu_limit: null openshift_logging_mux_cpu_request: 100m @@ -30,6 +31,7 @@ openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" openshift_logging_mux_hostname: "{{ 'mux.'
~ openshift_master_default_subdomain }}" openshift_logging_mux_port: 24284 +openshift_logging_mux_external_address: "{{ ansible_default_ipv4.address }}" # the namespace to use for undefined projects should come first, followed by any # additional namespaces to create by default - users will typically not need to set this openshift_logging_mux_default_namespaces: ["mux-undefined"] @@ -63,4 +65,4 @@ openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce'] openshift_logging_mux_file_buffer_storage_group: '65534' openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux" -openshift_logging_mux_file_buffer_limit: 256Mi +openshift_logging_mux_file_buffer_limit: 2Gi diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml index f271d8d7d..969752f15 100644 --- a/roles/openshift_logging_mux/meta/main.yaml +++ b/roles/openshift_logging_mux/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 68948bce2..7eba3cda4 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -7,10 +7,10 @@ msg: Operations logs destination is required when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == '' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -88,26 +88,24 @@ - copy: src: fluent.conf dest: "{{mktemp.stdout}}/fluent-mux.conf" - when: fluentd_mux_config_contents is undefined changed_when: no - copy: src: secure-forward.conf dest: "{{mktemp.stdout}}/secure-forward-mux.conf" - when: fluentd_mux_securefoward_contents is undefined changed_when: no -- copy: - content: "{{fluentd_mux_config_contents}}" - dest: "{{mktemp.stdout}}/fluent-mux.conf" - when: fluentd_mux_config_contents is defined - changed_when: no - -- copy: - content: "{{fluentd_mux_secureforward_contents}}" - dest: "{{mktemp.stdout}}/secure-forward-mux.conf" - when: fluentd_mux_secureforward_contents is defined - changed_when: no +- import_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-mux" + configmap_namespace: "{{ openshift_logging_mux_namespace }}" + configmap_file_names: + - current_file: "fluent.conf" + new_file: "{{ tempdir }}/fluent-mux.conf" + - current_file: "secure-forward.conf" + new_file: "{{ tempdir }}/secure-forward-mux.conf" - name: Set Mux configmap oc_configmap: @@ -150,7 +148,7 @@ port: "{{ openshift_logging_mux_port }}" targetPort: "mux-forward" external_ips: - - "{{ ansible_eth0.ipv4.address }}" + - "{{ openshift_logging_mux_external_address }}" when: openshift_logging_mux_allow_external | bool - name: Set logging-mux service for internal communication diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index e7b57f4b5..e87205bad 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_6" -__allowed_mux_versions: ["3_5", "3_6", "3_7"] +__latest_mux_version: "3_9" +__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", 
"3_9"] diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml index f0e728a3f..00e04b9f2 100644 --- a/roles/openshift_manage_node/defaults/main.yml +++ b/roles/openshift_manage_node/defaults/main.yml @@ -4,3 +4,6 @@ openshift_manage_node_is_master: False # Default is to be schedulable except for master nodes. l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}" + +openshift_master_node_labels: + node-role.kubernetes.io/master: 'true' diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml index d90cd28cf..a09808a39 100644 --- a/roles/openshift_manage_node/meta/main.yml +++ b/roles/openshift_manage_node/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml new file mode 100644 index 000000000..4f00351b5 --- /dev/null +++ b/roles/openshift_manage_node/tasks/config.yml @@ -0,0 +1,27 @@ +--- +- name: Set node schedulability + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" + retries: 10 + delay: 5 + register: node_schedulable + until: node_schedulable is succeeded + when: "'nodename' in openshift.node" + delegate_to: "{{ openshift_master_host }}" + +- name: Label nodes + oc_label: + name: "{{ openshift.node.nodename }}" + kind: node + state: add + labels: "{{ l_all_labels | lib_utils_oo_dict_to_list_of_dict }}" + namespace: default + when: + - "'nodename' in openshift.node" + - l_all_labels != {} + delegate_to: "{{ openshift_master_host }}" + vars: + l_node_labels: "{{ openshift_node_labels | default({}) }}" + l_master_labels: "{{ ('oo_masters_to_config' in group_names) | ternary(openshift_master_node_labels, {}) }}" + l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 247757ca9..154e2b45f 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -18,7 +18,7 @@ retries: 120 delay: 1 changed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool delegate_to: "{{ openshift_master_host }}" run_once: true @@ -34,26 +34,4 @@ when: "'nodename' in openshift.node" delegate_to: "{{ openshift_master_host }}" -- name: Set node schedulability - oc_adm_manage_node: - node: "{{ openshift.node.nodename | lower }}" - schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" - retries: 10 - delay: 5 - register: node_schedulable - until: node_schedulable|succeeded - when: "'nodename' in openshift.node" - delegate_to: "{{ openshift_master_host }}" - -- name: Label nodes - oc_label: - name: "{{ openshift.node.nodename }}" - kind: node - state: add - labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}" - namespace: default - when: - - "'nodename' in openshift.node" - - "'labels' in openshift.node" - - openshift.node.labels != {} - delegate_to: "{{ openshift_master_host }}" +- include_tasks: config.yml diff --git a/roles/openshift_manageiq/meta/main.yml b/roles/openshift_manageiq/meta/main.yml index 6c96a91bf..5c9481430 100644 --- a/roles/openshift_manageiq/meta/main.yml +++ b/roles/openshift_manageiq/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud 
dependencies: - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index b5e234b7f..57bc97e3e 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -15,6 +15,8 @@ openshift_management_pod_rollout_retries: 30 # # Choose 'miq-template' for a podified database install # Choose 'miq-template-ext-db' for an external database install +# TODO: Swap this var declaration once CFME is fully supported +#openshift_management_app_template: "{{ 'cfme-template' if openshift_deployment_type == 'openshift-enterprise' else 'miq-template' }}" openshift_management_app_template: miq-template # If you are using the miq-template-ext-db template then you must add # the required database parameters to the diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml index c3bc1d20c..48d1d4e26 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: postgresql - image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest command: - "/opt/rh/cfme-container-scripts/backup_db" env: diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml index 8b23f8a33..7fd4fc2e1 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: postgresql - image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest command: - "/opt/rh/cfme-container-scripts/restore_db" env: diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml index 4a04f3372..9866c29c3 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml @@ -31,6 +31,7 @@ objects: name: "${NAME}-secrets" stringData: pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 v2-key: "${V2_KEY}" - apiVersion: v1 @@ -90,15 +91,15 @@ objects: - name: cloudforms image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 80 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: "/" + tcpSocket: port: 80 - scheme: HTTP initialDelaySeconds: 200 timeoutSeconds: 3 ports: @@ -126,6 +127,11 @@ objects: secretKeyRef: name: "${NAME}-secrets" key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password - name: ANSIBLE_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -433,18 +439,173 @@ 
objects: <VirtualHost *:80> KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + ProxyPreserveHost on - ProxyPass /ws/ ws://${NAME}/ws/ - ProxyPassReverse /ws/ ws://${NAME}/ws/ - ProxyPass / http://${NAME}/ + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" +
external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN - apiVersion: v1 kind: ConfigMap metadata: name: "${HTTPD_SERVICE_NAME}-auth-configs" data: auth-type: internal + auth-kerberos-realms: undefined auth-configuration.conf: | # External Authentication Configuration File # @@ -464,6 +625,20 @@ objects: selector: name: httpd - apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 kind: DeploymentConfig metadata: name: "${HTTPD_SERVICE_NAME}" @@ -497,6 +672,9 @@ objects: image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" ports: - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP livenessProbe: exec: command: @@ -526,6 +704,11 @@ objects: configMapKeyRef: name: "${HTTPD_SERVICE_NAME}-auth-configs" key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms lifecycle: postStart: exec: @@ -581,6 +764,11 @@ parameters: displayName: Application Database Region description: 
Database region that will be used for application. value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm - name: ANSIBLE_DATABASE_NAME displayName: Ansible PostgreSQL database name required: true @@ -678,7 +866,7 @@ parameters: - name: MEMCACHED_IMG_NAME displayName: Memcached Image Name description: This is the Memcached image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached - name: MEMCACHED_IMG_TAG displayName: Memcached Image Tag description: This is the Memcached image tag/version requested to deploy. @@ -686,11 +874,11 @@ parameters: - name: FRONTEND_APPLICATION_IMG_NAME displayName: Frontend Application Image Name description: This is the Frontend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui - name: BACKEND_APPLICATION_IMG_NAME displayName: Backend Application Image Name description: This is the Backend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app - name: FRONTEND_APPLICATION_IMG_TAG displayName: Front end Application Image Tag description: This is the CloudForms Frontend Application image tag/version requested to deploy. @@ -702,7 +890,7 @@ parameters: - name: ANSIBLE_IMG_NAME displayName: Ansible Image Name description: This is the Ansible image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible - name: ANSIBLE_IMG_TAG displayName: Ansible Image Tag description: This is the Ansible image tag/version requested to deploy. @@ -730,10 +918,15 @@ parameters: displayName: Apache httpd Service Name description: The name of the OpenShift Service exposed for the httpd container. value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api - name: HTTPD_IMG_NAME displayName: Apache httpd Image Name description: This is the httpd image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd - name: HTTPD_IMG_TAG displayName: Apache httpd Image Tag description: This is the httpd image tag/version requested to deploy. 
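[Note] The httpd pod above selects its authentication stack at startup through the directive Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"; HTTPD_AUTH_TYPE and HTTPD_AUTH_KERBEROS_REALMS are injected from the auth-type and auth-kerberos-realms keys of the "${HTTPD_SERVICE_NAME}-auth-configs" ConfigMap. A minimal sketch of switching a running install to the external (Kerberos/PAM) configuration, assuming the default service name httpd and a hypothetical realm value; because the env values are read only at container start, the httpd DeploymentConfig must be rolled out afterwards:

  - name: Switch httpd authentication to external (illustrative sketch)
    command: >
      oc patch configmap/httpd-auth-configs
      -n {{ openshift_management_project }}
      --type merge
      -p '{"data": {"auth-type": "external", "auth-kerberos-realms": "EXAMPLE.COM"}}'

  - name: Roll out the httpd pod so the new env values are picked up
    command: oc rollout latest dc/httpd -n {{ openshift_management_project }}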
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml index d7c9f5af7..5c757b6c2 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml @@ -31,6 +31,7 @@ objects: name: "${NAME}-secrets" stringData: pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 v2-key: "${V2_KEY}" - apiVersion: v1 @@ -128,18 +129,173 @@ objects: <VirtualHost *:80> KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + ProxyPreserveHost on - ProxyPass /ws/ ws://${NAME}/ws/ - ProxyPassReverse /ws/ ws://${NAME}/ws/ - ProxyPass / http://${NAME}/ + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile
"/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN - apiVersion: v1 kind: ConfigMap metadata: name: "${HTTPD_SERVICE_NAME}-auth-configs" data: auth-type: internal + auth-kerberos-realms: undefined auth-configuration.conf: | # External Authentication Configuration File # @@ -203,15 +359,15 @@ objects: - name: cloudforms image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 80 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: "/" + tcpSocket: port: 80 - scheme: HTTP 
initialDelaySeconds: 200 timeoutSeconds: 3 ports: @@ -239,6 +395,11 @@ objects: secretKeyRef: name: "${NAME}-secrets" key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password - name: ANSIBLE_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -611,6 +772,20 @@ objects: selector: name: httpd - apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 kind: DeploymentConfig metadata: name: "${HTTPD_SERVICE_NAME}" @@ -644,6 +819,9 @@ objects: image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" ports: - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP livenessProbe: exec: command: @@ -673,6 +851,11 @@ objects: configMapKeyRef: name: "${HTTPD_SERVICE_NAME}-auth-configs" key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms lifecycle: postStart: exec: @@ -718,6 +901,11 @@ parameters: displayName: Application Database Region description: Database region that will be used for application. value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm - name: ANSIBLE_DATABASE_NAME displayName: Ansible PostgreSQL database name required: true @@ -842,7 +1030,7 @@ parameters: - name: POSTGRESQL_IMG_NAME displayName: PostgreSQL Image Name description: This is the PostgreSQL image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql - name: POSTGRESQL_IMG_TAG displayName: PostgreSQL Image Tag description: This is the PostgreSQL image tag/version requested to deploy. @@ -850,7 +1038,7 @@ parameters: - name: MEMCACHED_IMG_NAME displayName: Memcached Image Name description: This is the Memcached image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached - name: MEMCACHED_IMG_TAG displayName: Memcached Image Tag description: This is the Memcached image tag/version requested to deploy. @@ -858,11 +1046,11 @@ parameters: - name: FRONTEND_APPLICATION_IMG_NAME displayName: Frontend Application Image Name description: This is the Frontend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui - name: BACKEND_APPLICATION_IMG_NAME displayName: Backend Application Image Name description: This is the Backend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app - name: FRONTEND_APPLICATION_IMG_TAG displayName: Front end Application Image Tag description: This is the CloudForms Frontend Application image tag/version requested to deploy. 
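[Note] Every image reference in both templates now defaults to the public registry.access.redhat.com/cloudforms46-beta repositories instead of the internal brew-pulp host, and the new APPLICATION_ADMIN_PASSWORD parameter lands in the ${NAME}-secrets secret. All of these remain overridable per install through openshift_management_template_parameters, which the role passes to oc_process unmodified (see the tasks/main.yml hunk below). A hedged inventory sketch; the parameter names come from the templates above, the values are placeholders:

  openshift_management_template_parameters:
    APPLICATION_ADMIN_PASSWORD: not-smartvm                      # replace the well-known 'smartvm' default
    HTTPD_IMG_NAME: registry.example.com/cfme-openshift-httpd    # hypothetical mirror
    HTTPD_IMG_TAG: latest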
@@ -874,7 +1062,7 @@ parameters: - name: ANSIBLE_IMG_NAME displayName: Ansible Image Name description: This is the Ansible image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible - name: ANSIBLE_IMG_TAG displayName: Ansible Image Tag description: This is the Ansible image tag/version requested to deploy. @@ -907,10 +1095,15 @@ parameters: displayName: Apache httpd Service Name description: The name of the OpenShift Service exposed for the httpd container. value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api - name: HTTPD_IMG_NAME displayName: Apache httpd Image Name description: This is the httpd image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd - name: HTTPD_IMG_TAG displayName: Apache httpd Image Tag description: This is the httpd image tag/version requested to deploy. diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml index 07ad51126..9f19704a8 100644 --- a/roles/openshift_management/meta/main.yml +++ b/roles/openshift_management/meta/main.yml @@ -16,3 +16,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml index e45ea8d43..80318fec0 100644 --- a/roles/openshift_management/tasks/accounts.yml +++ b/roles/openshift_management/tasks/accounts.yml @@ -5,14 +5,14 @@ oc_serviceaccount: namespace: "{{ openshift_management_project }}" state: present - name: "{{ openshift_management_flavor_short }}{{ item.name }}" + name: "{{ __openshift_management_flavor_short }}{{ item.name }}" with_items: - "{{ __openshift_system_account_sccs }}" - name: Ensure the CFME system accounts have all the required SCCs oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: scc resource_name: "{{ item.resource_name }}" with_items: @@ -21,7 +21,7 @@ - name: Ensure the CFME system accounts have the required roles oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: role resource_name: "{{ item.resource_name }}" with_items: diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml index 24b2ce6ac..357e6a710 100644 --- a/roles/openshift_management/tasks/add_container_provider.yml +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -1,6 +1,6 @@ --- - name: Ensure OpenShift facts module is available - include_role: + import_role: role: openshift_facts - name: Ensure OpenShift facts are loaded @@ -27,7 +27,7 @@ - name: Ensure the management SA 
bearer token is identified set_fact: - management_token: "{{ sa.results | oo_filter_sa_secrets }}" + management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}" - name: Ensure the SA bearer token value is read oc_secret: diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index f212dba7c..5209eba56 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -8,7 +8,7 @@ # This creates a service account allowing Container Provider # integration (managing OCP/Origin via MIQ/Management) - name: Enable Container Provider Integration - include_role: + import_role: role: openshift_manageiq - name: "Ensure the Management '{{ openshift_management_project }}' namespace exists" @@ -71,15 +71,15 @@ # CREATE APP - name: Note the correct ext-db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" + openshift_management_template_name: "{{ __openshift_management_flavor }}-ext-db" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db - name: Note the correct podified db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}" + openshift_management_template_name: "{{ __openshift_management_flavor }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App is created oc_process: @@ -89,7 +89,7 @@ params: "{{ openshift_management_template_parameters }}" - name: Wait for the app to come up. May take several minutes, 30s check intervals, {{ openshift_management_pod_rollout_retries }} retries - command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + command: "oc logs {{ __openshift_management_flavor }}-0 -n {{ openshift_management_project }}" register: app_seeding_logs until: app_seeding_logs.stdout.find('Server starting complete') != -1 delay: 30 diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index d1b9a8d5c..1f8cac6c6 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -12,7 +12,7 @@ when: - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Note the DB PV Size from Template Parameters set_fact: @@ -31,7 +31,7 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-app" + name: "{{ __openshift_management_flavor_short }}-app" register: miq_app_pv_check - name: Check if the Management DB PV has been created @@ -39,15 +39,15 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-db" + name: "{{ __openshift_management_flavor_short }}-db" register: miq_db_pv_check when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-app-pv" + template_name: "{{ __openshift_management_flavor }}-app-pv" create: True params: 
PV_SIZE: "{{ openshift_management_app_pv_size }}" @@ -58,12 +58,12 @@ - name: Ensure the Management DB PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-db-pv" + template_name: "{{ __openshift_management_flavor }}-db-pv" create: True params: PV_SIZE: "{{ openshift_management_db_pv_size }}" BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" NFS_HOST: "{{ openshift_management_nfs_server }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - miq_db_pv_check.results.results == [{}] diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 94e11137c..4a00efb1d 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -5,32 +5,32 @@ - name: Setting up NFS storage block: - name: Include the NFS Setup role tasks - include_role: + import_role: role: openshift_nfs tasks_from: setup vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - name: Create the App export - include_role: + import_role: role: openshift_nfs tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-app" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" - name: Create the DB export - include_role: + import_role: role: openshift_nfs tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-db" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 9f97cdcb9..f40af7349 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -13,59 +13,59 @@ ###################################################################### # STANDARD PODIFIED DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" register: miq_server_check - when: miq_server_check.results.results == [{}] block: - name: Copy over Management Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-template.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: 
"{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template.yaml" ###################################################################### # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +- when: __openshift_management_use_ext_db block: - name: Check if the Management Ext-DB Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" register: miq_ext_db_server_check - when: miq_ext_db_server_check.results.results == [{}] block: - name: Copy over Management Ext-DB Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{__openshift_management_flavor_short}}-template-ext-db.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Ext-DB Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template-ext-db.yaml" # End app template creation. ###################################################################### @@ -79,50 +79,50 @@ namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" register: miq_app_pv_check - when: miq_app_pv_check.results.results == [{}] block: - name: Copy over Management App PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management App PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" #--------------------------------------------------------------------- # Required for database if the installation is fully podified -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management DB PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" register: miq_db_pv_check - when: miq_db_pv_check.results.results == [{}] block: - name: Copy over Management DB PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + src: "templates/{{ 
__openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management DB PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml index b22f36a4f..2dc895190 100644 --- a/roles/openshift_management/tasks/validate.yml +++ b/roles/openshift_management/tasks/validate.yml @@ -100,4 +100,4 @@ 'openshift_management_template_parameters'" with_items: "{{ __openshift_management_required_db_conn_params }}" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml index da3ad0af7..d7b18df3a 100644 --- a/roles/openshift_management/vars/main.yml +++ b/roles/openshift_management/vars/main.yml @@ -30,14 +30,18 @@ __openshift_management_db_parameters: - DATABASE_PORT - DATABASE_NAME -# # Commented out until we can support both CFME and MIQ -# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}" -#openshift_management_flavor: cloudforms -openshift_management_flavor: manageiq -# TODO: Make this conditional as well based on the prior variable -# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}" -# openshift_management_flavor_short: cfme -openshift_management_flavor_short: miq +__openshift_management_flavors: + miq: + short: miq + long: manageiq + cfme: + short: cfme + long: cloudforms + +__openshift_management_flavor: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['long'] }}" +__openshift_management_flavor_short: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['short'] }}" + +__openshift_management_use_ext_db: "{{ true if 'ext-db' in openshift_management_app_template else false }}" ###################################################################### # ACCOUNTING diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 38b2fd8b8..7d96a467e 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -7,6 +7,12 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}" r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +osm_image_default_dict: + origin: 'openshift/origin' + openshift-enterprise: 'openshift3/ose' +osm_image_default: "{{ osm_image_default_dict[openshift_deployment_type] }}" +osm_image: "{{ osm_image_default }}" + system_images_registry_dict: openshift-enterprise: "registry.access.redhat.com" origin: "docker.io" @@ -47,12 +53,63 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url) else '' }}" oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False -openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}" containerized_svc_dir: "/usr/lib/systemd/system" ha_svc_template_path: "native-cluster" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" + +openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" +loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" +openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml" +openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" + +scheduler_config: + kind: Policy + apiVersion: v1 + predicates: "{{ openshift_master_scheduler_predicates + | default(openshift_master_scheduler_current_predicates + | default(openshift_master_scheduler_default_predicates)) }}" + priorities: "{{ openshift_master_scheduler_priorities + | default(openshift_master_scheduler_current_priorities + | default(openshift_master_scheduler_default_priorities)) }}" + +openshift_master_valid_grant_methods: +- auto +- prompt +- deny + +openshift_master_is_scaleup_host: False + +# openshift_master_oauth_template is deprecated. Should be added to deprecations +# and removed. +openshift_master_oauth_template: False +openshift_master_oauth_templates_default: + login: "{{ openshift_master_oauth_template }}" +openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(openshift_master_oauth_templates_default, False) }}" +# Here we combine openshift_master_oauth_template into the 'login' key of openshift_master_oauth_templates, if not present. +l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}" + +# These defaults assume forcing journald persistence, fsync to disk once +# a second, rate-limiting to 10,000 logs a second, no forwarding to +# syslog or wall, using 8GB of disk space maximum, using 10MB journal +# files, keeping only a day's worth of logs per journal file, and +# retaining journal files no longer than a month. +journald_vars_to_replace: +- { var: Storage, val: persistent } +- { var: Compress, val: yes } +- { var: SyncIntervalSec, val: 1s } +- { var: RateLimitInterval, val: 1s } +- { var: RateLimitBurst, val: 10000 } +- { var: SystemMaxUse, val: 8G } +- { var: SystemKeepFree, val: 20% } +- { var: SystemMaxFileSize, val: 10M } +- { var: MaxRetentionSec, val: 1month } +- { var: MaxFileSec, val: 1day } +- { var: ForwardToSyslog, val: no } +- { var: ForwardToWall, val: no } + # NOTE # r_openshift_master_*_default may be defined external to this role.
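[Note] Everything that previously lived in openshift_master/vars/main.yml (deleted at the end of this patch) now sits in defaults, joined by the new osm_image lookup and the oauth-template compatibility shim. A short illustrative task, assuming an origin deployment with only the deprecated scalar set; the variable names are exactly the defaults above:

  - name: Show how the new master defaults resolve (sketch)
    debug:
      msg:
      - "osm_image: {{ osm_image }}"                                 # 'openshift/origin' when openshift_deployment_type == 'origin'
      - "oauth templates: {{ l_openshift_master_oauth_templates }}"  # {'login': <template>} once the scalar is folded in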
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index bf0cbbf18..3460efec9 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -14,5 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils -- role: lib_os_firewall - role: openshift_facts diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml index a16cbe78e..6166062ed 100644 --- a/roles/openshift_master/tasks/journald.yml +++ b/roles/openshift_master/tasks/journald.yml @@ -26,4 +26,4 @@ delay: 5 register: result until: result.rc == 0 - when: journald_update | changed + when: journald_update is changed diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 9be5508aa..b12a6b346 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -16,12 +16,12 @@ - name: Install Master package package: - name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: - - not openshift.common.is_containerized | bool + - not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded - name: Create r_openshift_master_data_dir file: @@ -31,12 +31,12 @@ owner: root group: root when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - name: Reload systemd units command: systemctl daemon-reload when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - name: Re-gather package dependent master facts openshift_facts: @@ -48,7 +48,7 @@ - name: Create the policy file if it does not already exist command: > - {{ openshift.common.client_binary }} adm create-bootstrap-policy-file + {{ openshift_client_binary }} adm create-bootstrap-policy-file --filename={{ openshift_master_policy }} args: creates: "{{ openshift_master_policy }}" @@ -69,10 +69,10 @@ package: name=httpd-tools state=present when: - item.kind == 'HTPasswdPasswordIdentityProvider' - - not openshift.common.is_atomic | bool + - not openshift_is_atomic | bool with_items: "{{ openshift.master.identity_providers }}" register: result - until: result | success + until: result is succeeded - name: Ensure htpasswd directory exists file: @@ -147,7 +147,7 @@ register: l_already_set - set_fact: - openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" + openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" - name: Set fact of all etcd host IPs openshift_facts: @@ -164,7 +164,7 @@ - name: Install Master system container include_tasks: system_container.yml when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - l_is_master_system_container | bool - name: Create session secrets file @@ -181,6 +181,7 @@ - restart master api - set_fact: + # translate_idps is a custom filter in role lib_utils translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}" # TODO: add the validate parameter when there is 
a validation command to run @@ -209,17 +210,17 @@ when: - inventory_hostname == openshift_master_hosts[0] register: l_start_result - until: not l_start_result | failed + until: not (l_start_result is failed) retries: 1 delay: 60 - name: Dump logs from master-api if it failed command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api when: - - l_start_result | failed + - l_start_result is failed - set_fact: - master_api_service_status_changed: "{{ l_start_result | changed }}" + master_api_service_status_changed: "{{ l_start_result is changed }}" when: - inventory_hostname == openshift_master_hosts[0] @@ -236,17 +237,17 @@ when: - inventory_hostname != openshift_master_hosts[0] register: l_start_result - until: not l_start_result | failed + until: not (l_start_result is failed) retries: 1 delay: 60 - name: Dump logs from master-api if it failed command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api when: - - l_start_result | failed + - l_start_result is failed - set_fact: - master_api_service_status_changed: "{{ l_start_result | changed }}" + master_api_service_status_changed: "{{ l_start_result is changed }}" when: - inventory_hostname != openshift_master_hosts[0] @@ -262,18 +263,18 @@ enabled: yes state: started register: l_start_result - until: not l_start_result | failed + until: not (l_start_result is failed) retries: 1 delay: 60 - name: Dump logs from master-controllers if it failed command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers when: - - l_start_result | failed + - l_start_result is failed - name: Set fact master_controllers_service_status_changed set_fact: - master_controllers_service_status_changed: "{{ l_start_result | changed }}" + master_controllers_service_status_changed: "{{ l_start_result is changed }}" - name: node bootstrap settings include_tasks: bootstrap.yml diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index 8b342a5b4..911a9bd3d 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -43,7 +43,7 @@ set_fact: l_bind_docker_reg_auth: True when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - oreg_auth_user is defined - > (master_oreg_auth_credentials_stat.stat.exists diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml index 308b2f4cd..7e013a699 100644 --- a/roles/openshift_master/tasks/set_loopback_context.yml +++ b/roles/openshift_master/tasks/set_loopback_context.yml @@ -1,13 +1,13 @@ --- - name: Test local loopback context command: > - {{ openshift.common.client_binary }} config view + {{ openshift_client_binary }} config view --config={{ openshift_master_loopback_config }} changed_when: false register: l_loopback_config - command: > - {{ openshift.common.client_binary }} config set-cluster + {{ openshift_client_binary }} config set-cluster --certificate-authority={{ openshift_master_config_dir }}/ca.crt --embed-certs=true --server={{ openshift.master.loopback_api_url }} {{ openshift.master.loopback_cluster_name }} @@ -17,18 +17,18 @@ register: set_loopback_cluster - command: > - {{ openshift.common.client_binary }} config set-context + {{ openshift_client_binary }} config set-context --cluster={{ openshift.master.loopback_cluster_name }} --namespace=default --user={{ openshift.master.loopback_user }} {{ openshift.master.loopback_context_name }} 
--config={{ openshift_master_loopback_config }} when: - - set_loopback_cluster | changed + - set_loopback_cluster is changed register: l_set_loopback_context - command: > - {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} + {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }} --config={{ openshift_master_loopback_config }} when: - - l_set_loopback_context | changed + - l_set_loopback_context is changed register: set_current_context diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index f6c5ce0dd..dcbf7fd9f 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -2,7 +2,7 @@ - name: Pre-pull master system container image command: > - atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} + atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }} register: l_pull_result changed_when: "'Pulling layer' in l_pull_result.stdout" @@ -14,7 +14,7 @@ - name: Install or Update HA api master system container oc_atomic_container: name: "{{ openshift_service_type }}-master-api" - image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" + image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}" state: latest values: - COMMAND=api @@ -22,7 +22,7 @@ - name: Install or Update HA controller master system container oc_atomic_container: name: "{{ openshift_service_type }}-master-controllers" - image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" + image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}" state: latest values: - COMMAND=controllers diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 76b6f46aa..870ab7c57 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -7,7 +7,7 @@ containerized_svc_dir: "/etc/systemd/system" ha_svc_template_path: "docker-cluster" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - include_tasks: registry_auth.yml @@ -30,11 +30,11 @@ # This is the image used for both HA and non-HA clusters: - name: Pre-pull master image command: > - docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }} + docker pull {{ osm_image }}:{{ openshift_image_tag }} register: l_pull_result changed_when: "'Downloaded newer image' in l_pull_result.stdout" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not l_is_master_system_container | bool - name: Create the ha systemd unit files @@ -50,7 +50,7 @@ - command: systemctl daemon-reload when: - - l_create_ha_unit_files | changed + - l_create_ha_unit_files is changed # end workaround for missing systemd unit files - name: enable master services diff --git a/roles/openshift_master/tasks/upgrade.yml 
b/roles/openshift_master/tasks/upgrade.yml index f84cf2f6e..f143673cf 100644 --- a/roles/openshift_master/tasks/upgrade.yml +++ b/roles/openshift_master/tasks/upgrade.yml @@ -1,6 +1,6 @@ --- - include_tasks: upgrade/rpm_upgrade.yml - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - include_tasks: upgrade/upgrade_scheduler.yml diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml index f50b91ff5..4564f33dd 100644 --- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml @@ -8,8 +8,25 @@ # TODO: If the sdn package isn't already installed this will install it, we # should fix that -- name: Upgrade master packages - package: name={{ master_pkgs | join(',') }} state=present +- name: Upgrade master packages - yum + command: + yum install -y {{ master_pkgs | join(' ') }} \ + {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }} + vars: + master_pkgs: + - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}" + register: result + until: result is succeeded + when: ansible_pkg_mgr == 'yum' + +- name: Upgrade master packages - dnf + dnf: + name: "{{ master_pkgs | join(',') }}" + state: present vars: master_pkgs: - "{{ openshift_service_type }}{{ openshift_pkg_version }}" @@ -17,6 +34,6 @@ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}" - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}" register: result - until: result | success + until: result is succeeded + when: ansible_pkg_mgr == 'dnf' diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml index 8558bf3e9..995a5ab70 100644 --- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml +++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml @@ -1,6 +1,8 @@ --- # Upgrade predicates - vars: + # openshift_master_facts_default_predicates is a custom lookup plugin in + # role lib_utils prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 3f7a528a9..4c68155ea 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }} {% elif openshift_push_via_dns | default(false) %} OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 {% 
endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 5e46d9121..a56c0340c 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -21,7 +21,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ -v /etc/pki:/etc/pki:ro \ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ - {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ + {{ osm_image }}:${IMAGE_VERSION} start master api \ --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 899575f1a..79171d511 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ -v /etc/pki:/etc/pki:ro \ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ - {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ + {{ osm_image }}:${IMAGE_VERSION} start master controllers \ --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 92668b227..4c9ab1864 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -1,10 +1,11 @@ admissionConfig: {% if 'admission_plugin_config' in openshift.master %} - pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }} + pluginConfig:{{ openshift.master.admission_plugin_config | lib_utils_to_padded_yaml(level=2) }} {% endif %} apiLevels: - v1 apiVersion: v1 +{% if not openshift.common.version_gte_3_9 %} assetConfig: logoutURL: "{{ openshift.master.logout_url | default('') }}" masterPublicURL: {{ openshift.master.public_api_url }} @@ -16,13 +17,13 @@ assetConfig: metricsPublicURL: {{ openshift_hosted_metrics_deploy_url }} {% endif %} {% if 'extension_scripts' in openshift.master %} - extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }} + extensionScripts: {{ openshift.master.extension_scripts | lib_utils_to_padded_yaml(1, 2) }} {% endif %} {% if 'extension_stylesheets' in openshift.master %} - extensionStylesheets: {{ 
openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }} + extensionStylesheets: {{ openshift.master.extension_stylesheets | lib_utils_to_padded_yaml(1, 2) }} {% endif %} {% if 'extensions' in openshift.master %} - extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }} + extensions: {{ openshift.master.extensions | lib_utils_to_padded_yaml(1, 2) }} {% endif %} servingInfo: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }} @@ -41,8 +42,10 @@ assetConfig: - {{ cipher_suite }} {% endfor %} {% endif %} +# assetconfig end +{% endif %} {% if openshift.master.audit_config | default(none) is not none %} -auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }} +auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }} {% endif %} controllerConfig: election: @@ -69,29 +72,13 @@ dnsConfig: bindNetwork: tcp4 {% endif %} etcdClientInfo: - ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} + ca: master.etcd-ca.crt certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: {% for etcd_url in openshift.master.etcd_urls %} - {{ etcd_url }} {% endfor %} -{% if openshift.master.embedded_etcd | bool %} -etcdConfig: - address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }} - peerAddress: {{ openshift.common.hostname }}:7001 - peerServingInfo: - bindAddress: {{ openshift.master.bind_addr }}:7001 - certFile: etcd.server.crt - clientCA: ca-bundle.crt - keyFile: etcd.server.key - servingInfo: - bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }} - certFile: etcd.server.crt - clientCA: ca-bundle.crt - keyFile: etcd.server.key - storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd -{% endif %} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io kubernetesStorageVersion: v1 @@ -101,7 +88,7 @@ imageConfig: format: {{ openshift.master.registry_url }} latest: {{ openshift_master_image_config_latest }} {% if 'image_policy_config' in openshift.master %} -imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }} +imagePolicyConfig:{{ openshift.master.image_policy_config | lib_utils_to_padded_yaml(level=1) }} {% endif %} kind: MasterConfig kubeletClientInfo: @@ -112,21 +99,21 @@ kubeletClientInfo: port: 10250 {% if openshift.master.embedded_kube | bool %} kubernetesMasterConfig: - apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }} + apiServerArguments: {{ openshift.master.api_server_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }} {% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %} storage-backend: - etcd3 storage-media-type: - application/vnd.kubernetes.protobuf {% endif %} - controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} + controllerArguments: {{ openshift.master.controller_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }} masterCount: {{ openshift.master.master_count }} masterIP: {{ openshift.common.ip }} podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }} proxyClientInfo: certFile: master.proxy-client.crt keyFile: master.proxy-client.key - schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }} + schedulerArguments: {{ openshift_master_scheduler_args | default(None) | lib_utils_to_padded_yaml( level=3 ) 
}} schedulerConfigFile: {{ openshift_master_scheduler_conf }} servicesNodePortRange: "{{ openshift_node_port_range | default("") }}" servicesSubnet: {{ openshift.common.portal_net }} @@ -160,7 +147,7 @@ networkConfig: {% endif %} # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet serviceNetworkCIDR: {{ openshift.common.portal_net }} - externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }} + externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | lib_utils_to_padded_yaml(1,2) }} {% if openshift_master_ingress_ip_network_cidr is defined %} ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }} {% endif %} @@ -168,8 +155,8 @@ oauthConfig: {% if 'oauth_always_show_provider_selection' in openshift.master %} alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }} {% endif %} -{% if 'oauth_templates' in openshift.master %} - templates:{{ openshift.master.oauth_templates | to_padded_yaml(level=2) }} +{% if l_openshift_master_oauth_templates %} + templates:{{ l_openshift_master_oauth_templates | lib_utils_to_padded_yaml(level=2) }} {% endif %} assetPublicURL: {{ openshift.master.public_console_url }}/ grantConfig: diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index cc21b37af..bff32b2e3 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }} {% elif openshift_push_via_dns | default(false) %} OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 493fc510e..b8a519baa 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }} {% elif openshift_push_via_dns | default(false) %} OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml deleted file mode 100644 index 0c681c764..000000000 --- a/roles/openshift_master/vars/main.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" -loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" -openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml" -openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" - -scheduler_config: - kind: Policy - apiVersion: v1 - predicates: "{{ openshift_master_scheduler_predicates - | default(openshift_master_scheduler_current_predicates - | default(openshift_master_scheduler_default_predicates)) 
}}" - priorities: "{{ openshift_master_scheduler_priorities - | default(openshift_master_scheduler_current_priorities - | default(openshift_master_scheduler_default_priorities)) }}" - -openshift_master_valid_grant_methods: -- auto -- prompt -- deny - -openshift_master_is_scaleup_host: False - -# These defaults assume forcing journald persistence, fsync to disk once -# a second, rate-limiting to 10,000 logs a second, no forwarding to -# syslog or wall, using 8GB of disk space maximum, using 10MB journal -# files, keeping only a days worth of logs per journal file, and -# retaining journal files no longer than a month. -journald_vars_to_replace: -- { var: Storage, val: persistent } -- { var: Compress, val: yes } -- { var: SyncIntervalSec, val: 1s } -- { var: RateLimitInterval, val: 1s } -- { var: RateLimitBurst, val: 10000 } -- { var: SystemMaxUse, val: 8G } -- { var: SystemKeepFree, val: 20% } -- { var: SystemMaxFileSize, val: 10M } -- { var: MaxRetentionSec, val: 1month } -- { var: MaxFileSec, val: 1day } -- { var: ForwardToSyslog, val: no } -- { var: ForwardToWall, val: no } diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml index 300b2cbff..e7d9f5bba 100644 --- a/roles/openshift_master_certificates/meta/main.yml +++ b/roles/openshift_master_certificates/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info: categories: - cloud - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index ec1fbb1ee..ce27e238f 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -27,7 +27,7 @@ master_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool else (False in (g_master_cert_stat_result.results | default({}) - | oo_collect(attribute='stat.exists') + | lib_utils_oo_collect(attribute='stat.exists') | list)) }}" - name: Ensure the generated_configs directory present @@ -47,11 +47,11 @@ - name: Create the master server certificate command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert - {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert + {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} - {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} + {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %} --certificate-authority {{ legacy_ca_certificate }} {% endfor %} --hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }} @@ -64,16 +64,16 @@ --overwrite=false when: item != openshift_ca_host with_items: "{{ hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" + | lib_utils_oo_select_keys(groups['oo_masters_to_config']) + | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" delegate_to: "{{ openshift_ca_host }}" run_once: true - name: Generate the loopback master client config command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} 
adm create-api-client-config + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config --certificate-authority={{ openshift_ca_cert }} - {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} + {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} --client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }} @@ -89,8 +89,8 @@ args: creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig" with_items: "{{ hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" + | lib_utils_oo_select_keys(groups['oo_masters_to_config']) + | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" when: item != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" run_once: true @@ -101,6 +101,7 @@ state: hard force: true with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}" when: master_certs_missing | bool and inventory_hostname != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" @@ -120,7 +121,11 @@ register: g_master_certs_mktemp changed_when: False when: master_certs_missing | bool - become: no + +- name: Chmod local temp directory for syncing certs + local_action: command chmod 777 "{{ g_master_certs_mktemp.stdout }}" + changed_when: False + when: master_certs_missing | bool - name: Create a tarball of the master certs command: > @@ -157,7 +162,6 @@ local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent changed_when: False when: master_certs_missing | bool - become: no - name: Lookup default group for ansible_ssh_user command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}" diff --git a/roles/openshift_master_facts/filter_plugins/oo_filters.py b/roles/openshift_master_facts/filter_plugins/oo_filters.py deleted file mode 120000 index 6f9bc47c1..000000000 --- a/roles/openshift_master_facts/filter_plugins/oo_filters.py +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins/oo_filters.py
\ No newline at end of file diff --git a/roles/openshift_master_facts/meta/main.yml b/roles/openshift_master_facts/meta/main.yml index 9dbf719f8..0ab2311d3 100644 --- a/roles/openshift_master_facts/meta/main.yml +++ b/roles/openshift_master_facts/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 0cb87dcaa..f450c916a 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -15,7 +15,7 @@ set_fact: g_metrics_hostname: "{{ openshift_hosted_metrics_public_url | default('hawkular-metrics.' ~ openshift_master_default_subdomain) - | oo_hostname_from_url }}" + | lib_utils_oo_hostname_from_url }}" - set_fact: openshift_hosted_metrics_deploy_url: "https://{{ g_metrics_hostname }}/hawkular/metrics" @@ -45,7 +45,6 @@ etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}" - embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}" embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}" embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}" bind_addr: "{{ openshift_master_bind_addr | default(None) }}" @@ -58,6 +57,7 @@ access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}" auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}" identity_providers: "{{ openshift_master_identity_providers | default(None) }}" + # oo_htpasswd_users_from_file is a custom filter in role lib_utils htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}" manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}" ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}" @@ -73,11 +73,8 @@ controller_args: "{{ osm_controller_args | default(None) }}" disabled_features: "{{ osm_disabled_features | default(None) }}" master_count: "{{ openshift_master_count | default(None) }}" - master_image: "{{ osm_image | default(None) }}" admission_plugin_config: "{{openshift_master_admission_plugin_config }}" kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config - oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2 - oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}" oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}" image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}" dynamic_provisioning_enabled: "{{ openshift_master_dynamic_provisioning_enabled | default(None) }}" @@ -94,6 +91,8 @@ - name: Set Default scheduler predicates and priorities set_fact: + # openshift_master_facts_default_predicates is a custom lookup plugin in + # role lib_utils openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}" openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}" diff --git 
a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 8da74430f..293d8f451 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -54,7 +54,7 @@ openshift_metrics_master_url: https://kubernetes.default.svc openshift_metrics_node_id: nodename openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_prefix: metrics-cassandra openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml index 50214135c..675ec112f 100644 --- a/roles/openshift_metrics/meta/main.yaml +++ b/roles/openshift_metrics/meta/main.yaml @@ -15,5 +15,6 @@ galaxy_info: categories: - openshift dependencies: -- { role: lib_openshift } -- { role: openshift_facts } +- role: lib_openshift +- role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml index bb842d710..b71e35263 100644 --- a/roles/openshift_metrics/tasks/generate_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_certificates.yaml @@ -1,7 +1,7 @@ --- - name: generate ca certificate chain command: > - {{ openshift.common.client_binary }} adm ca create-signer-cert + {{ openshift_client_binary }} adm ca create-signer-cert --config={{ mktemp.stdout }}/admin.kubeconfig --key='{{ mktemp.stdout }}/ca.key' --cert='{{ mktemp.stdout }}/ca.crt' diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 0fd19c9f8..9395fceca 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -14,7 +14,7 @@ changed_when: no - name: generate password for hawkular metrics - local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" + local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | lib_utils_oo_random_word }}" with_items: - hawkular-metrics become: false diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 48584bd64..9026cc897 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -1,6 +1,6 @@ --- - shell: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0 vars: diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml index b63f5ca8c..f45e7a042 100644 --- a/roles/openshift_metrics/tasks/install_hawkular.yaml +++ b/roles/openshift_metrics/tasks/install_hawkular.yaml @@ -1,6 +1,6 @@ --- - command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc hawkular-metrics -o jsonpath='{.spec.replicas}' 
register: hawkular_metrics_replica_count @@ -23,15 +23,15 @@ - block: - set_fact: hawkular_key={{ lookup('file', openshift_metrics_hawkular_key) }} - when: openshift_metrics_hawkular_key | exists + when: openshift_metrics_hawkular_key is exists changed_when: false - set_fact: hawkular_cert={{ lookup('file', openshift_metrics_hawkular_cert) }} - when: openshift_metrics_hawkular_cert | exists + when: openshift_metrics_hawkular_cert is exists changed_when: false - set_fact: hawkular_ca={{ lookup('file', openshift_metrics_hawkular_ca) }} - when: openshift_metrics_hawkular_ca | exists + when: openshift_metrics_hawkular_ca is exists changed_when: false - name: generate the hawkular-metrics route diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index a33b28ba7..73e7454f0 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -1,6 +1,6 @@ --- - command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc heapster -o jsonpath='{.spec.replicas}' register: heapster_replica_count diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 49d1d8cf1..6b6c21d71 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -68,9 +68,21 @@ when: openshift_metrics_install_hawkular_agent | bool - include_tasks: update_master_config.yaml + when: not openshift.common.version_gte_3_9 + +# Update asset config in openshift-web-console namespace +- name: Add metrics route information to web console asset config + include_role: + name: openshift_web_console + tasks_from: update_console_config.yml + vars: + console_config_edits: + - key: clusterInfo#metricsPublicURL + value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" + when: openshift_web_console_install | default(true) | bool - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 9dfe360bb..b67077bca 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -9,10 +9,10 @@ - "'not installed' not in passlib_result.stdout" msg: "python-passlib rpm must be installed on control host" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: Set metrics image facts diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml index 1e1af40e8..30fdde94c 100644 --- a/roles/openshift_metrics/tasks/oc_apply.yaml +++ b/roles/openshift_metrics/tasks/oc_apply.yaml @@ -1,7 +1,7 @@ --- - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' @@ -12,21 +12,21 @@ - name: 
Applying {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} -n {{namespace}} register: generation_apply - failed_when: "'error' in generation_apply.stderr" + failed_when: "'error' in generation_apply.stderr or (generation_apply.rc | int != 0)" changed_when: no - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' -n {{namespace}} register: version_changed vars: init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}" - failed_when: "'error' in version_changed.stderr" + failed_when: "'error' in version_changed.stderr or version_changed.rc | int != 0" changed_when: version_changed.stdout | int > init_version | int diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml index d6756f9b9..976763236 100644 --- a/roles/openshift_metrics/tasks/pre_install.yaml +++ b/roles/openshift_metrics/tasks/pre_install.yaml @@ -14,7 +14,7 @@ - name: list existing secrets command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig get secrets -o name register: metrics_secrets diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml index 2d880f4d6..223bd975e 100644 --- a/roles/openshift_metrics/tasks/setup_certificate.yaml +++ b/roles/openshift_metrics/tasks/setup_certificate.yaml @@ -1,7 +1,7 @@ --- - name: generate {{ component }} keys command: > - {{ openshift.common.client_binary }} adm ca create-server-cert + {{ openshift_client_binary }} adm ca create-server-cert --config={{ mktemp.stdout }}/admin.kubeconfig --key='{{ mktemp.stdout }}/{{ component }}.key' --cert='{{ mktemp.stdout }}/{{ component }}.crt' @@ -23,7 +23,7 @@ - name: generate random password for the {{ component }} keystore copy: - content: "{{ 15 | oo_random_word }}" + content: "{{ 15 | lib_utils_oo_random_word }}" dest: '{{ mktemp.stdout }}/{{ component }}-keystore.pwd' - slurp: src={{ mktemp.stdout | quote }}/{{ component|quote }}-keystore.pwd @@ -39,5 +39,5 @@ - name: generate random password for the {{ component }} truststore copy: - content: "{{ 15 | oo_random_word }}" + content: "{{ 15 | lib_utils_oo_random_word }}" dest: '{{ mktemp.stdout | quote }}/{{ component|quote }}-truststore.pwd' diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml index 2037e8dc3..899251727 100644 --- a/roles/openshift_metrics/tasks/start_metrics.yaml +++ b/roles/openshift_metrics/tasks/start_metrics.yaml @@ -1,6 +1,6 @@ --- - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=hawkular-cassandra @@ -23,7 +23,7 @@ changed_when: metrics_cassandra_rc | length > 0 - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=hawkular-metrics @@ -45,7 +45,7 @@ changed_when: metrics_metrics_rc | length > 0 - command: > - {{openshift.common.client_binary}} + 
{{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=heapster diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml index 9a2ce9267..4b1d7119d 100644 --- a/roles/openshift_metrics/tasks/stop_metrics.yaml +++ b/roles/openshift_metrics/tasks/stop_metrics.yaml @@ -1,6 +1,6 @@ --- - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=heapster @@ -22,7 +22,7 @@ loop_var: object - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=hawkular-metrics @@ -44,7 +44,7 @@ changed_when: metrics_hawkular_rc | length > 0 - command: > - {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -o name -l metrics-infra=hawkular-cassandra diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml index 42ed02460..ae3306496 100644 --- a/roles/openshift_metrics/tasks/uninstall_hosa.yaml +++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml @@ -1,7 +1,7 @@ --- - name: remove Hawkular Agent (HOSA) components command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found --selector=metrics-infra=agent all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings register: delete_metrics @@ -9,7 +9,7 @@ - name: remove rolebindings command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found clusterrolebinding/hawkular-openshift-agent-rb changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 1265c7bfd..1664e9975 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -4,7 +4,7 @@ - name: remove metrics components command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found --selector=metrics-infra all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole register: delete_metrics @@ -12,9 +12,20 @@ - name: remove rolebindings command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found rolebinding/hawkular-view clusterrolebinding/heapster-cluster-reader clusterrolebinding/hawkular-metrics changed_when: delete_metrics.stdout != 'No resources found' + +# Update the web config in openshift-web-console namespace +- name: Remove metrics route information from the web 
console config + include_role: + name: openshift_web_console + tasks_from: update_console_config.yml + vars: + console_config_edits: + - key: clusterInfo#metricsPublicURL + value: "" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index 5059d8d94..6567fcb4f 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -1,4 +1,5 @@ --- +# TODO: Remove when asset config is removed from master-config.yaml - name: Adding metrics route information to metricsPublicURL modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index e976bc222..7c75b2f97 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -64,7 +64,7 @@ spec: - name: MASTER_URL value: "{{ openshift_metrics_master_url }}" - name: JGROUPS_PASSWORD - value: "{{ 17 | oo_random_word }}" + value: "{{ 17 | lib_utils_oo_random_word }}" - name: TRUSTSTORE_AUTHORITIES value: "/hawkular-metrics-certs/tls.truststore.crt" - name: ENABLE_PROMETHEUS_ENDPOINT diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py deleted file mode 100644 index 6ed6d404c..000000000 --- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use with openshift named certificates -''' - - -class FilterModule(object): - ''' Custom ansible filters for use with openshift named certificates''' - - @staticmethod - def oo_named_certificates_list(named_certificates): - ''' Returns named certificates list with correct fields for the master - config file.''' - return [{'certFile': named_certificate['certfile'], - 'keyFile': named_certificate['keyfile'], - 'names': named_certificate['names']} for named_certificate in named_certificates] - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {"oo_named_certificates_list": self.oo_named_certificates_list} diff --git a/roles/openshift_named_certificates/meta/main.yml b/roles/openshift_named_certificates/meta/main.yml index 2c6e12494..e7d81df53 100644 --- a/roles/openshift_named_certificates/meta/main.yml +++ b/roles/openshift_named_certificates/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: - system dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml index 1bcf9ef67..021fa8385 100644 --- a/roles/openshift_named_certificates/tasks/main.yml +++ b/roles/openshift_named_certificates/tasks/main.yml @@ -1,9 +1,8 @@ --- - set_fact: - parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}" + parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}" when: named_certificates | length > 0 delegate_to: localhost - become: no run_once: true - openshift_facts: @@ -43,4 +42,4 @@ src: "{{ item }}" dest: "{{ named_certs_dir }}/{{ item | basename }}" mode: 0600 - with_items: "{{ 
named_certificates | oo_collect('cafile') }}" + with_items: "{{ named_certificates | lib_utils_oo_collect('cafile') }}" diff --git a/roles/openshift_nfs/meta/main.yml b/roles/openshift_nfs/meta/main.yml index d7b5910f2..17c0cf33f 100644 --- a/roles/openshift_nfs/meta/main.yml +++ b/roles/openshift_nfs/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_utils -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml index b0b888d56..331685289 100644 --- a/roles/openshift_nfs/tasks/create_export.yml +++ b/roles/openshift_nfs/tasks/create_export.yml @@ -3,7 +3,7 @@ # # Include signature # -# include_role: +# import_role: # role: openshift_nfs # tasks_from: create_export # vars: @@ -31,4 +31,4 @@ - name: Re-export NFS filesystems command: exportfs -ar when: - - created_export | changed + - created_export is changed diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml index 1aa7e7079..bd8fb44a2 100644 --- a/roles/openshift_nfs/tasks/setup.yml +++ b/roles/openshift_nfs/tasks/setup.yml @@ -5,7 +5,7 @@ - name: Install nfs-utils package: name=nfs-utils state=present register: result - until: result | success + until: result is succeeded - name: Configure NFS lineinfile: @@ -16,7 +16,7 @@ - name: Restart nfs-config systemd: name=nfs-config state=restarted - when: nfs_config | changed + when: nfs_config is changed - name: Ensure exports directory exists file: diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index fff927944..0fe4c2035 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,70 @@ --- openshift_node_debug_level: "{{ debug_level | default(2) }}" - +openshift_node_iptables_sync_period: '30s' +osn_storage_plugin_deps: +- ceph +- glusterfs +- iscsi +openshift_node_local_quota_per_fsgroup: "" +openshift_node_proxy_mode: iptables +openshift_set_node_ip: False +openshift_config_base: '/etc/origin' + +openshift_oreg_url_default_dict: + origin: "openshift/origin-${component}:${version}" + openshift-enterprise: "openshift3/ose-${component}:${version}" +openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}" +oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}" + +osn_ovs_image_default_dict: + origin: "openshift/openvswitch" + openshift-enterprise: "openshift3/openvswitch" +osn_ovs_image_default: "{{ osn_ovs_image_default_dict[openshift_deployment_type] }}" +osn_ovs_image: "{{ osn_ovs_image_default }}" + +openshift_dns_ip: "{{ ansible_default_ipv4['address'] }}" + +openshift_node_env_vars: {} + +# Create list of 'k=v' pairs. 
+l_node_kubelet_node_labels: "{{ openshift_node_labels | default({}) | lib_utils_oo_dict_to_keqv_list }}" + +openshift_node_kubelet_args_dict: + aws: + cloud-provider: + - aws + cloud-config: + - "{{ openshift_config_base ~ '/cloudprovider/aws.conf' }}" + node-labels: "{{ l_node_kubelet_node_labels }}" + openstack: + cloud-provider: + - openstack + cloud-config: + - "{{ openshift_config_base ~ '/cloudprovider/openstack.conf' }}" + node-labels: "{{ l_node_kubelet_node_labels }}" + gce: + cloud-provider: + - gce + cloud-config: + - "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}" + node-labels: "{{ l_node_kubelet_node_labels }}" + azure: + cloud-provider: + - azure + cloud-config: + - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}" + node-labels: "{{ l_node_kubelet_node_labels }}" + undefined: + node-labels: "{{ l_node_kubelet_node_labels }}" + +l_node_kubelet_args_default: "{{ openshift_node_kubelet_args_dict[openshift_cloudprovider_kind | default('undefined')] }}" + +l_openshift_node_kubelet_args: "{{ openshift_node_kubelet_args | default({}) }}" +# Combine the default kubelet_args dictionary (based on cloud provider, if provided) +# with user-supplied openshift_node_kubelet_args. +# openshift_node_kubelet_args will override the defaults, if keys and/or subkeys +# are present in both. +l2_openshift_node_kubelet_args: "{{ l_node_kubelet_args_default | combine(l_openshift_node_kubelet_args, recursive=True) }}" openshift_node_dnsmasq_install_network_manager_hook: true # lo must always be present in this list or dnsmasq will conflict with @@ -13,11 +77,29 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) } l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" +openshift_node_syscon_auth_mounts_l: +- type: bind + source: "{{ oreg_auth_credentials_path }}" + destination: "/root/.docker" + options: + - ro + - bind + +# If we need to add new mounts in the future, or the user wants to mount data. +# This should be in the same format as auth_mounts_l above. +openshift_node_syscon_add_mounts_l: [] + + openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" + +openshift_node_image_dict: + origin: 'openshift/node' + openshift-enterprise: 'openshift3/node' +osn_image: "{{ openshift_node_image_dict[openshift_deployment_type] }}" + openshift_service_type_dict: origin: origin openshift-enterprise: atomic-openshift - openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" system_images_registry_dict: @@ -106,9 +188,9 @@ oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False openshift_use_crio: False -openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False) | bool) }}" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" # NOTE # r_openshift_node_*_default may be defined external to this role. 
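The defaults refactor above leans on two behaviors worth spelling out: lib_utils_oo_dict_to_keqv_list flattens the openshift_node_labels dict into a list of 'k=v' strings for the kubelet's node-labels argument, and combine(recursive=True) merges user-supplied openshift_node_kubelet_args over the per-cloud-provider defaults so that user keys and subkeys win. The following self-contained Python sketch illustrates those semantics under stated assumptions; the function names and sample values are illustrative only, not the actual lib_utils filter code.

# Illustrative sketch only; not the lib_utils implementation.

def dict_to_keqv_list(labels):
    """Flatten {'region': 'infra'} into ['region=infra'], in the spirit of
    the lib_utils_oo_dict_to_keqv_list filter referenced above."""
    return ['%s=%s' % (key, value) for key, value in labels.items()]

def combine_recursive(base, override):
    """Rough equivalent of Ansible's combine(recursive=True): keys from
    override win, but nested dicts are merged key by key instead of being
    replaced wholesale."""
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = combine_recursive(merged[key], value)
        else:
            merged[key] = value
    return merged

# Hypothetical inputs: the aws entry from openshift_node_kubelet_args_dict
# plus a user-supplied openshift_node_kubelet_args.
defaults = {
    'cloud-provider': ['aws'],
    'cloud-config': ['/etc/origin/cloudprovider/aws.conf'],
    'node-labels': dict_to_keqv_list({'region': 'infra'}),
}
user_args = {'node-labels': ['zone=east'], 'max-pods': ['250']}

print(combine_recursive(defaults, user_args))
# {'cloud-provider': ['aws'],
#  'cloud-config': ['/etc/origin/cloudprovider/aws.conf'],
#  'node-labels': ['zone=east'], 'max-pods': ['250']}

Note that leaf values such as the node-labels list are replaced rather than merged element-wise, which matches the hunk's comment that openshift_node_kubelet_args overrides the defaults when keys or subkeys are present in both.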
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 170a3dc6e..779916335 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -4,11 +4,15 @@ name: NetworkManager state: restarted enabled: True + when: + - (not skip_node_svc_handlers | default(False) | bool) - name: restart dnsmasq systemd: name: dnsmasq state: restarted + when: + - (not skip_node_svc_handlers | default(False) | bool) - name: restart openvswitch systemd: @@ -20,7 +24,7 @@ - openshift_node_use_openshift_sdn | bool - not openshift_node_bootstrap register: l_openshift_node_stop_openvswitch_result - until: not l_openshift_node_stop_openvswitch_result | failed + until: not (l_openshift_node_stop_openvswitch_result is failed) retries: 3 delay: 30 notify: @@ -30,14 +34,14 @@ pause: seconds=15 when: - (not skip_node_svc_handlers | default(False) | bool) - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - name: restart node systemd: name: "{{ openshift_service_type }}-node" state: restarted register: l_openshift_node_restart_node_result - until: not l_openshift_node_restart_node_result | failed + until: not (l_openshift_node_restart_node_result is failed) retries: 3 delay: 30 when: @@ -47,3 +51,5 @@ - name: reload systemd units command: systemctl daemon-reload + when: + - (not skip_node_svc_handlers | default(False) | bool) diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 70057c7f3..59e743dce 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -12,12 +12,5 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_node_facts - when: not (openshift_node_upgrade_in_progress | default(False)) - role: lib_openshift -- role: lib_os_firewall - when: not (openshift_node_upgrade_in_progress | default(False)) -- role: openshift_cloud_provider - when: not (openshift_node_upgrade_in_progress | default(False)) - role: lib_utils - when: openshift_node_upgrade_in_progress | default(False) diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index a042bc01b..1a6f209e0 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -5,7 +5,7 @@ state: present with_items: "{{ r_openshift_node_image_prep_packages }}" register: result - until: result | success + until: result is succeeded - name: create the directory for node file: diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index e5c80bd09..1103fe4c9 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -2,6 +2,10 @@ - name: Install the systemd units include_tasks: systemd_units.yml +- name: Pull container images + include_tasks: container_images.yml + when: openshift_is_containerized | bool + - name: Start and enable openvswitch service systemd: name: openvswitch.service @@ -9,20 +13,20 @@ state: started daemon_reload: yes when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - openshift_node_use_openshift_sdn | default(true) | bool register: ovs_start_result - until: not ovs_start_result | failed + until: not (ovs_start_result is failed) retries: 3 delay: 30 - set_fact: - ovs_service_status_changed: "{{ ovs_start_result | changed }}" + ovs_service_status_changed: "{{ ovs_start_result is changed }}" - file: - dest: "{{ 
(openshift_node_kubelet_args|default({'config':None})).config}}" + dest: "{{ l2_openshift_node_kubelet_args['config'] }}" state: directory - when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args + when: ('config' in l2_openshift_node_kubelet_args) | bool # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config @@ -42,7 +46,7 @@ regexp: "^{{ item.key }}=" line: "{{ item.key }}={{ item.value }}" create: true - with_dict: "{{ openshift.node.env_vars | default({}) }}" + with_dict: "{{ openshift_node_env_vars }}" notify: - restart node @@ -54,7 +58,7 @@ # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again - when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not openshift_node_bootstrap block: - name: Wait for master API to become available before proceeding @@ -89,19 +93,19 @@ state: started daemon_reload: yes register: node_start_result - until: not node_start_result | failed + until: not node_start_result is failed retries: 1 delay: 30 ignore_errors: true - name: Dump logs from node service if it failed command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node - when: node_start_result | failed + when: node_start_result is failed - name: Abort if node failed to start fail: msg: Node failed to start please inspect the logs and try again - when: node_start_result | failed + when: node_start_result is failed - set_fact: - node_service_status_changed: "{{ node_start_result | changed }}" + node_service_status_changed: "{{ node_start_result is changed }}" diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml new file mode 100644 index 000000000..bb788e2f1 --- /dev/null +++ b/roles/openshift_node/tasks/container_images.yml @@ -0,0 +1,20 @@ +--- +- name: Install Node system container + include_tasks: node_system_container.yml + when: + - l_is_node_system_container | bool + +- name: Install OpenvSwitch system containers + include_tasks: openvswitch_system_container.yml + when: + - openshift_node_use_openshift_sdn | bool + - l_is_openvswitch_system_container | bool + +- name: Pre-pull openvswitch image + command: > + docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + when: + - openshift_node_use_openshift_sdn | bool + - not l_is_openvswitch_system_container | bool diff --git a/roles/openshift_node/tasks/dnsmasq.yml b/roles/openshift_node/tasks/dnsmasq.yml index f210a3a21..31ca46ec0 100644 --- a/roles/openshift_node/tasks/dnsmasq.yml +++ b/roles/openshift_node/tasks/dnsmasq.yml @@ -1,43 +1,4 @@ --- -- name: Check for NetworkManager service - command: > - systemctl show NetworkManager - register: nm_show - changed_when: false - ignore_errors: True - -- name: Set fact using_network_manager - set_fact: - network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}" - -- name: Install dnsmasq - package: name=dnsmasq state=installed - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -- name: ensure origin/node directory exists - file: - state: directory - path: "{{ item }}" - owner: root - group: root - mode: '0700' - with_items: - - /etc/origin - - /etc/origin/node - -# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed -# when the node stops. 
A dbus-message is sent to dnsmasq to add the same entries -# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or -# newer we can use --server-file option to update the servers dynamically and -# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else -# triggers a restart of dnsmasq but not a node restart. -- name: Install node-dnsmasq.conf - template: - src: node-dnsmasq.conf.j2 - dest: /etc/origin/node/node-dnsmasq.conf - - name: Install dnsmasq configuration template: src: origin-dns.conf.j2 @@ -63,7 +24,3 @@ # Dynamic NetworkManager based dispatcher - include_tasks: dnsmasq/network-manager.yml when: network_manager_active | bool - -# Relies on ansible in order to configure static config -- include_tasks: dnsmasq/no-network-manager.yml - when: not network_manager_active | bool diff --git a/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml index 541c8115a..5d2c67b86 100644 --- a/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml +++ b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml @@ -8,6 +8,6 @@ state: present notify: restart NetworkManager register: result - until: result | success + until: result is succeeded - include_tasks: network-manager.yml diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml new file mode 100644 index 000000000..5e06ba032 --- /dev/null +++ b/roles/openshift_node/tasks/dnsmasq_install.yml @@ -0,0 +1,43 @@ +--- +- name: Check for NetworkManager service + command: > + systemctl show NetworkManager + register: nm_show + changed_when: false + ignore_errors: True + +- name: Set fact using_network_manager + set_fact: + network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}" + +- name: Install dnsmasq + package: name=dnsmasq state=installed + when: not openshift_is_atomic | bool + register: result + until: result is succeeded + +- name: ensure origin/node directory exists + file: + state: directory + path: "{{ item }}" + owner: root + group: root + mode: '0700' + with_items: + - /etc/origin + - /etc/origin/node + +# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed +# when the node stops. A dbus-message is sent to dnsmasq to add the same entries +# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or +# newer we can use --server-file option to update the servers dynamically and +# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else +# triggers a restart of dnsmasq but not a node restart. 
+- name: Install node-dnsmasq.conf + template: + src: node-dnsmasq.conf.j2 + dest: /etc/origin/node/node-dnsmasq.conf + +# Relies on ansible in order to configure static config +- include_tasks: dnsmasq/no-network-manager.yml + when: not network_manager_active | bool diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml deleted file mode 100644 index c13a6cf6c..000000000 --- a/roles/openshift_node/tasks/docker/upgrade.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# input variables: -# - openshift_service_type -# - openshift.common.is_containerized -# - docker_upgrade_nuke_images -# - docker_version -# - skip_docker_restart - -- name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - -- debug: var=docker_image_count.stdout - -# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition -- name: Remove all containers and images - script: nuke_images.sh - register: nuke_images_result - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- service: - name: docker - state: stopped - register: l_openshift_node_upgrade_docker_stop_result - until: not l_openshift_node_upgrade_docker_stop_result | failed - retries: 3 - delay: 30 - -- name: Upgrade Docker - package: name=docker{{ '-' + docker_version }} state=present - register: result - until: result | success - -# starting docker happens back in ../main.yml where it calls ../restart.yml diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index f93aed246..a4a9c1237 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,35 +1,25 @@ --- -- when: not openshift.common.is_containerized | bool - block: - - name: Install Node package - package: - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - register: result - until: result | success - - - name: Install sdn-ovs package - package: - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - when: - - openshift_node_use_openshift_sdn | bool - register: result - until: result | success - - - name: Install conntrack-tools package - package: - name: "conntrack-tools" - state: present - register: result - until: result | success +- name: Install Node package, sdn-ovs, conntrack packages + package: + name: "{{ item.name }}" + state: present + register: result + until: result is succeeded + with_items: + - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" + - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" + install: "{{ openshift_node_use_openshift_sdn | bool }}" + - name: "conntrack-tools" + when: + - not openshift_is_containerized | bool + - item['install'] | default(True) | bool - when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - 
not l_is_node_system_container | bool block: - name: Pre-pull node image when containerized command: > - docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} + docker pull {{ osn_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 32c5f495f..f56f24e12 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -3,9 +3,10 @@ msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - - deployment_type == 'openshift-enterprise' - - not openshift_use_crio + - openshift_deployment_type == 'openshift-enterprise' + - not openshift_use_crio | bool +- include_tasks: dnsmasq_install.yml - include_tasks: dnsmasq.yml - name: setup firewall @@ -13,33 +14,11 @@ #### Disable SWAP ##### # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory -- name: Check for swap usage - command: grep "^[^#].*swap" /etc/fstab - # grep: match any lines which don't begin with '#' and contain 'swap' - changed_when: false - failed_when: false - register: swap_result - -- when: - - swap_result.stdout_lines | length > 0 - - openshift_disable_swap | default(true) | bool - block: - - name: Disable swap - command: swapoff --all - - - name: Remove swap entries from /etc/fstab - replace: - dest: /etc/fstab - regexp: '(^[^#].*swap.*)' - replace: '# \1' - backup: yes - - - name: Add notice about disabling swap - lineinfile: - dest: /etc/fstab - line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' - state: present -#### End Disable Swap Block #### +# swapoff is a custom module in lib_utils that comments out swap entries in +# /etc/fstab and runs swapoff -a, if necessary. 
+- name: Disable swap + swapoff: {} + when: openshift_disable_swap | default(true) | bool - name: include node installer include_tasks: install.yml @@ -49,9 +28,11 @@ name: cri-o enabled: yes state: restarted - when: openshift_use_crio + when: openshift_use_crio | bool register: task_result - failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower + failed_when: + - task_result is failed + - ('could not find the requested service' not in task_result.msg|lower) - name: restart NetworkManager to ensure resolv.conf is present systemd: @@ -82,21 +63,17 @@ - name: GlusterFS storage plugin configuration include_tasks: storage_plugins/glusterfs.yml - when: "'glusterfs' in openshift.node.storage_plugin_deps" + when: "'glusterfs' in osn_storage_plugin_deps" - name: Ceph storage plugin configuration include_tasks: storage_plugins/ceph.yml - when: "'ceph' in openshift.node.storage_plugin_deps" + when: "'ceph' in osn_storage_plugin_deps" - name: iSCSI storage plugin configuration include_tasks: storage_plugins/iscsi.yml - when: "'iscsi' in openshift.node.storage_plugin_deps" + when: "'iscsi' in osn_storage_plugin_deps" ##### END Storage ##### - include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml when: openshift_node_use_openshift_sdn | default(true) | bool - -- name: include bootstrap node config - include_tasks: bootstrap.yml - when: openshift_node_bootstrap diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 98978ec6f..008f209d7 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -2,16 +2,35 @@ - name: Pre-pull node system container image command: > - atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} + atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" - name: Install or Update node system container oc_atomic_container: name: "{{ openshift_service_type }}-node" - image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" + image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}" values: - "DNS_DOMAIN={{ openshift.common.dns_domain }}" - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service" - "MASTER_SERVICE={{ openshift_service_type }}.service" + - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}' state: latest + vars: + # We need to evaluate some variables here to ensure + # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been + # processed. + + # Determine if we want to include auth credentials mount. + l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}" + + # Join any user-provided mounts and auth_mounts into a combined list. 
+ l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}" + + # We must prepend a ',' here to ensure the value is inserted properly into an + # existing json list in the container's config.json + # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/oo_filters.py + l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}" + # if we have just a ',' then both mount lists were empty, we don't want to add + # anything to config.json + l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}" diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index b61bc84c1..d7dce6969 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,22 +1,22 @@ --- - set_fact: l_service_name: "cri-o" - when: openshift_use_crio + when: openshift_use_crio | bool - set_fact: l_service_name: "{{ openshift_docker_service_name }}" - when: not openshift_use_crio + when: not openshift_use_crio | bool - name: Pre-pull OpenVSwitch system container image command: > - atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} + atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" - name: Install or Update OpenVSwitch system container oc_atomic_container: name: openvswitch - image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}" + image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}" state: latest values: - "DOCKER_SERVICE={{ l_service_name }}" diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index ab43ec049..92650e6b7 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -41,7 +41,7 @@ set_fact: l_bind_docker_reg_auth: True when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - oreg_auth_user is defined - > (node_oreg_auth_credentials_stat.stat.exists diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml index 72a3b837f..e30f58a9a 100644 --- a/roles/openshift_node/tasks/storage_plugins/ceph.yml +++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml @@ -1,6 +1,6 @@ --- - name: Install Ceph storage plugin dependencies package: name=ceph-common state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml index 08ea71a0c..c04a6922a 100644 --- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml @@ -1,9 +1,9 @@ --- - name: Install GlusterFS storage plugin dependencies package: name=glusterfs-fuse 
state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded - name: Check for existence of fusefs sebooleans command: getsebool {{ item }} @@ -31,7 +31,7 @@ # since getsebool prints the resolved name. (At some point Ansible's seboolean module # should learn to deal with aliases) - item.item in item.stdout # Boolean does not have an alias. - - ansible_python_version | version_compare('3', '<') + - ansible_python_version is version_compare('3', '<') with_items: "{{ fusefs_getsebool_status.results }}" # Workaround for https://github.com/openshift/openshift-ansible/issues/4438 @@ -52,5 +52,5 @@ # should learn to deal with aliases) - item.item in item.stdout # Boolean does not have an alias. - ('--> off' in item.stdout) # Boolean is currently off. - - ansible_python_version | version_compare('3', '>=') + - ansible_python_version is version_compare('3', '>=') with_items: "{{ fusefs_getsebool_status.results }}" diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml index ece68dc71..a8048c42f 100644 --- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -1,6 +1,6 @@ --- - name: Install iSCSI storage plugin dependencies package: name=iscsi-initiator-utils state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index 5eacf42e8..c2922644f 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -1,9 +1,9 @@ --- - name: Install NFS storage plugin dependencies package: name=nfs-utils state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result - until: result | success + until: result is succeeded - name: Check for existence of nfs sebooleans command: getsebool {{ item }} @@ -31,7 +31,7 @@ # since getsebool prints the resolved name. (At some point Ansible's seboolean module # should learn to deal with aliases) - item.item in item.stdout # Boolean does not have an alias. - - ansible_python_version | version_compare('3', '<') + - ansible_python_version is version_compare('3', '<') with_items: "{{ nfs_getsebool_status.results }}" # Workaround for https://github.com/openshift/openshift-ansible/issues/4438 @@ -52,5 +52,5 @@ # should learn to deal with aliases) - item.item in item.stdout # Boolean does not have an alias. - ('--> off' in item.stdout) # Boolean is currently off. 
- - ansible_python_version | version_compare('3', '>=') + - ansible_python_version is version_compare('3', '>=') with_items: "{{ nfs_getsebool_status.results }}" diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index c532147b1..e33a4999f 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -2,13 +2,13 @@ - name: Install Node service file template: dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" - src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" + src: "{{ openshift_is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" when: not l_is_node_system_container | bool notify: - reload systemd units - restart node -- when: openshift.common.is_containerized | bool +- when: openshift_is_containerized | bool block: - name: include node deps docker service file include_tasks: config/install-node-deps-docker-service-file.yml @@ -16,29 +16,10 @@ - name: include ovs service environment file include_tasks: config/install-ovs-service-env-file.yml - - name: Install Node system container - include_tasks: node_system_container.yml - when: - - l_is_node_system_container | bool - - - name: Install OpenvSwitch system containers - include_tasks: openvswitch_system_container.yml + - include_tasks: config/install-ovs-docker-service-file.yml when: - openshift_node_use_openshift_sdn | bool - - l_is_openvswitch_system_container | bool - -- block: - - name: Pre-pull openvswitch image - command: > - docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - - - include_tasks: config/install-ovs-docker-service-file.yml - when: - - openshift.common.is_containerized | bool - - openshift_node_use_openshift_sdn | bool - - not l_is_openvswitch_system_container | bool + - not l_is_openvswitch_system_container | bool - include_tasks: config/configure-node-settings.yml - include_tasks: config/configure-proxy-settings.yml diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index 9f333645a..02e417937 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -1,170 +1,37 @@ --- # input variables: # - l_docker_upgrade -# - openshift.common.is_atomic +# - openshift_is_atomic # - node_config_hook # - openshift_pkg_version -# - openshift.common.is_containerized -# - deployment_type +# - openshift_is_containerized # - openshift_release # tasks file for openshift_node_upgrade -- include_tasks: registry_auth.yml +- name: stop services for upgrade + include_tasks: upgrade/stop_services.yml -- name: Stop node and openvswitch services - service: - name: "{{ item }}" - state: stopped - with_items: - - "{{ openshift_service_type }}-node" - - openvswitch - failed_when: false - -- name: Stop additional containerized services - service: - name: "{{ item }}" - state: stopped - with_items: - - "{{ openshift_service_type }}-master-controllers" - - "{{ openshift_service_type }}-master-api" - - etcd_container - failed_when: false - when: openshift.common.is_containerized | bool - -- name: Pre-pull node image - command: > - docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool - -- 
name: Pre-pull openvswitch image - command: > - docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: - - openshift.common.is_containerized | bool - - openshift_use_openshift_sdn | bool - -- include_tasks: docker/upgrade.yml - vars: - # We will restart Docker ourselves after everything is ready: - skip_docker_restart: True +# Ensure we actually install the latest package. +- name: install pre-downloaded docker upgrade rpm + command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}" + register: result + until: result is succeeded when: - l_docker_upgrade is defined - l_docker_upgrade | bool -- include_tasks: "{{ node_config_hook }}" - when: node_config_hook is defined - -- include_tasks: upgrade/rpm_upgrade.yml +- name: install pre-pulled rpms + include_tasks: upgrade/rpm_upgrade_install.yml vars: - component: "node" openshift_version: "{{ openshift_pkg_version | default('') }}" - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool -# https://bugzilla.redhat.com/show_bug.cgi?id=1513054 -- name: Clean up dockershim data - file: - path: "/var/lib/dockershim/sandbox/" - state: absent -- name: Upgrade openvswitch - package: - name: openvswitch - state: latest - when: not openshift.common.is_containerized | bool - register: result - until: result | success - -- name: Update oreg value - yedit: - src: "{{ openshift.common.config_base }}/node/node-config.yaml" - key: 'imageConfig.format' - value: "{{ oreg_url | default(oreg_url_node) }}" - when: oreg_url is defined or oreg_url_node is defined - -# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory -- name: Check for swap usage - command: grep "^[^#].*swap" /etc/fstab - # grep: match any lines which don't begin with '#' and contain 'swap' - changed_when: false - failed_when: false - register: swap_result - - # Disable Swap Block -- block: - - - name: Disable swap - command: swapoff --all - - - name: Remove swap entries from /etc/fstab - replace: - dest: /etc/fstab - regexp: '(^[^#].*swap.*)' - replace: '# \1' - backup: yes - - - name: Add notice about disabling swap - lineinfile: - dest: /etc/fstab - line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' - state: present - - when: - - swap_result.stdout_lines | length > 0 - - openshift_disable_swap | default(true) | bool - # End Disable Swap Block - -- name: Reset selinux context - command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes - when: - - ansible_selinux is defined - - ansible_selinux.status == 'enabled'
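The cache-only install above is the second half of a two-phase package pattern: upgrade_pre.yml first fetches the RPMs with `--downloadonly` while the node is still serving traffic, and this play then installs them from the local cache with `-C`, which keeps the service outage window short. A minimal sketch of the two halves, abbreviated from the tasks in this diff:

```
# upgrade_pre.yml: fetch the docker RPM into the package cache, but do not install it
- command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}"

# upgrade.yml: -C (--cacheonly) then installs strictly from that cache, no network round trip
- command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}"
```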
+- include_tasks: "{{ node_config_hook }}" + when: node_config_hook is defined -- name: Apply 3.6 dns config changes - yedit: - src: /etc/origin/node/node-config.yaml - key: "{{ item.key }}" - value: "{{ item.value }}" - with_items: - - key: "dnsBindAddress" - value: "127.0.0.1:53" - - key: "dnsRecursiveResolvConf" - value: "/etc/origin/node/resolv.conf" +- include_tasks: upgrade/config_changes.yml # Restart all services - include_tasks: upgrade/restart.yml @@ -181,4 +48,7 @@ retries: 24 delay: 5 +- include_tasks: dnsmasq_install.yml - include_tasks: dnsmasq.yml + +- meta: flush_handlers diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml new file mode 100644 index 000000000..15ac76f7d --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -0,0 +1,67 @@ +--- +- name: Update systemd units + include_tasks: ../systemd_units.yml + when: openshift_is_containerized | bool + +- name: Update oreg value + yedit: + src: "{{ openshift.common.config_base }}/node/node-config.yaml" + key: 'imageConfig.format' + value: "{{ oreg_url | default(oreg_url_node) }}" + when: oreg_url is defined or oreg_url_node is defined + +- name: Remove obsolete docker-sdn-ovs.conf + file: + path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf" + state: absent + +# https://bugzilla.redhat.com/show_bug.cgi?id=1513054 +- name: Clean up dockershim data + file: + path: "/var/lib/dockershim/sandbox/" + state: absent + +# https://bugzilla.redhat.com/show_bug.cgi?id=1518912 +- name: Clean up IPAM data + file: + path: "/var/lib/cni/networks/openshift-sdn/" + state: absent + +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +# swapoff is a custom module in lib_utils that comments out swap entries in +# /etc/fstab and runs swapoff -a, if necessary. +- name: Disable swap + swapoff: {} + when: openshift_disable_swap | default(true) | bool + +- name: Apply 3.6 dns config changes + yedit: + src: /etc/origin/node/node-config.yaml + key: "{{ item.key }}" + value: "{{ item.value }}" + with_items: + - key: "dnsBindAddress" + value: "127.0.0.1:53" + - key: "dnsRecursiveResolvConf" + value: "/etc/origin/node/resolv.conf" + +- name: Install Node service file + template: + dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" + src: "node.service.j2" + register: l_node_unit + when: not openshift_is_containerized | bool + +- name: Reset selinux context + command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes + when: + - ansible_selinux is defined + - ansible_selinux.status == 'enabled' + +# NOTE: This is needed to make sure we are using the correct set +# of systemd unit files. The RPMs lay down defaults but +# the install/upgrade may override them in /etc/systemd/system/. +# NOTE: We don't use the systemd module as some versions of the module +# require a service to be part of the call. +- name: Reload systemd units + command: systemctl daemon-reload diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml deleted file mode 100644 index 245de60a7..000000000 --- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# This is a hack to allow us to use systemd_units.yml, but skip the handlers which -# restart services. 
We will unconditionally restart all containerized services -# because we have to unconditionally restart Docker: -- set_fact: - skip_node_svc_handlers: True - -- name: Update systemd units - include_tasks: ../systemd_units.yml - -# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of -# play when the node has already been marked schedulable again. (this would look strange -# in logs otherwise) -- meta: flush_handlers diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml new file mode 100644 index 000000000..e5477f389 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml @@ -0,0 +1,15 @@ +--- +- name: Pre-pull node image + command: > + docker pull {{ osn_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + +- name: Pre-pull openvswitch image + command: > + docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + when: openshift_node_use_openshift_sdn | bool + +- include_tasks: ../container_images.yml diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml index 65c301783..bd6f42182 100644 --- a/roles/openshift_node/tasks/upgrade/restart.yml +++ b/roles/openshift_node/tasks/upgrade/restart.yml @@ -1,7 +1,7 @@ --- # input variables: # - openshift_service_type -# - openshift.common.is_containerized +# - openshift_is_containerized # - openshift.common.hostname # - openshift.master.api_port @@ -13,12 +13,21 @@ - name: Reload systemd to ensure latest unit files command: systemctl daemon-reload +- name: Restart support services + service: + name: "{{ item }}" + state: restarted + enabled: True + with_items: + - NetworkManager + - dnsmasq + - name: Restart container runtime service: name: "{{ openshift_docker_service_name }}" state: started register: docker_start_result - until: not docker_start_result | failed + until: not (docker_start_result is failed) retries: 3 delay: 30 diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml index 120b93bc3..d4b47bb9e 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml @@ -3,31 +3,22 @@ # - openshift_service_type # - component # - openshift_pkg_version -# - openshift.common.is_atomic +# - openshift_is_atomic -# We verified latest rpm available is suitable, so just yum update. -- name: Upgrade packages - package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present" +# Pre-pull new node rpm, but don't install +- name: download new node packages + command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ')}}" register: result - until: result | success + until: result is succeeded + vars: + openshift_node_upgrade_rpm_list: + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" + - "PyYAML" + - "dnsmasq" -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool +# Pre-pull the rpms for openvswitch, but don't install +# openvswitch requires the latest version to be installed. 
+- name: download openvswitch upgrade rpm + command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch" register: result - until: result | success - -- name: Install Node service file - template: - dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" - src: "node.service.j2" - register: l_node_unit - -# NOTE: This is needed to make sure we are using the correct set -# of systemd unit files. The RPMs lay down defaults but -# the install/upgrade may override them in /etc/systemd/system/. -# NOTE: We don't use the systemd module as some versions of the module -# require a service to be part of the call. -- name: Reload systemd units - command: systemctl daemon-reload - when: l_node_unit | changed + until: result is succeeded diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml new file mode 100644 index 000000000..ef5d8d662 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml @@ -0,0 +1,19 @@ +--- +# input variables: +# - openshift_service_type +# - component +# - openshift_pkg_version +# - openshift_is_atomic + +# Install the pre-pulled RPM +# Note: dnsmasq is covered in its own play. openvswitch is included here +# because once we have the latest rpm downloaded, it will happily be installed. +- name: install pre-pulled node packages + command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ')}}" + register: result + until: result is succeeded + vars: + openshift_node_upgrade_rpm_list: + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" + - "PyYAML" + - "openvswitch" diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml new file mode 100644 index 000000000..6d92516c3 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/stop_services.yml @@ -0,0 +1,43 @@ +--- +- name: Stop node and openvswitch services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift_service_type }}-node" + - openvswitch + failed_when: false + +- name: Ensure containerized services stopped before Docker restart + service: + name: "{{ item }}" + state: stopped + with_items: + - etcd_container + - openvswitch + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + - "{{ openshift_service_type }}-node" + failed_when: false + when: openshift_is_containerized | bool + +- service: + name: docker + state: stopped + register: l_openshift_node_upgrade_docker_stop_result + until: not (l_openshift_node_upgrade_docker_stop_result is failed) + retries: 3 + delay: 30 + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- name: Stop rpm based services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift_service_type }}-node" + - openvswitch + failed_when: false + when: not openshift_is_containerized | bool diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml new file mode 100644 index 000000000..aa1a75100 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade_pre.yml @@ -0,0 +1,43 @@ +--- +# This is a hack to allow us to update various components without restarting +# services. This will persist into the upgrade play as well, so everything +# needs to be restarted by hand. +- set_fact: + skip_node_svc_handlers: True + +- include_tasks: registry_auth.yml + +- name: update package metadata to speed install later
+ command: "{{ ansible_pkg_mgr }} makecache" + register: result + until: result is succeeded + when: not openshift_is_containerized | bool + +- name: Check Docker image count + shell: "docker images -aq | wc -l" + register: docker_image_count + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- debug: var=docker_image_count.stdout + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- include_tasks: upgrade/containerized_upgrade_pull.yml + when: openshift_is_containerized | bool + +# Prepull the rpms for docker upgrade, but don't install +- name: download docker upgrade rpm + command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}" + register: result + until: result is succeeded + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- include_tasks: upgrade/rpm_upgrade.yml + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + when: not openshift_is_containerized | bool diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index da751bd65..7405cfd73 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -6,9 +6,9 @@ After=ovsdb-server.service After=ovs-vswitchd.service Wants={{ openshift_docker_service_name }}.service Documentation=https://github.com/openshift/origin -Requires=dnsmasq.service +Wants=dnsmasq.service After=dnsmasq.service -{% if openshift_use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} [Service] Type=notify diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 16fdde02e..7d817463c 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -5,24 +5,22 @@ dnsBindAddress: 127.0.0.1:53 dnsRecursiveResolvConf: /etc/origin/node/resolv.conf {% endif %} dnsDomain: {{ openshift.common.dns_domain }} -{% if 'dns_ip' in openshift.node %} -dnsIP: {{ openshift.node.dns_ip }} -{% endif %} +dnsIP: {{ openshift_dns_ip }} dockerConfig: execHandlerName: "" -iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}" +iptablesSyncPeriod: "{{ openshift_node_iptables_sync_period }}" imageConfig: - format: {{ openshift.node.registry_url }} + format: {{ oreg_url_node }} latest: {{ openshift_node_image_config_latest }} kind: NodeConfig -kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} -{% if openshift_use_crio %} +kubeletArguments: {{ l2_openshift_node_kubelet_args | default(None) | lib_utils_to_padded_yaml(level=1) }} +{% if openshift_use_crio | bool %} container-runtime: - remote container-runtime-endpoint: - - /var/run/crio.sock + - /var/run/crio/crio.sock image-service-endpoint: - - /var/run/crio.sock + - /var/run/crio/crio.sock node-labels: - router=true - registry=true @@ -34,7 +32,7 @@ masterClientConnectionOverrides: contentType: application/vnd.kubernetes.protobuf burst: 200 qps: 100 -masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig +masterKubeConfig: system:node:{{ openshift.common.hostname | lower }}.kubeconfig {% if openshift_node_use_openshift_sdn | bool %} networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} @@ -45,7 +43,7 @@ networkConfig: {% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or 
openshift_node_sdn_network_plugin_name == 'cni' %} networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} -{% if openshift.node.set_node_ip | bool %} +{% if openshift_set_node_ip | bool %} nodeIP: {{ openshift.common.ip }} {% endif %} nodeName: {{ openshift.node.nodename }} @@ -68,8 +66,8 @@ volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes {% if not (openshift_node_use_kuryr | default(False)) | bool %} proxyArguments: proxy-mode: - - {{ openshift.node.proxy_mode }} + - {{ openshift_node_proxy_mode }} {% endif %} volumeConfig: localQuota: - perFSGroup: {{ openshift.node.local_quota_per_fsgroup }} + perFSGroup: {{ openshift_node_local_quota_per_fsgroup }} diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 8b43beb07..9fe779057 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -3,9 +3,15 @@ Requires={{ openshift_docker_service_name }}.service After={{ openshift_docker_service_name }}.service PartOf={{ openshift_service_type }}-node.service Before={{ openshift_service_type }}-node.service -{% if openshift_use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi" +ExecStart=/bin/bash -c 'if [[ -f /usr/bin/docker-current ]]; \ + then echo DOCKER_ADDTL_BIND_MOUNTS=\"--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro \ + --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro \ + --volume=/etc/containers/registries:/etc/containers/registries:ro \ + {% if l_bind_docker_reg_auth %} --volume={{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\" > \ + /etc/sysconfig/{{ openshift_service_type }}-node-dep; \ + else echo "#DOCKER_ADDTL_BIND_MOUNTS=" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi' ExecStop= SyslogIdentifier={{ openshift_service_type }}-node-dep diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index b174c7023..23823e3e5 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -13,7 +13,7 @@ After=ovs-vswitchd.service Wants={{ openshift_service_type }}-master.service Requires={{ openshift_service_type }}-node-dep.service After={{ openshift_service_type }}-node-dep.service -Requires=dnsmasq.service +Wants=dnsmasq.service After=dnsmasq.service [Service] @@ -38,7 +38,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \ {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ - {{ openshift.node.node_image }}:${IMAGE_VERSION} + {{ osn_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift_service_type 
}}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 37f091c76..1fc9b6e72 100644 --- a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -6,7 +6,7 @@ PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/openvswitch ExecStartPre=-/usr/bin/docker rm -f openvswitch -ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ osn_ovs_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 5 ExecStop=/usr/bin/docker stop openvswitch SyslogIdentifier=openvswitch diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml index b42b75be9..da1570528 100644 --- a/roles/openshift_node_certificates/defaults/main.yml +++ b/roles/openshift_node_certificates/defaults/main.yml @@ -2,4 +2,4 @@ openshift_node_cert_expire_days: 730 openshift_ca_host: '' -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index 0686ac101..3531e30b8 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -22,6 +22,6 @@ state: restarted when: not openshift_certificates_redeploy | default(false) | bool register: l_docker_restart_docker_in_cert_result - until: not l_docker_restart_docker_in_cert_result | failed + until: not (l_docker_restart_docker_in_cert_result is failed) retries: 3 delay: 30 diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml index 0440bf11a..4362c644a 100644 --- a/roles/openshift_node_certificates/meta/main.yml +++ b/roles/openshift_node_certificates/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info: categories: - cloud - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 97f1fbbdd..13d9fd718 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -18,9 +18,9 @@ stat: path: "{{ openshift.common.config_base }}/node/{{ item }}" with_items: - - "system:node:{{ openshift.common.hostname }}.crt" - - "system:node:{{ openshift.common.hostname }}.key" - - "system:node:{{ openshift.common.hostname }}.kubeconfig" + - "system:node:{{ openshift.common.hostname | lower }}.crt" + - "system:node:{{ openshift.common.hostname | lower }}.key" + - "system:node:{{ openshift.common.hostname | lower }}.kubeconfig" - ca.crt - server.key - server.crt @@ -31,7 +31,7 @@ node_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool else (False in (g_node_cert_stat_result.results | 
default({}) - | oo_collect(attribute='stat.exists') + | lib_utils_oo_collect(attribute='stat.exists') | list)) }}" - name: Create openshift_generated_configs_dir if it does not exist @@ -51,56 +51,49 @@ - name: Generate the node client config command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config - {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %} + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config + {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} - {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} + {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %} --certificate-authority {{ legacy_ca_certificate }} {% endfor %} --certificate-authority={{ openshift_ca_cert }} - --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }} + --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }} --groups=system:nodes --master={{ hostvars[openshift_ca_host].openshift.master.api_url }} --signer-cert={{ openshift_ca_cert }} --signer-key={{ openshift_ca_key }} --signer-serial={{ openshift_ca_serial }} - --user=system:node:{{ hostvars[item].openshift.common.hostname }} + --user=system:node:{{ hostvars[item].openshift.common.hostname | lower }} --expire-days={{ openshift_node_cert_expire_days }} args: - creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}" + creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}" with_items: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" + | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) + | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" delegate_to: "{{ openshift_ca_host }}" run_once: true - name: Generate the node server certificate command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert - --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt - --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert + --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt + --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.key --expire-days={{ openshift_node_cert_expire_days }} --overwrite=true - --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }} + --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.hostname | lower }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.public_hostname | lower }},{{ 
hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }} --signer-cert={{ openshift_ca_cert }} --signer-key={{ openshift_ca_key }} --signer-serial={{ openshift_ca_serial }} args: - creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt" + creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt" with_items: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" + | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) + | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" delegate_to: "{{ openshift_ca_host }}" run_once: true -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: node_cert_mktemp - changed_when: False - when: node_certs_missing | bool - become: no - - name: Create a tarball of the node config directories command: > tar -czvf {{ openshift_node_generated_config_dir }}.tgz @@ -117,8 +110,7 @@ - name: Retrieve the node config tarballs from the master fetch: src: "{{ openshift_node_generated_config_dir }}.tgz" - dest: "{{ node_cert_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: node_certs_missing | bool @@ -132,15 +124,14 @@ - name: Unarchive the tarball on the node unarchive: - src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ openshift_node_generated_config_dir }}.tgz" dest: "{{ openshift_node_cert_dir }}" when: node_certs_missing | bool - name: Delete local temp directory - local_action: file path="{{ node_cert_mktemp.stdout }}" state=absent + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent changed_when: False when: node_certs_missing | bool - become: no - name: Copy OpenShift CA to system CA trust copy: diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml index 17ad8106d..12a6d3f94 100644 --- a/roles/openshift_node_certificates/vars/main.yml +++ b/roles/openshift_node_certificates/vars/main.yml @@ -1,7 +1,7 @@ --- openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs" openshift_node_cert_dir: "{{ openshift.common.config_base }}/node" -openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}" +openshift_node_cert_subdir: "node-{{ openshift.common.hostname | lower }}" openshift_node_config_dir: "{{ openshift.common.config_base }}/node" openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}" diff --git a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py deleted file mode 100644 index 69069f2dc..000000000 --- a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift-node -''' -from ansible import errors - - -class FilterModule(object): - ''' Custom ansible filters for use by openshift_node_facts role''' - - @staticmethod - def node_get_dns_ip(openshift_dns_ip, hostvars): - ''' Navigates the complicated logic of when to set dnsIP - - In all situations if they've set openshift_dns_ip use that - For 1.0/3.0 
installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None - For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip) - For 1.2/3.2+ installs we set to the node's default interface ip - ''' - - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - - # We always use what they've specified if they've specified a value - if openshift_dns_ip is not None: - return openshift_dns_ip - return hostvars['ansible_default_ipv4']['address'] - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {'node_get_dns_ip': self.node_get_dns_ip} diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml deleted file mode 100644 index c234a3000..000000000 --- a/roles/openshift_node_facts/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Set node facts - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - # Reset node labels to an empty dictionary. - - role: node - local_facts: - labels: {} - - role: node - local_facts: - annotations: "{{ openshift_node_annotations | default(none) }}" - iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" - kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - labels: "{{ openshift_node_labels | default(None) }}" - registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" - storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" - set_node_ip: "{{ openshift_set_node_ip | default(None) }}" - node_image: "{{ osn_image | default(None) }}" - ovs_image: "{{ osn_ovs_image | default(None) }}" - proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" - local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" - dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}" - env_vars: "{{ openshift_node_env_vars | default(None) }}" diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml index 7c81409a5..cccdea66f 100644 --- a/roles/openshift_node_group/defaults/main.yml +++ b/roles/openshift_node_group/defaults/main.yml @@ -17,7 +17,13 @@ openshift_node_group_edits: [] openshift_node_group_namespace: openshift-node openshift_node_group_labels: [] -openshift_imageconfig_format: "{{ oreg_url if oreg_url is defined else openshift.node.registry_url }}" +openshift_oreg_url_default_dict: + origin: "openshift/origin-${component}:${version}" + openshift-enterprise: openshift3/ose-${component}:${version} +openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}" +oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}" + +openshift_imageconfig_format: "{{ oreg_url_node }}" openshift_node_group_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}" diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 929b76f54..77be1f2b1 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -8,6 +8,7 @@ openshift_openstack_num_etcd: 0 openshift_openstack_num_masters: 1 
openshift_openstack_num_nodes: 1 openshift_openstack_num_infra: 1 +openshift_openstack_num_cns: 0 openshift_openstack_dns_nameservers: [] openshift_openstack_nodes_to_remove: [] @@ -44,6 +45,9 @@ openshift_openstack_container_storage_setup: # populate-dns openshift_openstack_dns_records_add: [] +openshift_openstack_public_hostname_suffix: "" +openshift_openstack_private_hostname_suffix: "" +openshift_openstack_public_dns_domain: "example.com" openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}" openshift_openstack_app_subdomain: "apps" @@ -54,6 +58,7 @@ openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshif openshift_openstack_subnet_prefix: "192.168.99" openshift_openstack_master_hostname: master openshift_openstack_infra_hostname: infra-node +openshift_openstack_cns_hostname: cns openshift_openstack_node_hostname: app-node openshift_openstack_lb_hostname: lb openshift_openstack_etcd_hostname: etcd @@ -63,8 +68,10 @@ openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_cns_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_cns_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}" @@ -81,6 +88,7 @@ openshift_openstack_infra_server_group_policies: [] openshift_openstack_docker_volume_size: 15 openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_size }}" openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_cns_volume_size: "{{ openshift_openstack_docker_volume_size }}" openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}" openshift_openstack_etcd_volume_size: 2 openshift_openstack_lb_volume_size: 5 diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index 30996cc47..1e487d434 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -91,6 +91,7 @@ with_items: - "{{ openshift_openstack_master_image }}" - "{{ openshift_openstack_infra_image }}" + - "{{ openshift_openstack_cns_image }}" - "{{ openshift_openstack_node_image }}" - "{{ openshift_openstack_lb_image }}" - "{{ openshift_openstack_etcd_image }}" @@ -100,6 +101,7 @@ with_items: - "{{ openshift_openstack_master_flavor }}" - "{{ openshift_openstack_infra_flavor }}" + - "{{ openshift_openstack_cns_flavor }}" - "{{ openshift_openstack_node_flavor }}" - "{{ openshift_openstack_lb_flavor }}" - "{{ openshift_openstack_etcd_flavor }}" diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml index 82307b208..be73d18be 100644 --- 
a/roles/openshift_openstack/tasks/container-storage-setup.yml +++ b/roles/openshift_openstack/tasks/container-storage-setup.yml @@ -8,7 +8,7 @@ group: root mode: 0644 when: - - ansible_distribution_version | version_compare('7.4', '>=') + - ansible_distribution_version is version_compare('7.4', '>=') - ansible_distribution == "RedHat" - block: @@ -20,7 +20,7 @@ group: root mode: 0644 when: - - ansible_distribution_version | version_compare('7.4', '<') + - ansible_distribution_version is version_compare('7.4', '<') - ansible_distribution == "RedHat" - block: diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml index e41104af1..c95c9e607 100644 --- a/roles/openshift_openstack/tasks/node-packages.yml +++ b/roles/openshift_openstack/tasks/node-packages.yml @@ -7,7 +7,7 @@ state: latest with_items: "{{ openshift_openstack_required_packages }}" register: result - until: result | success + until: result is succeeded - name: Install debug packages (optional) yum: @@ -16,4 +16,4 @@ with_items: "{{ openshift_openstack_debug_packages }}" when: openshift_openstack_install_debug_packages|bool register: result - until: result | success + until: result is succeeded diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index eae4967f7..858dd9e57 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -1,7 +1,7 @@ --- - name: "Generate list of private A records" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" - name: "Add wildcard records to the private A records for infrahosts" @@ -48,7 +48,7 @@ - name: "Generate list of public A records" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix, 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" when: hostvars[item]['public_v4'] is defined @@ -116,6 +116,6 @@ - "{{ openshift_openstack_dns_records_add | default([]) }}" - entries register: nsupdate_add_result - until: nsupdate_add_result|succeeded + until: nsupdate_add_result is succeeded retries: 10 delay: 1 diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 8d13eb81e..8e7c6288a 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -419,6 +419,46 @@ resources: port_range_min: 443 port_range_max: 443 + cns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-cns-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cns cluster nodes + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + # glusterfs_sshd + - direction: 
ingress + protocol: tcp + port_range_min: 2222 + port_range_max: 2222 + # heketi dialing backends + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + # glusterfs_management + - direction: ingress + protocol: tcp + port_range_min: 24007 + port_range_max: 24007 + # glusterfs_rdma + - direction: ingress + protocol: tcp + port_range_min: 24008 + port_range_max: 24008 + # glusterfs_bricks + - direction: ingress + protocol: tcp + port_range_min: 49152 + port_range_max: 49251 + {% if openshift_openstack_num_masters|int > 1 %} lb-secgrp: type: OS::Neutron::SecurityGroup @@ -483,7 +523,7 @@ resources: floating_network: if: - no_floating - - null + - '' - {{ openshift_openstack_external_network_name }} {% if openshift_openstack_provider_network_name %} attach_float_net: false @@ -549,8 +589,13 @@ resources: secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } -{% if not openshift_openstack_provider_network_name %} - floating_network: {{ openshift_openstack_external_network_name }} + floating_network: + if: + - no_floating + - '' + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_provider_network_name %} + attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_lb_volume_size }} {% if not openshift_openstack_provider_network_name %} @@ -615,7 +660,7 @@ resources: floating_network: if: - no_floating - - null + - '' - {{ openshift_openstack_external_network_name }} {% if openshift_openstack_provider_network_name %} attach_float_net: false @@ -685,7 +730,7 @@ resources: floating_network: if: - no_floating - - null + - '' - {{ openshift_openstack_external_network_name }} {% if openshift_openstack_provider_network_name %} attach_float_net: false @@ -752,8 +797,13 @@ resources: {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } -{% if not openshift_openstack_provider_network_name %} - floating_network: {{ openshift_openstack_external_network_name }} + floating_network: + if: + - no_floating + - '' + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_provider_network_name %} + attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_infra_volume_size }} {% if openshift_openstack_infra_server_group_policies|length > 0 %} @@ -764,3 +814,58 @@ resources: depends_on: - interface {% endif %} + + cns: + type: OS::Heat::ResourceGroup + properties: + count: {{ openshift_openstack_num_cns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_cns_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: cns + cluster_id: {{ openshift_openstack_stack_name }} + type: cns + image: {{ openshift_openstack_cns_image }} + flavor: {{ openshift_openstack_cns_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + 
attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +{% if openshift_openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} + - { get_resource: cns-secgrp } + - { get_resource: common-secgrp } +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} +{% endif %} + volume_size: {{ openshift_openstack_cns_volume_size }} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index a829da34f..29b09f3c9 100644 --- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -102,13 +102,11 @@ parameters: label: Attach-float-net description: A switch for floating network port connection -{% if not openshift_openstack_provider_network_name %} floating_network: type: string default: '' label: Floating network description: Network to allocate floating IP from -{% endif %} availability_zone: type: string @@ -212,6 +210,9 @@ resources: host-type: { get_param: type } sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } +{% if openshift_openstack_dns_nameservers %} + openshift_hostname: { get_param: name } +{% endif %} scheduler_hints: { get_param: scheduler_hints } {% if use_trunk_ports|default(false)|bool %} diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 48b0699ab..aea7616bf 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info: - 7 dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml index 0b4dd7d1f..b1d9c8cca 100644 --- a/roles/openshift_persistent_volumes/tasks/main.yml +++ b/roles/openshift_persistent_volumes/tasks/main.yml @@ -26,7 +26,8 @@ when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: create standard pv and pvc lists - # generate_pv_pvcs_list is a custom action module defined in ../action_plugins + # generate_pv_pvcs_list is a custom action module defined in + # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py generate_pv_pvcs_list: {} register: l_pv_pvcs_list diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml index 346605ff7..865269b7a 100644 --- a/roles/openshift_persistent_volumes/tasks/pv.yml +++ b/roles/openshift_persistent_volumes/tasks/pv.yml @@ -8,10 +8,10 @@ - name: Create PersistentVolumes command: > - {{ openshift.common.client_binary }} create + {{ openshift_client_binary }} create -f {{ mktemp.stdout }}/persistent-volumes.yml --config={{ mktemp.stdout }}/admin.kubeconfig register: pv_create_output when: persistent_volumes | length > 0 - failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) + failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0" changed_when: ('created' in pv_create_output.stdout) diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml index e44f9b18f..6c12d128c 100644 --- 
a/roles/openshift_persistent_volumes/tasks/pvc.yml +++ b/roles/openshift_persistent_volumes/tasks/pvc.yml @@ -8,10 +8,10 @@ - name: Create PersistentVolumeClaims command: > - {{ openshift.common.client_binary }} create + {{ openshift_client_binary }} create -f {{ mktemp.stdout }}/persistent-volume-claims.yml --config={{ mktemp.stdout }}/admin.kubeconfig register: pvc_create_output when: persistent_volume_claims | length > 0 - failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) + failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0" changed_when: ('created' in pvc_create_output.stdout) diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 index d40417a9a..fac589a92 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 +++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 @@ -8,7 +8,7 @@ items: metadata: name: "{{ claim.name }}" spec: - accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }} + accessModes: {{ claim.access_modes | lib_utils_to_padded_yaml(2, 2) }} resources: requests: storage: "{{ claim.capacity }}" diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 index 9ec14208b..354561432 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 +++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -16,6 +16,6 @@ items: spec: capacity: storage: "{{ volume.capacity }}" - accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }} - {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }} + accessModes: {{ volume.access_modes | lib_utils_to_padded_yaml(2, 2) }} + {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | lib_utils_to_padded_yaml(3, 2) }} {% endfor %} diff --git a/roles/openshift_project_request_template/tasks/main.yml b/roles/openshift_project_request_template/tasks/main.yml index c31ee5795..3403840fb 100644 --- a/roles/openshift_project_request_template/tasks/main.yml +++ b/roles/openshift_project_request_template/tasks/main.yml @@ -6,7 +6,7 @@ - name: Generate default project template command: | - {{ openshift.common.client_binary | quote }} \ + {{ openshift_client_binary | quote }} \ --config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \ --output yaml \ adm create-bootstrap-project-template \ @@ -28,7 +28,7 @@ - name: Create or update project request template command: | - {{ openshift.common.client_binary }} \ + {{ openshift_client_binary }} \ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ --namespace {{ openshift_project_request_template_namespace | quote }} \ apply --filename {{ mktemp.stdout }} diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md index f1eca1da6..6079e6016 100644 --- a/roles/openshift_prometheus/README.md +++ b/roles/openshift_prometheus/README.md @@ -31,18 +31,20 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml). 
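Each entry in `openshift_prometheus_args` is passed through to the Prometheus command line, so any flag the deployed Prometheus version understands should work here. A sketch of an inventory override (the values below are illustrative, not role defaults):

```
openshift_prometheus_args=['--storage.tsdb.retention=24h', '--log.level=debug']
```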
e.g. ``` -openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=5s', '--storage.tsdb.max-block-duration=6m'] +openshift_prometheus_args=['--storage.tsdb.retention=6h', '--query.timeout=2m'] ``` ## PVC related variables Each Prometheus component (prometheus, alertmanager, alertbuffer) can have its PV claim configured by setting the corresponding role variables: ``` openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir) +openshift_prometheus_<COMPONENT>_storage_class: <VALUE> openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE> ``` e.g. ``` openshift_prometheus_storage_type: pvc +openshift_prometheus_storage_class: glusterfs-storage openshift_prometheus_alertmanager_pvc_name: alertmanager openshift_prometheus_alertbuffer_pvc_size: 10G openshift_prometheus_pvc_access_modes: [ReadWriteOnce] diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index df331a4bb..37a05f3f0 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -7,14 +7,29 @@ openshift_prometheus_namespace: openshift-metrics # defaults hosts for routes openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} +openshift_prometheus_alertmanager_hostname: alertmanager-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} + openshift_prometheus_node_selector: {"region":"infra"} +openshift_prometheus_service_port: 443 +openshift_prometheus_service_targetport: 8443 +openshift_prometheus_service_name: prometheus +openshift_prometheus_alerts_service_targetport: 9443 +openshift_prometheus_alerts_service_name: alerts +openshift_prometheus_alertmanager_service_targetport: 10443 +openshift_prometheus_alertmanager_service_name: alertmanager +openshift_prometheus_serviceaccount_annotations: [] +l_openshift_prometheus_serviceaccount_annotations: + - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alertmanager='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}' + # additional prometheus rules file openshift_prometheus_additional_rules_file: null #prometheus application arguments -openshift_prometheus_args: ['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m'] +openshift_prometheus_args: ['--storage.tsdb.retention=6h'] # storage # One of ['emptydir', 'pvc'] @@ -23,6 +38,7 @@ openshift_prometheus_pvc_name: prometheus openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}" openshift_prometheus_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}" +openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}" # One of ['emptydir', 'pvc'] openshift_prometheus_alertmanager_storage_type: "emptydir" @@ -30,6 +46,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager openshift_prometheus_alertmanager_pvc_size: "{{
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index df331a4bb..37a05f3f0 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -7,14 +7,29 @@ openshift_prometheus_namespace: openshift-metrics # defaults hosts for routes openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} +openshift_prometheus_alertmanager_hostname: alertmanager-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} + openshift_prometheus_node_selector: {"region":"infra"} +openshift_prometheus_service_port: 443 +openshift_prometheus_service_targetport: 8443 +openshift_prometheus_service_name: prometheus +openshift_prometheus_alerts_service_targetport: 9443 +openshift_prometheus_alerts_service_name: alerts +openshift_prometheus_alertmanager_service_targetport: 10443 +openshift_prometheus_alertmanager_service_name: alertmanager +openshift_prometheus_serviceaccount_annotations: [] +l_openshift_prometheus_serviceaccount_annotations: + - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alertmanager='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}' + # additional prometheus rules file openshift_prometheus_additional_rules_file: null #prometheus application arguments -openshift_prometheus_args: ['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m'] +openshift_prometheus_args: ['--storage.tsdb.retention=6h'] # storage # One of ['emptydir', 'pvc'] @@ -23,6 +38,7 @@ openshift_prometheus_pvc_name: prometheus openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}" openshift_prometheus_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}" +openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}" # One of ['emptydir', 'pvc'] openshift_prometheus_alertmanager_storage_type: "emptydir" @@ -30,6 +46,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}" openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}" +openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}" # One of ['emptydir', 'pvc'] openshift_prometheus_alertbuffer_storage_type: "emptydir" @@ -37,6 +54,7 @@ openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}" openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}" +openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}" # container resources openshift_prometheus_cpu_limit: null diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml index 33188bb7e..69c5e0ee2 100644 --- a/roles/openshift_prometheus/meta/main.yaml +++ b/roles/openshift_prometheus/meta/main.yaml @@ -15,5 +15,6 @@ galaxy_info: categories: - openshift dependencies: -- { role: lib_openshift } -- { role: openshift_facts } +- role: lib_openshift +- role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_prometheus/tasks/facts.yaml b/roles/openshift_prometheus/tasks/facts.yaml new file mode 100644 index 000000000..214089732 --- /dev/null +++ b/roles/openshift_prometheus/tasks/facts.yaml @@ -0,0 +1,10 @@ +--- +# The kubernetes version impacts the prometheus scraping endpoint +# so gathering it before constructing the configmap +- name: get oc version + oc_version: + register: oc_version + +- set_fact: + kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" + openshift_prometheus_serviceaccount_annotations: "{{ l_openshift_prometheus_serviceaccount_annotations + openshift_prometheus_serviceaccount_annotations|list }}" diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index ad15dc65f..0b565502f 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -1,49 +1,45 @@ --- +# set facts +- include_tasks: facts.yaml # namespace - name: Add prometheus project oc_project: state: present name: "{{ openshift_prometheus_namespace }}" - node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}" + node_selector: "{{ openshift_prometheus_node_selector | lib_utils_oo_selector_to_string_list() }}" description: Prometheus # secrets -- name: Set alert and prometheus secrets +- name: Set alert, alertmanager and prometheus secrets oc_secret: state: present name: "{{ item }}-proxy" namespace: "{{ openshift_prometheus_namespace }}" contents: - path: session_secret - data: "{{ 43 | oo_random_word }}=" + data: "{{ 43 | lib_utils_oo_random_word }}=" with_items: - prometheus - alerts + - alertmanager # serviceaccount - name: create prometheus serviceaccount oc_serviceaccount: state: present - name: prometheus + name: "{{ openshift_prometheus_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" - # TODO add annotations when supproted - # annotations: - # serviceaccounts.openshift.io/oauth-redirectreference.prom:
'{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' - # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' - - secrets: - - prometheus-secrets changed_when: no + # TODO remove this when annotations are supported by oc_serviceaccount - name: annotate serviceaccount command: > - {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - serviceaccount prometheus - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' - + {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + serviceaccount {{ openshift_prometheus_service_name }} {{ item }} + with_items: + "{{ openshift_prometheus_serviceaccount_annotations }}" # create clusterrolebinding for prometheus serviceaccount - name: Set cluster-reader permissions for prometheus @@ -52,63 +48,61 @@ namespace: "{{ openshift_prometheus_namespace }}" resource_kind: cluster-role resource_name: cluster-reader - user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus" + user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:{{ openshift_prometheus_service_name }}" -# create prometheus and alerts services -# TODO join into 1 task with loop -- name: Create prometheus service + +- name: create services for prometheus oc_service: - state: present - name: "{{ item.name }}" + name: "{{ openshift_prometheus_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" - selector: - app: prometheus labels: - name: "{{ item.name }}" - # TODO add annotations when supported - # annotations: - # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + name: prometheus + annotations: + oprometheus.io/scrape: 'true' + oprometheus.io/scheme: https + service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls ports: - - port: 443 - targetPort: 8443 - with_items: - - name: prometheus + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_service_targetport }}" + protocol: TCP + selector: + app: prometheus -- name: Create alerts service +- name: create services for alert buffer oc_service: - state: present - name: "{{ item.name }}" + name: "{{ openshift_prometheus_alerts_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" + labels: + name: prometheus + annotations: + service.alpha.openshift.io/serving-cert-secret-name: alerts-tls + ports: + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_alerts_service_targetport }}" + protocol: TCP selector: app: prometheus + +- name: create services for alertmanager + oc_service: + name: "{{ openshift_prometheus_alertmanager_service_name }}" + namespace: "{{ openshift_prometheus_namespace }}" labels: - name: "{{ item.name }}" - # TODO add annotations when supported - # annotations: - # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + name: prometheus + annotations: + service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls ports: - - port: 443 - targetPort: 9443 - with_items: - - name: alerts - - -# Annotate 
services with secret name -# TODO remove this when annotations are supported by oc_service -- name: annotate prometheus service - command: > - {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service prometheus - prometheus.io/scrape='true' - prometheus.io/scheme=https - service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls - -- name: annotate alerts service - command: > - {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls' + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}" + protocol: TCP + selector: + app: prometheus # create prometheus and alerts routes +# TODO: oc_route module should support insecureEdgeTerminationPolicy: Redirect - name: create prometheus and alerts routes oc_route: state: present @@ -122,6 +116,8 @@ host: "{{ openshift_prometheus_hostname }}" - name: alerts host: "{{ openshift_prometheus_alerts_hostname }}" + - name: alertmanager + host: "{{ openshift_prometheus_alertmanager_hostname }}" # Storage - name: create prometheus pvc @@ -131,6 +127,7 @@ access_modes: "{{ openshift_prometheus_pvc_access_modes }}" volume_capacity: "{{ openshift_prometheus_pvc_size }}" selector: "{{ openshift_prometheus_pvc_pv_selector }}" + storage_class_name: "{{ openshift_prometheus_sc_name }}" when: openshift_prometheus_storage_type == 'pvc' - name: create alertmanager pvc @@ -140,6 +137,7 @@ access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}" volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}" selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}" + storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}" when: openshift_prometheus_alertmanager_storage_type == 'pvc' - name: create alertbuffer pvc @@ -149,6 +147,7 @@ access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}" volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}" selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}" + storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}" when: openshift_prometheus_alertbuffer_storage_type == 'pvc' # prometheus configmap @@ -166,15 +165,6 @@ path: "{{ tempdir }}/prometheus.additional.rules" register: additional_rules_stat -# The kubernetes version impacts the prometheus scraping endpoint -# so gathering it before constructing the configmap -- name: get oc version - oc_version: - register: oc_version - -- set_fact: - kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" - - template: src: prometheus.yml.j2 dest: "{{ tempdir }}/prometheus.yml" @@ -216,7 +206,7 @@ - name: Set alertmanager configmap oc_configmap: state: present - name: "prometheus-alerts" + name: "alertmanager" namespace: "{{ openshift_prometheus_namespace }}" from_file: alertmanager.yml: "{{ tempdir }}/alertmanager.yml" diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 38798e1f5..66d65a3f2 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -1,5 +1,5 @@ --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - "{{ openshift_deployment_type }}.yml" @@ -16,9 +16,11 @@ - 
name: Create templates subdirectory file: state: directory - path: "{{ tempdir }}/templates" + path: "{{ tempdir }}/{{ item }}" mode: 0755 changed_when: False + with_items: + - templates - include_tasks: install_prometheus.yaml when: openshift_prometheus_state == 'present' diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall.yaml index d746402db..d746402db 100644 --- a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/uninstall.yaml diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index d780550b8..c0abd483b 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -19,7 +19,7 @@ spec: labels: app: prometheus spec: - serviceAccountName: prometheus + serviceAccountName: "{{ openshift_prometheus_service_name }}" {% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %} nodeSelector: {% for key, value in openshift_prometheus_node_selector.items() %} @@ -47,15 +47,15 @@ spec: cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - - containerPort: 8443 + - containerPort: {{ openshift_prometheus_service_targetport }} name: web args: - -provider=openshift - - -https-address=:8443 + - -https-address=:{{ openshift_prometheus_service_targetport }} - -http-address= - -email-domain=* - -upstream=http://localhost:9090 - - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' - -tls-cert=/etc/tls/private/tls.crt @@ -67,9 +67,9 @@ spec: - -skip-auth-regex=^/metrics volumeMounts: - mountPath: /etc/tls/private - name: prometheus-tls + name: prometheus-tls-secret - mountPath: /etc/proxy/secrets - name: prometheus-secrets + name: prometheus-proxy-secret - mountPath: /prometheus name: prometheus-data @@ -104,7 +104,7 @@ spec: - mountPath: /prometheus name: prometheus-data - # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy + # Deploy alert-buffer behind oauth alerts-proxy - name: alerts-proxy image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" imagePullPolicy: IfNotPresent @@ -124,15 +124,15 @@ spec: cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - - containerPort: 9443 + - containerPort: {{ openshift_prometheus_alerts_service_targetport }} name: web args: - -provider=openshift - - -https-address=:9443 + - -https-address=:{{ openshift_prometheus_alerts_service_targetport }} - -http-address= - -email-domain=* - -upstream=http://localhost:9099 - - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' - -tls-cert=/etc/tls/private/tls.crt @@ -143,9 +143,9 
@@ spec: - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt volumeMounts: - mountPath: /etc/tls/private - name: alerts-tls + name: alerts-tls-secret - mountPath: /etc/proxy/secrets - name: alerts-secrets + name: alerts-proxy-secret - name: alert-buffer args: @@ -169,11 +169,55 @@ spec: {% endif %} volumeMounts: - mountPath: /alert-buffer - name: alert-buffer-data + name: alerts-data ports: - containerPort: 9099 name: alert-buf + # Deploy alertmanager behind oauth alertmanager-proxy + - name: alertmanager-proxy + image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" + imagePullPolicy: IfNotPresent + resources: + requests: +{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} + memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}" +{% endif %} + limits: +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} + memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" +{% endif %} + ports: + - containerPort: {{ openshift_prometheus_alertmanager_service_targetport }} + name: web + args: + - -provider=openshift + - -https-address=:{{ openshift_prometheus_alertmanager_service_targetport }} + - -http-address= + - -email-domain=* + - -upstream=http://localhost:9093 + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} + - -openshift-ca=/etc/pki/tls/cert.pem + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' + - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -skip-auth-regex=^/metrics + volumeMounts: + - mountPath: /etc/tls/private + name: alertmanager-tls-secret + - mountPath: /etc/proxy/secrets + name: alertmanager-proxy-secret + - name: alertmanager args: - -config.file=/etc/alertmanager/alertmanager.yml @@ -205,14 +249,15 @@ spec: restartPolicy: Always volumes: + - name: prometheus-config configMap: defaultMode: 420 name: prometheus - - name: prometheus-secrets + - name: prometheus-proxy-secret secret: secretName: prometheus-proxy - - name: prometheus-tls + - name: prometheus-tls-secret secret: secretName: prometheus-tls - name: prometheus-data @@ -225,13 +270,19 @@ spec: - name: alertmanager-config configMap: defaultMode: 420 - name: prometheus-alerts - - name: alerts-secrets + name: alertmanager + - name: alertmanager-proxy-secret secret: - secretName: alerts-proxy - - name: alerts-tls + secretName: alertmanager-proxy + - name: alertmanager-tls-secret + secret: + secretName: alertmanager-tls + - name: alerts-tls-secret secret: - secretName: prometheus-alerts-tls + secretName:
alerts-tls + - name: alerts-proxy-secret + secret: + secretName: alerts-proxy - name: alertmanager-data {% if openshift_prometheus_alertmanager_storage_type == 'pvc' %} persistentVolumeClaim: @@ -239,7 +289,7 @@ spec: {% else %} emptydir: {} {% endif %} - - name: alert-buffer-data + - name: alerts-data {% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %} persistentVolumeClaim: claimName: {{ openshift_prometheus_alertbuffer_pvc_name }} diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2 index 63430f834..005c2c564 100644 --- a/roles/openshift_prometheus/templates/prometheus.yml.j2 +++ b/roles/openshift_prometheus/templates/prometheus.yml.j2 @@ -1,10 +1,5 @@ rule_files: - - 'prometheus.rules' -{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %} - - 'prometheus.additional.rules' -{% endif %} - - + - '*.rules' # A scrape configuration for running Prometheus on a Kubernetes cluster. # This uses separate scrape configs for cluster components (i.e. API server, node) @@ -39,31 +34,11 @@ scrape_configs: action: keep regex: default;kubernetes;https -# Scrape config for nodes. -# -# Each node exposes a /metrics endpoint that contains operational metrics for -# the Kubelet and other components. -- job_name: 'kubernetes-nodes' - - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - # Scrape config for controllers. # # Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for # the controllers. # -# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via -# endpoints. - job_name: 'kubernetes-controllers' scheme: https @@ -87,6 +62,27 @@ scrape_configs: regex: (.+)(?::\d+) replacement: $1:8444 +# Scrape config for nodes. +# +# Each node exposes a /metrics endpoint that contains operational metrics for +# the Kubelet and other components. +- job_name: 'kubernetes-nodes' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + # Drop a very high cardinality metric that is incorrect in 3.7. It will be + # fixed in 3.9. + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'openshift_sdn_pod_(setup|teardown)_latency(.*)' + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + # Scrape config for cAdvisor. # # Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that @@ -107,6 +103,14 @@ scrape_configs: kubernetes_sd_configs: - role: node + # Exclude a set of high cardinality metrics that can contribute to significant + # memory use in large clusters. These can be selectively enabled as necessary + # for medium or small clusters. 
+ metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'container_(cpu_user_seconds_total|cpu_cfs_periods_total|memory_usage_bytes|memory_swap|memory_working_set_bytes|memory_cache|last_seen|fs_(read_seconds_total|write_seconds_total|sector_(.*)|io_(.*)|reads_merged_total|writes_merged_total)|tasks_state|memory_failcnt|memory_failures_total|spec_memory_swap_limit_bytes|fs_(.*)_bytes_total|spec_(.*))' + relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) @@ -133,38 +137,101 @@ scrape_configs: - role: endpoints relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + # only scrape infrastructure components + - source_labels: [__meta_kubernetes_namespace] + action: keep + regex: 'default|logging|metrics|kube-.+|openshift|openshift-.+' + # drop infrastructure components managed by other scrape targets + - source_labels: [__meta_kubernetes_service_name] + action: drop + regex: 'prometheus-node-exporter' + # only those that have requested scraping + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + +# Scrape config for node-exporter, which is expected to be running on port 9100. 
+- job_name: 'kubernetes-nodes-exporter' + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + + kubernetes_sd_configs: + - role: node + + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'node_cpu|node_(disk|scrape_collector)_.+' + # preserve a subset of the network, netstat, vmstat, and filesystem series + - source_labels: [__name__] action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + regex: '(node_(netstat_Ip_.+|vmstat_(nr|thp)_.+|filesystem_(free|size|device_error)|network_(transmit|receive)_(drop|errs)))' + target_label: __name__ + replacement: renamed_$1 + - source_labels: [__name__] + action: drop + regex: 'node_(netstat|vmstat|filesystem|network)_.+' + - source_labels: [__name__] action: replace + regex: 'renamed_(.+)' + target_label: __name__ + replacement: $1 + # drop any partial expensive series + - source_labels: [__name__, device] + action: drop + regex: 'node_network_.+;veth.+' + - source_labels: [__name__, mountpoint] + action: drop + regex: 'node_filesystem_(free|size|device_error);([^/].*|/.+)' + + relabel_configs: + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' target_label: __address__ - regex: (.+)(?::\d+);(\d+) - replacement: $1:$2 - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username] - action: replace - target_label: __basic_auth_username__ - regex: (.+) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password] - action: replace - target_label: __basic_auth_password__ - regex: (.+) + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name + regex: __meta_kubernetes_node_label_(.+) + +# Scrape config for the template service broker +- job_name: 'openshift-template-service-broker' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + server_name: apiserver.openshift-template-service-broker.svc + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: openshift-template-service-broker;apiserver;https + alerting: alertmanagers: diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml index cb9278eb7..5ef352bcd 100644 --- a/roles/openshift_provisioners/meta/main.yaml +++ b/roles/openshift_provisioners/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index e543d753c..de763f6cf 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -1,7 +1,7 @@ --- - name: Check efs current replica count command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs + {{ openshift_client_binary }} --config={{ 
mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}} register: efs_replica_count when: not ansible_check_mode @@ -58,7 +58,7 @@ # anyuid in order to run as root & chgrp shares with allocated gids - name: "Check efs anyuid permissions" command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc/anyuid -o jsonpath='{.users}' register: efs_anyuid check_mode: no @@ -66,7 +66,7 @@ - name: "Set anyuid permissions for efs" command: > - {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy + {{ openshift_client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs register: efs_output failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml index 49d03f203..27c8a4b81 100644 --- a/roles/openshift_provisioners/tasks/oc_apply.yaml +++ b/roles/openshift_provisioners/tasks/oc_apply.yaml @@ -1,7 +1,7 @@ --- - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' -n {{namespace}} @@ -11,16 +11,16 @@ - name: Applying {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} -n {{ namespace }} register: generation_apply - failed_when: "'error' in generation_apply.stderr" + failed_when: "'error' in generation_apply.stderr or generation_apply.rc != 0" changed_when: no - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' -n {{namespace}} @@ -32,20 +32,20 @@ - name: Removing previous {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} delete -f {{ file_name }} -n {{ namespace }} register: generation_delete - failed_when: "'error' in generation_delete.stderr" + failed_when: "'error' in generation_delete.stderr or generation_delete.rc != 0" changed_when: generation_delete.rc == 0 when: generation_apply.rc != 0 - name: Recreating {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} -n {{ namespace }} register: generation_apply - failed_when: "'error' in generation_apply.stderr" + failed_when: "'error' in generation_apply.stderr or generation_apply.rc | int != 0" changed_when: generation_apply.rc == 0 when: generation_apply.rc != 0 diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml index 602dee773..ac12087ec 100644 --- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml +++ 
b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml @@ -5,7 +5,7 @@ # delete the deployment objects that we had created - name: delete provisioner api objects command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - dc @@ -15,7 +15,7 @@ # delete our old secrets - name: delete provisioner secrets command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - provisioners-efs @@ -26,7 +26,7 @@ # delete cluster role bindings - name: delete cluster role bindings command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - run-provisioners-efs diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 5e7bde1e1..911005bb6 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -10,7 +10,7 @@ - name: Ensure libselinux-python is installed package: name=libselinux-python state=present register: result - until: result | success + until: result is succeeded - name: Remove openshift_additional.repo file file: @@ -37,6 +37,13 @@ - when: r_openshift_repos_has_run is not defined block: + - include_tasks: rhel_repos.yml + when: + - ansible_distribution == 'RedHat' + - openshift_deployment_type == 'openshift-enterprise' + - rhsub_user is defined + - rhsub_pass is defined + - include_tasks: centos_repos.yml when: - ansible_os_family == "RedHat" diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml new file mode 100644 index 000000000..8d16629cc --- /dev/null +++ b/roles/openshift_repos/tasks/rhel_repos.yml @@ -0,0 +1,33 @@ +--- +- name: Ensure RHEL rhui repositories are disabled + command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'rhui'" + register: repo_rhui + changed_when: "repo_rhui.rc != 1" + failed_when: repo_rhui.rc == 11 + +- name: Disable RHEL rhui repositories + command: yum-config-manager \ --disable 'rhui-REGION-client-config-server-7' \ --disable 'rhui-REGION-rhel-server-rh-common' \ --disable 'rhui-REGION-rhel-server-releases' + when: repo_rhui.changed + +- name: Ensure RHEL repositories are enabled + command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'Red Hat' | wc -l" + register: repo_rhel + changed_when: "'4' not in repo_rhel.stdout" + failed_when: repo_rhel.rc == 11 + +- name: Disable all repositories + command: bash -c "subscription-manager repos --disable='*'" + when: repo_rhel.changed + +- name: Enable RHEL repositories + command: subscription-manager repos \ --enable="rhel-7-server-rpms" \ --enable="rhel-7-server-extras-rpms" \ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \ --enable="rhel-7-fast-datapath-rpms" + register: subscribe_repos + until: subscribe_repos is succeeded + when: repo_rhel.changed diff --git
a/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 new file mode 100644 index 000000000..db214af2c --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin37] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin37/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin37-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin37/ +enabled={{ 1 if openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin37-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin37-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin37/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py index 72c47b8ee..14f1f72c2 100644 --- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py @@ -6,15 +6,6 @@ import re -# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - def vars_with_pattern(source, pattern=""): ''' Returns a list of variables whose name matches the given pattern ''' if source == '': @@ -39,6 +30,5 @@ class FilterModule(object): def filters(self): ''' Returns the names of the filters provided by this class ''' return { - 'map_from_pairs': map_from_pairs, 'vars_with_pattern': vars_with_pattern } diff --git a/roles/openshift_sanitize_inventory/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml index f5b37186e..cde3eccb6 100644 --- a/roles/openshift_sanitize_inventory/meta/main.yml +++ b/roles/openshift_sanitize_inventory/meta/main.yml @@ -12,4 +12,6 @@ galaxy_info: categories: - cloud - system -dependencies: [] +dependencies: +- role: lib_utils +- role: lib_openshift diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml index 795b8ee60..b1ddbc07a 100644 --- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -2,15 +2,18 @@ - name: Check for usage of deprecated variables set_fact: - __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will be no longer be used in the next minor release. 
Please update your inventory accordingly.']" + __deprecation_message: "{{ __deprecation_message | default( __deprecation_header ) }} \n\t{{ item }}" when: - hostvars[inventory_hostname][item] is defined with_items: "{{ __warn_deprecated_vars }}" - block: - debug: msg="{{__deprecation_message}}" - - pause: - seconds: "{{ 10 }}" + - run_once: true + set_stats: + data: + installer_phase_initialize: + message: "{{ __deprecation_message }}" when: - __deprecation_message | default ('') | length > 0 diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 77428272c..62d460272 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -3,37 +3,11 @@ # the user would also be aware of any deprecated variables they should note to adjust - include_tasks: deprecations.yml -- name: Abort when conflicting deployment type variables are set - when: - - deployment_type is defined - - openshift_deployment_type is defined - - openshift_deployment_type != deployment_type - fail: - msg: |- - openshift_deployment_type is set to "{{ openshift_deployment_type }}". - deployment_type is set to "{{ deployment_type }}". - To avoid unexpected results, this conflict is not allowed. - deployment_type is deprecated in favor of openshift_deployment_type. - Please specify only openshift_deployment_type, or make both the same. - - name: Standardize on latest variable names set_fact: - # goal is to deprecate deployment_type in favor of openshift_deployment_type. - # both will be accepted for now, but code should refer to the new name. - # TODO: once this is well-documented, add deprecation notice if using old name. - deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" - openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" -- name: Abort when deployment type is invalid - # this variable is required; complain early and clearly if it is invalid. - when: openshift_deployment_type not in known_openshift_deployment_types - fail: - msg: |- - Please set openshift_deployment_type to one of: - {{ known_openshift_deployment_types | join(', ') }} - - name: Normalize openshift_release set_fact: # Normalize release if provided, e.g. "v3.5" => "3.5" @@ -47,7 +21,7 @@ - name: Abort when openshift_release is invalid when: - openshift_release is defined - - not openshift_release | match('^\d+(\.\d+){1,3}$') + - not (openshift_release is match('^\d+(\.\d+){1,3}$')) fail: msg: |- openshift_release is "{{ openshift_release }}" which is not a valid version string. 
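The change from `| match` to `is match` above uses the Jinja2 test syntax that newer Ansible releases expect, matching the `is failed`/`is succeeded` conversions made elsewhere in this changeset. A minimal standalone sketch of the same normalize-then-validate flow follows; the play scaffolding and the `regex_replace` normalization step are illustrative assumptions, and only the `is match` check mirrors the patched task verbatim.

```yaml
# Hypothetical reproduction of the openshift_release handling, for
# illustration only; not part of the patch itself.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_release: "v3.9"
  tasks:
    - name: Normalize openshift_release ("v3.9" => "3.9")
      set_fact:
        openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}"

    - name: Abort when openshift_release is invalid
      fail:
        msg: openshift_release is "{{ openshift_release }}" which is not a valid version string.
      when: not (openshift_release is match('^\d+(\.\d+){1,3}$'))
```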
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml index 1c4984467..be0715ab5 100644 --- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml +++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml @@ -45,7 +45,8 @@ - name: Ensure the hosted registry's GlusterFS storage is configured correctly when: - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs'] - - openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips != '' + - openshift_hosted_registry_storage_glusterfs_ips is defined + - openshift_hosted_registry_storage_glusterfs_ips != [] - "'glusterfs_registry' in groups | default([])" fail: msg: |- diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index 0fc2372d2..51c6e0a64 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,9 +1,6 @@ --- -# origin uses community packages named 'origin' -# openshift-enterprise uses Red Hat packages named 'atomic-openshift' -known_openshift_deployment_types: ['origin', 'openshift-enterprise'] -__deprecation_header: "[DEPRECATION WARNING]:" +__deprecation_header: "[DEPRECATION WARNING]: The following are deprecated variables and will no longer be used in the next minor release. Please update your inventory accordingly." # this is a list of variables that we will be deprecating within the next minor release, this list should be expected to change from release to release __warn_deprecated_vars: diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml index 7c848cb12..15ca9838c 100644 --- a/roles/openshift_service_catalog/defaults/main.yml +++ b/roles/openshift_service_catalog/defaults/main.yml @@ -1,6 +1,7 @@ --- openshift_service_catalog_remove: false openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"} +openshift_service_catalog_async_bindings_enabled: false openshift_use_openshift_sdn: True # os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}" diff --git a/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml new file mode 100644 index 000000000..28abcbcfc --- /dev/null +++ b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml @@ -0,0 +1,86 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: system:service-catalog:aggregate-to-admin +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: system:service-catalog:aggregate-to-edit +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +-
apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:service-catalog:aggregate-to-view +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - get + - list + - watch diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index cd7bda2c6..72110b18c 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -12,7 +12,7 @@ - name: Generate signing cert command: > - {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert + {{ openshift_client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer @@ -59,11 +59,6 @@ src: "{{ generated_certs_dir }}/ca.crt" register: apiserver_ca -- shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" - register: get_apiservices - changed_when: no - - name: Create api service oc_obj: state: present @@ -86,4 +81,3 @@ caBundle: "{{ apiserver_ca.content }}" groupPriorityMinimum: 20 versionPriority: 10 - when: "'not found' in get_apiservices.stdout" diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 41a6691c9..4d06c1872 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -6,10 +6,10 @@ register: mktemp changed_when: False -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: Set service_catalog image facts @@ -38,7 +38,7 @@ - name: Make kube-service-catalog project network global command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog - include_tasks: generate_certs.yml @@ -74,74 +74,17 @@ template_name: kube-system-service-catalog-role-bindings namespace: kube-system -- oc_obj: - name: edit - kind: clusterrole - state: list - register: edit_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/edit - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/edit_sc_patch.yml" - vars: - original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" - when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not 
edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update edit role for service catalog and pod preset access - command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml - when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: admin - kind: clusterrole - state: list - register: admin_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/admin - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/admin_sc_patch.yml" - vars: - original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" - when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update admin role for service catalog and pod preset access - command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml - when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: view - kind: clusterrole - state: list - register: view_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/view - template: - src: sc_view_role_patching.j2 - dest: "{{ mktemp.stdout }}/view_sc_patch.yml" - vars: - original_content: "{{ view_yaml.results.results[0] | to_yaml }}" - when: - - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +- copy: + src: openshift_catalog_clusterroles.yml + dest: "{{ mktemp.stdout }}/openshift_catalog_clusterroles.yml" -# only do this if we don't already have the updated role info -- name: update view role for service catalog access - command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml - when: - - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +- name: Apply Service Catalog cluster roles + retries: 5 + delay: 2 + register: task_result + until: task_result.rc == 0 + shell: > + {{ openshift_client_binary }} auth reconcile --config={{ openshift.common.config_base }}/master/admin.kubeconfig -f {{ mktemp.stdout}}/openshift_catalog_clusterroles.yml - oc_adm_policy_user: namespace: kube-service-catalog @@ -179,6 
+122,8 @@ etcd_servers: "{{ openshift.master.etcd_urls | join(',') }}" etcd_cafile: "{{ '/etc/origin/master/master.etcd-ca.crt' if etcd_ca_crt.stat.exists else '/etc/origin/master/ca-bundle.crt' }}" node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}" + # apiserver_ca is defined in generate_certs.yml + ca_hash: "{{ apiserver_ca.content|hash('sha1') }}" - name: Set Service Catalog API Server daemonset oc_obj: diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index a832e1f85..aa32d0513 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@ --- - name: Remove Service Catalog APIServer command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog # TODO: this module doesn't currently remove this #- name: Remove service catalog api service @@ -48,7 +48,7 @@ - name: Remove Service Catalog kube-system Role Bindinds shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template @@ -58,7 +58,7 @@ - name: Remove Service Catalog kube-service-catalog Role Bindinds shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml index b143292b6..84e542eaf 100644 --- a/roles/openshift_service_catalog/tasks/start_api_server.yml +++ b/roles/openshift_service_catalog/tasks/start_api_server.yml @@ -5,7 +5,7 @@ name: "{{ openshift.node.nodename }}" kind: node state: add - labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}" + labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | lib_utils_oo_dict_to_list_of_dict }}" # wait to see that the apiserver is available - name: wait for api server to be ready diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index 4f51b8c3c..a18d29ef0 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -14,6 +14,8 
@@ spec: type: RollingUpdate template: metadata: + annotations: + ca_hash: {{ ca_hash }} labels: app: apiserver spec: @@ -47,7 +49,7 @@ spec: - OriginatingIdentity=true image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} command: ["/usr/bin/service-catalog"] - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: apiserver ports: - containerPort: 6443 diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index 137222f04..6d3ee7d01 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -8,7 +8,7 @@ spec: selector: matchLabels: app: controller-manager - strategy: + updateStrategy: rollingUpdate: maxUnavailable: 1 type: RollingUpdate @@ -38,9 +38,13 @@ spec: - "5m" - --feature-gates - OriginatingIdentity=true +{% if openshift_service_catalog_async_bindings_enabled | bool %} + - --feature-gates + - AsyncBindingOperations=true +{% endif %} image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} command: ["/usr/bin/service-catalog"] - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: controller-manager ports: - containerPort: 8080 diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 deleted file mode 100644 index 59cceafcf..000000000 --- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 +++ /dev/null @@ -1,27 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - create - - update - - delete - - get - - list - - watch - - patch -- apiGroups: - - "settings.k8s.io" - attributeRestrictions: null - resources: - - podpresets - verbs: - - create - - update - - delete - - get - - list - - watch diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 deleted file mode 100644 index 838993854..000000000 --- a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - get - - list - - watch diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index be749a2e1..f7bd58db3 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name | openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. 
diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
deleted file mode 100644
index 59cceafcf..000000000
--- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-{{ original_content }}
-- apiGroups:
-  - "servicecatalog.k8s.io"
-  attributeRestrictions: null
-  resources:
-  - serviceinstances
-  - servicebindings
-  verbs:
-  - create
-  - update
-  - delete
-  - get
-  - list
-  - watch
-  - patch
-- apiGroups:
-  - "settings.k8s.io"
-  attributeRestrictions: null
-  resources:
-  - podpresets
-  verbs:
-  - create
-  - update
-  - delete
-  - get
-  - list
-  - watch
diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
deleted file mode 100644
index 838993854..000000000
--- a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{{ original_content }}
-- apiGroups:
-  - "servicecatalog.k8s.io"
-  attributeRestrictions: null
-  resources:
-  - serviceinstances
-  - servicebindings
-  verbs:
-  - get
-  - list
-  - watch
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index be749a2e1..f7bd58db3 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
 | openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
 | openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
 | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
 | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
 | openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
@@ -125,13 +126,14 @@ registry. These variables start with the prefix
 values in their corresponding non-registry variables. The following variables
 are an exception:

-| Name                                                  | Default value         | Description                             |
-|-------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace        | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name             | 'registry'            | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass     | False                 | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated        | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key  | auto-generated        | Separate from the above
+| Name                                                       | Default value         | Description                             |
+|------------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace             | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name                  | 'registry'            | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass          | False                 | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default  | False                 | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_registry_heketi_admin_key      | auto-generated        | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key       | auto-generated        | Separate from the above

 Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
@@ -147,7 +149,6 @@ Dependencies
 ------------

 * os_firewall
-* openshift_hosted_facts
 * openshift_repos
 * lib_openshift
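Because these are ordinary inventory switches, promoting the GlusterFS StorageClass to the cluster default is a one-line change. A minimal sketch:

    [OSEv3:vars]
    openshift_storage_glusterfs_storageclass=True
    openshift_storage_glusterfs_storageclass_default=True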
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index b7b3c0db2..4cbe262d2 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -5,16 +5,17 @@ openshift_storage_glusterfs_name: 'storage'
 openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
 openshift_storage_glusterfs_use_default_selector: False
 openshift_storage_glusterfs_storageclass: True
-openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_storageclass_default: False
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
 openshift_storage_glusterfs_block_deploy: True
-openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
+openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
 openshift_storage_glusterfs_block_version: 'latest'
 openshift_storage_glusterfs_block_host_vol_create: True
 openshift_storage_glusterfs_block_host_vol_size: 100
 openshift_storage_glusterfs_block_host_vol_max: 15
 openshift_storage_glusterfs_s3_deploy: True
-openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
+openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
 openshift_storage_glusterfs_s3_version: 'latest'
 openshift_storage_glusterfs_s3_account: "{{ omit }}"
 openshift_storage_glusterfs_s3_user: "{{ omit }}"
@@ -28,7 +29,7 @@ openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is
 openshift_storage_glusterfs_heketi_is_missing: True
 openshift_storage_glusterfs_heketi_deploy_is_missing: True
 openshift_storage_glusterfs_heketi_cli: 'heketi-cli'
-openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
 openshift_storage_glusterfs_heketi_version: 'latest'
 openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
 openshift_storage_glusterfs_heketi_user_key: "{{ omit }}"
@@ -51,6 +52,7 @@ openshift_storage_glusterfs_registry_name: 'registry'
 openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
 openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
 openshift_storage_glusterfs_registry_storageclass: False
+openshift_storage_glusterfs_registry_storageclass_default: False
 openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
 openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
 openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
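All of the image defaults above now branch on openshift_deployment_type rather than the retired deployment_type. A quick way to see what a given host resolves, as an illustrative ad-hoc task:

    - name: Show which GlusterFS image this deployment type selects
      debug:
        msg: "{{ openshift_storage_glusterfs_image }}"
      # 'rhgs3/rhgs-server-rhel7' for openshift-enterprise,
      # 'gluster/gluster-centos' otherwise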
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: deploy-heketi
+  labels:
+    glusterfs: heketi-template
+    deploy-heketi: support
+  annotations:
+    description: Bootstrap Heketi installation
+    tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+      deploy-heketi: support
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: deploy-heketi-${CLUSTER_NAME}
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: ${HEKETI_ROUTE}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+      deploy-heketi: support
+  spec:
+    to:
+      kind: Service
+      name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+      deploy-heketi: support
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: deploy-heketi
+        labels:
+          glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+          deploy-heketi: support
+      spec:
+        serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: ${HEKETI_EXECUTOR}
+          - name: HEKETI_FSTAB
+            value: ${HEKETI_FSTAB}
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          - name: config
+            mountPath: /etc/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+        - name: config
+          secret:
+            secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+  displayName: heketi executor type
+  description: Set the executor type, kubernetes or ssh
+  value: kubernetes
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the fstab path, file that is populated with bricks that heketi creates
+  value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
+- name: IMAGE_NAME
+  displayName: heketi container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: heketi container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
+  value: glusterfs
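The role drives this template itself, but reproducing the bootstrap by hand can help when the deploy-heketi stage stalls. A hedged sketch using plain oc, with illustrative parameter values and namespace:

    - name: Manually instantiate deploy-heketi (debugging sketch)
      shell: >
        oc process deploy-heketi -n glusterfs
        -p IMAGE_NAME=heketi/heketi -p IMAGE_VERSION=latest
        -p CLUSTER_NAME=storage
        | oc create -n glusterfs -f -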
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3-pvcs
+  labels:
+    glusterfs: s3-pvcs-template
+    gluster-s3: pvcs-template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${META_PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  required: true
+- name: PVC_SIZE
+  displayName: Primary GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage
+  value: 2Gi
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  required: true
+- name: META_PVC_SIZE
+  displayName: Metadata GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage metadata
+  value: 1Gi
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
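Note that both claims request their class through the legacy volume.beta.kubernetes.io/storage-class annotation. On any reasonably current cluster the first-class field is the equivalent spelling; for comparison, the same primary claim sketched with it (values illustrative):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gluster-s3-claim
    spec:
      storageClassName: glusterfs-storage
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 2Gi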
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3
+  labels:
+    glusterfs: s3-template
+    gluster-s3: template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+  spec:
+    ports:
+    - protocol: TCP
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: s3-pod
+    type: ClusterIP
+    sessionAffinity: None
+  status:
+    loadBalancer: {}
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+  spec:
+    to:
+      kind: Service
+      name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    annotations:
+      openshift.io/scc: privileged
+      description: Defines how to deploy gluster s3 object storage
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+    template:
+      metadata:
+        name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+        labels:
+          glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+          gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+      spec:
+        containers:
+        - name: gluster-s3
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          ports:
+          - name: gluster
+            containerPort: 8080
+            protocol: TCP
+          env:
+          - name: S3_ACCOUNT
+            value: "${S3_ACCOUNT}"
+          - name: S3_USER
+            value: "${S3_USER}"
+          - name: S3_PASSWORD
+            value: "${S3_PASSWORD}"
+          resources: {}
+          volumeMounts:
+          - name: gluster-vol1
+            mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+          - name: gluster-vol2
+            mountPath: "/mnt/gluster-object/gsmetadata"
+          - name: glusterfs-cgroup
+            readOnly: true
+            mountPath: "/sys/fs/cgroup"
+          terminationMessagePath: "/dev/termination-log"
+          securityContext:
+            privileged: true
+        volumes:
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: gluster-vol1
+          persistentVolumeClaim:
+            claimName: ${PVC}
+        - name: gluster-vol2
+          persistentVolumeClaim:
+            claimName: ${META_PVC}
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        serviceAccountName: default
+        serviceAccount: default
+        securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: S3_USER
+  displayName: S3 User
+  description: S3 user who can access the S3 storage account
+  required: true
+- name: S3_PASSWORD
+  displayName: S3 User Password
+  description: Password for the S3 user
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  value: gluster-s3-claim
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  value: gluster-s3-meta-claim
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterblock-provisioner
+  labels:
+    glusterfs: block-template
+    glusterblock: template
+  annotations:
+    description: glusterblock provisioner template
+    tags: glusterfs
+objects:
+- kind: ClusterRole
+  apiVersion: v1
+  metadata:
+    name: glusterblock-provisioner-runner
+    labels:
+      glusterfs: block-provisioner-runner-clusterrole
+      glusterblock: provisioner-runner-clusterrole
+  rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["routes"]
+    verbs: ["get", "list"]
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+      glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+  roleRef:
+    name: glusterblock-provisioner-runner
+  subjects:
+  - kind: ServiceAccount
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+      glusterblock: ${CLUSTER_NAME}-provisioner-dc
+    annotations:
+      description: Defines how to deploy the glusterblock provisioner pod.
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: glusterblock-provisioner
+        labels:
+          glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+      spec:
+        serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+        containers:
+        - name: glusterblock-provisioner
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: PROVISIONER_NAME
+            value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: NAMESPACE
+  displayName: glusterblock provisioner namespace
+  description: The namespace in which these resources are being created
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
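The provisioner pod advertises itself under the PROVISIONER_NAME value, so a StorageClass opts into glusterblock volumes by naming that string. A hedged sketch; the parameters shown are illustrative, modeled on the heketi-backed class elsewhere in this role, not a definitive list:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage-block
    provisioner: gluster.org/glusterblock
    parameters:
      resturl: "http://heketi-storage.example.com"
      restuser: "admin"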
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs-${CLUSTER_NAME}
+    labels:
+      glusterfs: ${CLUSTER_NAME}-daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs: ${CLUSTER_NAME}-pod
+    template:
+      metadata:
+        name: glusterfs-${CLUSTER_NAME}
+        labels:
+          glusterfs: ${CLUSTER_NAME}-pod
+          glusterfs-node: pod
+      spec:
+        nodeSelector: "${{NODE_LABELS}}"
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: GB_GLFS_LRU_COUNT
+            value: "${GB_GLFS_LRU_COUNT}"
+          - name: TCMU_LOGDIR
+            value: "${TCMU_LOGDIR}"
+          resources:
+            requests:
+              memory: 100Mi
+              cpu: 100m
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+            path: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          hostPath:
+            path: "/dev"
+        - name: glusterfs-misc
+          hostPath:
+            path: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: glusterfs-ssl
+          hostPath:
+            path: "/etc/ssl"
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        securityContext: {}
+parameters:
+- name: NODE_LABELS
+  displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+  value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+  displayName: GlusterFS container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
+- name: GB_GLFS_LRU_COUNT
+  displayName: Maximum number of block hosting volumes
+  description: This value is to set maximum number of block hosting volumes.
+  value: "15"
+  required: true
+- name: TCMU_LOGDIR
+  displayName: Tcmu runner log directory
+  description: This value is to set tcmu runner log directory
+  value: "/var/log/glusterfs/gluster-block"
+  required: true
+ value: "15" + required: true +- name: TCMU_LOGDIR + displayName: Tcmu runner log directory + description: This value is to set tcmu runner log directory + value: "/var/log/glusterfs/gluster-block" + required: true diff --git a/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml new file mode 100644 index 000000000..28cdb2982 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + heketi: ${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + heketi: ${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + heketi: ${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + heketi: ${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: ${HEKETI_FSTAB} + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_FSTAB + displayName: heketi fstab path + description: Set the fstab path, file that is populated with bricks that heketi creates + value: /var/lib/heketi/fstab +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi 
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
deleted file mode 100644
index a86c96df7..000000000
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ /dev/null
@@ -1,23 +0,0 @@
-'''
- Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
-'''
-
-
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict given the source and delim delimited '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
-    ''' OpenShift Storage GlusterFS Filters '''
-
-    # pylint: disable=no-self-use, too-few-public-methods
-    def filters(self):
-        ''' Returns the names of the filters provided by this class '''
-        return {
-            'map_from_pairs': map_from_pairs
-        }
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 0cdd33880..aa20245d5 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -10,6 +10,6 @@ galaxy_info:
     versions:
     - 7
 dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
 - role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
index 1664ecc1e..5b4c16740 100644
--- a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
@@ -63,7 +63,7 @@
   until:
   - "gluster_s3_pvcs.results.results[0]['items'] | count > 0"
   # Pod's 'Bound' status must be True
-  - "gluster_s3_pvcs.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
+  - "gluster_s3_pvcs.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
@@ -108,6 +108,6 @@
   until:
   - "gluster_s3_pod.results.results[0]['items'] | count > 0"
   # Pod's 'Ready' status must be True
-  - "gluster_s3_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  - "gluster_s3_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index d6be8c726..e5dcdcab7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -61,6 +61,6 @@
   until:
   - "glusterblock_pod.results.results[0]['items'] | count > 0"
   # Pod's 'Ready' status must be True
-  - "glusterblock_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  - "glusterblock_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 315bc5614..001578406 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -2,10 +2,10 @@
 - name: Make sure heketi-client is installed
   package: name=heketi-client state=present
   when:
-  - not openshift.common.is_atomic | bool
+  - not openshift_is_atomic | bool
   - not glusterfs_heketi_is_native | bool
   register: result
-  until: result | success
+  until: result is succeeded

 - name: Verify heketi-cli is installed
   shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"
@@ -126,7 +126,7 @@
   - "glusterfs_heketi_is_native"
   - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
   # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"

 - name: Check for existing heketi pod
   oc_obj:
@@ -144,7 +144,7 @@
   - "glusterfs_heketi_is_native"
   - "heketi_pod.results.results[0]['items'] | count > 0"
   # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+  - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"

 - name: Generate topology file
   template:
@@ -177,14 +177,14 @@

 - name: Generate heketi admin key
   set_fact:
-    glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+    glusterfs_heketi_admin_key: "{{ 32 | lib_utils_oo_generate_secret }}"
   when:
   - glusterfs_heketi_is_native
   - glusterfs_heketi_admin_key is undefined

 - name: Generate heketi user key
   set_fact:
-    glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+    glusterfs_heketi_user_key: "{{ 32 | lib_utils_oo_generate_secret }}"
   until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"
   delay: 1
   retries: 10
@@ -228,7 +228,7 @@
   until:
   - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
   # Pod's 'Ready' status must be True
-  - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
   when:
@@ -238,14 +238,14 @@

 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"

 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
   changed_when: False

 - name: Place heketi topology on heketi Pod
-  shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+  shell: "{{ openshift_client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
   when:
  - glusterfs_heketi_is_native
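The `result | success` to `result is succeeded` rewrites here and throughout this commit track Ansible 2.5, which deprecates filter-style result checks in favor of Jinja2 tests. The pattern, distilled:

    - name: Install a package with retries (sketch)
      package:
        name: heketi-client
        state: present
      register: result
      until: result is succeeded    # formerly: result | success
      retries: 3
      delay: 10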
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 73b9791eb..a374df0ce 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -4,9 +4,11 @@
     glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+    # map_from_pairs is a custom filter plugin in role lib_utils
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
     glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
+    glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_storageclass_default | bool }}"
     glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
     glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
     glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 30e83e79b..4cc82f1ad 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -21,9 +21,9 @@
     name: "{{ hostvars[item].openshift.node.nodename }}"
     kind: node
     state: absent
-    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
   with_items: "{{ groups.all }}"
-  when: glusterfs_wipe
+  when: "'openshift' in hostvars[item] and glusterfs_wipe"

 - name: Delete pre-existing GlusterFS config
   file:
@@ -60,7 +60,7 @@
     name: "{{ hostvars[item].openshift.node.nodename }}"
     kind: node
     state: add
-    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
   with_items: "{{ glusterfs_nodes | default([]) }}"

 - name: Copy GlusterFS DaemonSet template
@@ -109,6 +109,6 @@
   until:
   - "glusterfs_pods.results.results[0]['items'] | count > 0"
   # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
-  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+  - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 7466702b8..544a6f491 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -4,9 +4,11 @@
     glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+    # map_from_pairs is a custom filter plugin in role lib_utils
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
     glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
+    glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_registry_storageclass_default | bool }}"
     glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
     glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
    glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
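The map_from_pairs filter referenced in both task files above is the one whose original implementation was deleted from this role's filter_plugins (it now lives in lib_utils). Its contract is unchanged, per the deleted code: split a delimited string into a dict. A usage sketch:

    - debug:
        msg: "{{ 'glusterfs=storage-host,region=infra' | map_from_pairs }}"
      # -> {'glusterfs': 'storage-host', 'region': 'infra'}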
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index d23bd42b9..c0a8c53de 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
   register: setup_storage

 - name: Copy heketi-storage list
-  shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+  shell: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"

 # This is used in the subsequent task
 - name: Copy the admin client config
@@ -15,7 +15,7 @@

 # Need `command` here because heketi-storage.json contains multiple objects.
 - name: Copy heketi DB to GlusterFS volume
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+  command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
   when: setup_storage.rc == 0

 - name: Wait for copy job to finish
@@ -28,14 +28,14 @@
   until:
   - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
   # Pod's 'Complete' status must be True
-  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
   failed_when:
   - "'results' in heketi_job.results"
   - "heketi_job.results.results | count > 0"
   # Fail when pod's 'Failed' status is True
-  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
   when: setup_storage.rc == 0

 - name: Delete deploy resources
@@ -120,13 +120,13 @@
   until:
   - "heketi_pod.results.results[0]['items'] | count > 0"
   # Pod's 'Ready' status must be True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"

 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+    glusterfs_heketi_client: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"

 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
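Every wait loop in these tasks uses the same idiom: collect each object's status.conditions, keep the condition of one type, coerce the status strings to booleans, and count the truthy ones. A toy illustration on canned data (the expected result is an assumption based on the filters' oo_ lineage):

    - set_fact:
        fake_jobs:
        - status:
            conditions:
            - {type: Complete, status: "True"}

    - debug:
        msg: "{{ fake_jobs | lib_utils_oo_collect(attribute='status.conditions')
            | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'})
            | map('bool') | select | list | count }}"
      # expected: 1, i.e. one Job reports Complete=True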
diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
index 030fa81c9..3bdfa183f 100644
--- a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
+++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
@@ -9,4 +9,4 @@
   systemd:
     name: systemd-modules-load.service
     state: restarted
-  when: km | changed
+  when: km is changed
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
index 454e84aaf..5ea17428f 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1beta1
 kind: StorageClass
 metadata:
   name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
 provisioner: kubernetes.io/glusterfs
 parameters:
   resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
 provisioner: kubernetes.io/glusterfs
 parameters:
   resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
 provisioner: kubernetes.io/glusterfs
 parameters:
   resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
 provisioner: kubernetes.io/glusterfs
 parameters:
   resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..ca87807fe
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: kubernetes.io/glusterfs
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+  secretNamespace: "{{ glusterfs_namespace }}"
+  secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
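These endpoint/service pairs deliberately share one name: a selector-less Service is what lets the glusterfs volume plugin resolve peers through an Endpoints object of the same name. The heketi template consumes the pair like this (cluster name 'storage' assumed):

    volumes:
    - name: db
      glusterfs:
        endpoints: heketi-db-storage-endpoints
        path: heketidbstorage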
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+  "_port_comment": "Heketi Server Port Number",
+  "port" : "8080",
+
+  "_use_auth": "Enable JWT authorization. Please enable for deployment",
+  "use_auth" : false,
+
+  "_jwt" : "Private keys for access",
+  "jwt" : {
+    "_admin" : "Admin has access to all APIs",
+    "admin" : {
+      "key" : "My Secret"
+    },
+    "_user" : "User only has access to /volumes endpoint",
+    "user" : {
+      "key" : "My Secret"
+    }
+  },
+
+  "_glusterfs_comment": "GlusterFS Configuration",
+  "glusterfs" : {
+
+    "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+    "executor" : "{{ glusterfs_heketi_executor }}",
+
+    "_db_comment": "Database file name",
+    "db" : "/var/lib/heketi/heketi.db",
+
+    "sshexec" : {
+      "keyfile" : "/etc/heketi/private_key",
+      "port" : "{{ glusterfs_heketi_ssh_port }}",
+      "user" : "{{ glusterfs_heketi_ssh_user }}",
+      "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+    },
+
+    "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or existing volume exhausted",
+    "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+    "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+    "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+  }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2
@@ -0,0 +1,49 @@
+{
+    "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+    {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+    {%- if cluster in clusters -%}
+        {%- set _dummy = clusters[cluster].append(node) -%}
+    {%- else -%}
+        {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+    {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+        {
+            "nodes": [
+{%- for node in clusters[cluster] -%}
+                {
+                    "node": {
+                        "hostnames": {
+                            "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+                                "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+                                "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+                                "{{ node }}"
+{%- endif -%}
+                            ],
+                            "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+                                "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+                                "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+                            ]
+                        },
+                        "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+                    },
+                    "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+                        "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+                    ]
+                }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+            ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+    ]
+}
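topology.json.j2 is driven entirely by per-host inventory variables: glusterfs_hostname, glusterfs_ip, glusterfs_zone, glusterfs_cluster and glusterfs_devices, with fallbacks to the openshift facts. A minimal sketch of the hostvars it consumes (hostnames, addresses and devices illustrative):

    [glusterfs]
    node1.example.com glusterfs_zone=1 glusterfs_devices='[ "/dev/vdb" ]'
    node2.example.com glusterfs_zone=2 glusterfs_devices='[ "/dev/vdb" ]'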
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
     versions:
     - 7
 dependencies:
-- role: lib_os_firewall
-- role: openshift_hosted_facts
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 55e4024ec..5c043bc14 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -5,7 +5,7 @@
 - name: Install nfs-utils
   package: name=nfs-utils state=present
   register: result
-  until: result | success
+  until: result is succeeded

 - name: Configure NFS
   lineinfile:
@@ -16,7 +16,7 @@

 - name: Restart nfs-config
   systemd: name=nfs-config state=restarted
-  when: nfs_config | changed
+  when: nfs_config is changed

 - name: Ensure exports directory exists
   file:
@@ -70,4 +70,4 @@
   register: start_result

 - set_fact:
-    nfs_service_status_changed: "{{ start_result | changed }}"
+    nfs_service_status_changed: "{{ start_result is changed }}"
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 2ec8db019..13bd5370c 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
-{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
-{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
-{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
-{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
-{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
-{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
-{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
+"{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}" {{ openshift_hosted_registry_storage_nfs_options }}
+"{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}" {{ openshift_metrics_storage_nfs_options }}
+"{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}" {{ openshift_logging_storage_nfs_options }}
+"{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}" {{ openshift_loggingops_storage_nfs_options }}
+"{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}" {{ openshift_hosted_etcd_storage_nfs_options }}
+"{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}" {{ openshift_prometheus_storage_nfs_options }}
+"{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}" {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+"{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}" {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
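Quoting each export path guards /etc/exports parsing against directories whose configured names contain whitespace. A rendered line from this template now looks like (illustrative values):

    # sketch of one rendered export entry
    "/exports/registry" *(rw,root_squash)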
openshift_prometheus_alertmanager_storage_nfs_options }} +"{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}" {{ openshift_prometheus_alertbuffer_storage_nfs_options }} diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md index cc674d3fd..a11219f6d 100644 --- a/roles/openshift_storage_nfs_lvm/README.md +++ b/roles/openshift_storage_nfs_lvm/README.md @@ -1,7 +1,7 @@ # openshift_storage_nfs_lvm This role is useful to create and export nfs disks for openshift persistent volumes. -It does so by creating lvm partitions on an already setup pv/vg, creating xfs +It does so by creating lvm partitions on an already setup pv/vg, creating xfs filesystem on each partition, mounting the partitions, exporting the mounts via NFS and creating a json file for each mount that an openshift master can use to create persistent volumes. @@ -20,7 +20,7 @@ create persistent volumes. osnl_nfs_export_options: "*(rw,sync,all_squash)" # Directory, where the created partitions should be mounted. They will be -# mounted as <osnl_mount_dir>/<lvm volume name> +# mounted as <osnl_mount_dir>/<lvm volume name> osnl_mount_dir: /exports/openshift # Volume Group to use. @@ -64,11 +64,10 @@ None ## Example Playbook With this playbook, 2 5Gig lvm partitions are created, named stg5g0003 and stg5g0004 -Both of them are mounted into `/exports/openshift` directory. Both directories are +Both of them are mounted into `/exports/openshift` directory. Both directories are exported via NFS. json files are created in /root. - hosts: nfsservers - become: no remote_user: root gather_facts: no roles: @@ -94,7 +93,6 @@ exported via NFS. json files are created in /root. * Create an ansible playbook, say `setupnfs.yaml`: ``` - hosts: nfsservers - become: no remote_user: root gather_facts: no roles: diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml index 50d94f6a3..de47708a5 100644 --- a/roles/openshift_storage_nfs_lvm/meta/main.yml +++ b/roles/openshift_storage_nfs_lvm/meta/main.yml @@ -16,3 +16,4 @@ galaxy_info: - openshift dependencies: - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml index c8e7b6d7c..ff92e59e5 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/main.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml @@ -2,7 +2,7 @@ # TODO -- this may actually work on atomic hosts - fail: msg: "openshift_storage_nfs_lvm is not compatible with atomic host" - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create lvm volumes lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index bee786a90..9a72adbdc 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,9 +1,9 @@ --- - name: Install NFS server package: name=nfs-utils state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result - until: result | success + until: result is succeeded - name: Start rpcbind systemd: diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index 354699637..513dff045 100644 --- a/roles/openshift_version/defaults/main.yml +++ 
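The quoting added to exports.j2 above guards against export paths that contain whitespace, which /etc/exports would otherwise split into separate fields. A hypothetical rendered line, assuming a directory of /exports, a volume name of registry, and options of `*(rw,root_squash)` (values illustrative, not from this patch):

```
"/exports/registry" *(rw,root_squash)
```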
b/roles/openshift_version/defaults/main.yml @@ -8,3 +8,6 @@ openshift_service_type_dict: openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" openshift_use_crio_only: False + +l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}" +l_force_image_tag_to_version: False diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml new file mode 100644 index 000000000..fea0daf77 --- /dev/null +++ b/roles/openshift_version/tasks/check_available_rpms.yml @@ -0,0 +1,10 @@ +--- +- name: Get available {{ openshift_service_type}} version + repoquery: + name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}" + ignore_excluders: true + register: rpm_results + +- fail: + msg: "Package {{ openshift_service_type}} not found" + when: not rpm_results.results.package_found diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml new file mode 100644 index 000000000..e01a56dc1 --- /dev/null +++ b/roles/openshift_version/tasks/first_master.yml @@ -0,0 +1,32 @@ +--- +# Determine the openshift_version to configure if none has been specified or set previously. + +# Protect the installed version by default unless explicitly told not to, or given an +# openshift_version already. +- name: Use openshift.common.version fact as version to configure if already installed + set_fact: + openshift_version: "{{ openshift.common.version }}" + when: + - openshift.common.version is defined + - openshift_version is not defined or openshift_version == "" + - openshift_protect_installed_version | bool + +- include_tasks: "{{ l_first_master_version_task_file }}" + +- block: + - debug: + msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}" + - set_fact: + openshift_pkg_version: -{{ openshift_version }} + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined + +- block: + - debug: + msg: "openshift_image_tag set to v{{ openshift_version }}" + - set_fact: + openshift_image_tag: v{{ openshift_version }} + when: > + openshift_image_tag is not defined + or l_force_image_tag_to_version | bool diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml index 71f957b78..3ed1d2cfe 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/first_master_containerized_version.yml @@ -7,6 +7,7 @@ when: - openshift_image_tag is defined - openshift_version is not defined + - not (openshift_version_reinit | default(false)) - name: Set containerized version to configure if openshift_release specified set_fact: @@ -20,7 +21,7 @@ docker run --rm {{ openshift_cli_image }}:latest version register: cli_image_version when: - - openshift_version is not defined + - openshift_version is not defined or openshift_version_reinit | default(false) - not openshift_use_crio_only # Origin latest = pre-release version (i.e. 
v1.3.0-alpha.1-321-gb095e3a) @@ -34,7 +35,7 @@ - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" - when: openshift_version is not defined + when: openshift_version is not defined or openshift_version_reinit | default(false) # If we got an openshift_version like "3.2", lookup the latest 3.2 container version # and use that value instead. @@ -62,4 +63,4 @@ # dangly +c0mm1t-offset tags in the version. See also, # openshift_facts.py - set_fact: - openshift_version: "{{ openshift_version | oo_chomp_commit_offset }}" + openshift_version: "{{ openshift_version | lib_utils_oo_chomp_commit_offset }}" diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml new file mode 100644 index 000000000..5d92f90c6 --- /dev/null +++ b/roles/openshift_version/tasks/first_master_rpm_version.yml @@ -0,0 +1,20 @@ +--- +- name: Set rpm version to configure if openshift_pkg_version specified + set_fact: + # Expects a leading "-" in inventory, strip it off here, and remove trailing release, + openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}" + when: + - openshift_pkg_version is defined + - openshift_version is not defined + - not (openshift_version_reinit | default(false)) + +# These tasks should only be run against masters and nodes +- name: Set openshift_version for rpm installation + include_tasks: check_available_rpms.yml + +- set_fact: + openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}" + when: openshift_version is not defined or ( openshift_version_reinit | default(false) ) +- set_fact: + openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}" + when: openshift_version_reinit | default(false) diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index ae0f68a5b..b42794858 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -1,210 +1,2 @@ --- -# Determine the openshift_version to configure if none has been specified or set previously. - -- set_fact: - is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}" - is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" - -# Block attempts to install origin without specifying some kind of version information. -# This is because the latest tags for origin are usually alpha builds, which should not -# be used by default. Users must indicate what they want. -- name: Abort when we cannot safely guess what Origin image version the user wanted - fail: - msg: |- - To install a containerized Origin release, you must set openshift_release or - openshift_image_tag in your inventory to specify which version of the OpenShift - component images to use. You may want the latest (usually alpha) releases or - a more stable release. (Suggestion: add openshift_release="x.y" to inventory.) 
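As a worked example of the stdout parsing in first_master_containerized_version.yml above: given the sample pre-release output quoted in the comment, a first line of `openshift v1.3.0-alpha.1-321-gb095e3a` becomes `v1.3.0-alpha.1-321-gb095e3a` after `split(' ')[1]`, `v1.3.0` after `split('-')[0]`, and `1.3.0` after `[1:]`. A runnable sketch of that expression, outside the role:

```yaml
- hosts: localhost
  gather_facts: no
  vars:
    first_line: "openshift v1.3.0-alpha.1-321-gb095e3a"   # assumed sample of `openshift version` output
  tasks:
    - debug:
        msg: "{{ first_line.split(' ')[1].split('-')[0][1:] }}"   # prints "1.3.0"
```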
- when: - - is_containerized | bool - - openshift.common.deployment_type == 'origin' - - openshift_release is not defined - - openshift_image_tag is not defined - -# Normalize some values that we need in a certain format that might be confusing: -- set_fact: - openshift_release: "{{ openshift_release[1:] }}" - when: - - openshift_release is defined - - openshift_release[0] == 'v' - -- set_fact: - openshift_release: "{{ openshift_release | string }}" - when: - - openshift_release is defined - -# Verify that the image tag is in a valid format -- when: - - openshift_image_tag is defined - - openshift_image_tag != "latest" - block: - - # Verifies that when the deployment type is origin the version: - # - starts with a v - # - Has 3 integers seperated by dots - # It also allows for optional trailing data which: - # - must start with a dash - # - may contain numbers, letters, dashes and dots. - - name: (Origin) Verify openshift_image_tag is valid - when: openshift.common.deployment_type == 'origin' - assert: - that: - - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}" - msg: |- - openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1 - You specified openshift_image_tag={{ openshift_image_tag }} - - # Verifies that when the deployment type is openshift-enterprise the version: - # - starts with a v - # - Has at least 2 integers seperated by dots - # It also allows for optional trailing data which: - # - must start with a dash - # - may contain numbers - # - may containe dots (https://github.com/openshift/openshift-ansible/issues/5192) - # - - name: (Enterprise) Verify openshift_image_tag is valid - when: openshift.common.deployment_type == 'openshift-enterprise' - assert: - that: - - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}" - msg: |- - openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, - v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6 - You specified openshift_image_tag={{ openshift_image_tag }} - -# Make sure we copy this to a fact if given a var: -- set_fact: - openshift_version: "{{ openshift_version | string }}" - when: openshift_version is defined - -# Protect the installed version by default unless explicitly told not to, or given an -# openshift_version already. 
-- name: Use openshift.common.version fact as version to configure if already installed - set_fact: - openshift_version: "{{ openshift.common.version }}" - when: - - openshift.common.version is defined - - openshift_version is not defined or openshift_version == "" - - openshift_protect_installed_version | bool - -# The rest of these tasks should only execute on -# masters and nodes as we can verify they have subscriptions -- when: - - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] - block: - - name: Set openshift_version for rpm installation - include_tasks: set_version_rpm.yml - when: not is_containerized | bool - - - name: Set openshift_version for containerized installation - include_tasks: set_version_containerized.yml - when: is_containerized | bool - - - block: - - name: Get available {{ openshift_service_type}} version - repoquery: - name: "{{ openshift_service_type}}" - ignore_excluders: true - register: rpm_results - - fail: - msg: "Package {{ openshift_service_type}} not found" - when: not rpm_results.results.package_found - - set_fact: - openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" - - name: Fail if rpm version and docker image version are different - fail: - msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" - # Both versions have the same string representation - when: - - openshift_rpm_version != openshift_version - # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ - - openshift_pkg_version is not defined - - openshift_image_tag is not defined - when: - - is_containerized | bool - - not is_atomic | bool - - # Warn if the user has provided an openshift_image_tag but is not doing a containerized install - # NOTE: This will need to be modified/removed for future container + rpm installations work. - - name: Warn if openshift_image_tag is defined when not doing a containerized install - debug: - msg: > - openshift_image_tag is used for containerized installs. If you are trying to - specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node. - when: - - not is_containerized | bool - - openshift_image_tag is defined - - # At this point we know openshift_version is set appropriately. Now we set - # openshift_image_tag and openshift_pkg_version, so all roles can always assume - # each of this variables *will* be set correctly and can use them per their - # intended purpose. - - - block: - - debug: - msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}" - - - set_fact: - openshift_image_tag: v{{ openshift_version }} - - when: openshift_image_tag is not defined - - - block: - - debug: - msg: "openshift_pkg_version was not defined. 
Falling back to -{{ openshift_version }}" - - - set_fact: - openshift_pkg_version: -{{ openshift_version }} - - when: - - openshift_pkg_version is not defined - - openshift_upgrade_target is not defined - - - fail: - msg: openshift_version role was unable to set openshift_version - name: Abort if openshift_version was not set - when: openshift_version is not defined - - - fail: - msg: openshift_version role was unable to set openshift_image_tag - name: Abort if openshift_image_tag was not set - when: openshift_image_tag is not defined - - - fail: - msg: openshift_version role was unable to set openshift_pkg_version - name: Abort if openshift_pkg_version was not set - when: - - openshift_pkg_version is not defined - - openshift_upgrade_target is not defined - - - - fail: - msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." - name: Abort if openshift_pkg_version was not set - when: - - not is_containerized | bool - - openshift_version == '0.0' - - # We can't map an openshift_release to full rpm version like we can with containers; make sure - # the rpm version we looked up matches the release requested and error out if not. - - name: For an RPM install, abort when the release requested does not match the available version. - when: - - not is_containerized | bool - - openshift_release is defined - assert: - that: - - openshift_version.startswith(openshift_release) | bool - msg: |- - You requested openshift_release {{ openshift_release }}, which is not matched by - the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }} - on host {{ inventory_hostname }}. - We will only install the latest RPMs, so please ensure you are getting the release - you expect. You may need to adjust your Ansible inventory, modify the repositories - available on the host, or run the appropriate OpenShift upgrade playbook. - - # The end result of these three variables is quite important so make sure they are displayed and logged: - - debug: var=openshift_release - - - debug: var=openshift_image_tag - - - debug: var=openshift_pkg_version +# This role is meant to be used with include_role. diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml new file mode 100644 index 000000000..eddd5ff42 --- /dev/null +++ b/roles/openshift_version/tasks/masters_and_nodes.yml @@ -0,0 +1,42 @@ +--- +# These tasks should only be run against masters and nodes + +- block: + - name: Check openshift_version for rpm installation + include_tasks: check_available_rpms.yml + - name: Fail if rpm version and docker image version are different + fail: + msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}" + # Both versions have the same string representation + when: + - openshift_version not in rpm_results.results.versions.available_versions.0 + - openshift_version_reinit | default(false) + + # block when + when: not openshift_is_atomic | bool + +# We can't map an openshift_release to full rpm version like we can with containers; make sure +# the rpm version we looked up matches the release requested and error out if not. +- name: For an RPM install, abort when the release requested does not match the available version. 
+ when: + - not openshift_is_containerized | bool + - openshift_release is defined + assert: + that: + - l_rpm_version.startswith(openshift_release) | bool + msg: |- + You requested openshift_release {{ openshift_release }}, which is not matched by + the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ l_rpm_version }} + on host {{ inventory_hostname }}. + We will only install the latest RPMs, so please ensure you are getting the release + you expect. You may need to adjust your Ansible inventory, modify the repositories + available on the host, or run the appropriate OpenShift upgrade playbook. + vars: + l_rpm_version: "{{ rpm_results.results.versions.available_versions.0 }}" + +# The end result of these three variables is quite important so make sure they are displayed and logged: +- debug: var=openshift_release + +- debug: var=openshift_image_tag + +- debug: var=openshift_pkg_version diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml deleted file mode 100644 index c7ca5ceae..000000000 --- a/roles/openshift_version/tasks/set_version_rpm.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Set rpm version to configure if openshift_pkg_version specified - set_fact: - # Expects a leading "-" in inventory, strip it off here, and remove trailing release, - openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}" - when: - - openshift_pkg_version is defined - - openshift_version is not defined - -- block: - - name: Get available {{ openshift_service_type}} version - repoquery: - name: "{{ openshift_service_type}}" - ignore_excluders: true - register: rpm_results - - - fail: - msg: "Package {{ openshift_service_type}} not found" - when: not rpm_results.results.package_found - - - set_fact: - openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" - when: - - openshift_version is not defined diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml new file mode 100644 index 000000000..627db393a --- /dev/null +++ b/roles/openshift_web_console/defaults/main.yml @@ -0,0 +1,2 @@ +--- +openshift_web_console_nodeselector: {"node-role.kubernetes.io/master":"true"} diff --git a/roles/openshift_web_console/files/console-config.yaml b/roles/openshift_web_console/files/console-config.yaml new file mode 100644 index 000000000..55c650fbe --- /dev/null +++ b/roles/openshift_web_console/files/console-config.yaml @@ -0,0 +1,24 @@ +apiVersion: webconsole.config.openshift.io/v1 +kind: WebConsoleConfiguration +clusterInfo: + consolePublicURL: https://127.0.0.1:8443/console/ + loggingPublicURL: "" + logoutPublicURL: "" + masterPublicURL: https://127.0.0.1:8443 + metricsPublicURL: "" +extensions: + scriptURLs: [] + stylesheetURLs: [] + properties: null +features: + inactivityTimeoutMinutes: 0 + clusterResourceOverridesEnabled: false +servingInfo: + bindAddress: 0.0.0.0:8443 + bindNetwork: tcp4 + certFile: /var/serving-cert/tls.crt + clientCA: "" + keyFile: /var/serving-cert/tls.key + maxRequestsInFlight: 0 + namedCertificates: null + requestTimeoutSeconds: 0 diff --git a/roles/openshift_web_console/files/console-rbac-template.yaml b/roles/openshift_web_console/files/console-rbac-template.yaml new file mode 100644 index 000000000..9ee117199 --- /dev/null +++ b/roles/openshift_web_console/files/console-rbac-template.yaml @@ -0,0 +1,38 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: web-console-server-rbac 
+parameters: +- name: NAMESPACE + # This namespace cannot be changed. Only `openshift-web-console` is supported. + value: openshift-web-console +objects: + + +# allow grant powers to the webconsole server for cluster inspection +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRole + metadata: + name: system:openshift:web-console-server + rules: + - apiGroups: + - "servicecatalog.k8s.io" + resources: + - clusterservicebrokers + verbs: + - get + - list + - watch + +# Grant the service account for the web console +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: system:openshift:web-console-server + roleRef: + kind: ClusterRole + name: system:openshift:web-console-server + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: webconsole diff --git a/roles/openshift_web_console/files/console-template.yaml b/roles/openshift_web_console/files/console-template.yaml new file mode 100644 index 000000000..547e7a265 --- /dev/null +++ b/roles/openshift_web_console/files/console-template.yaml @@ -0,0 +1,127 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: openshift-web-console + annotations: + openshift.io/display-name: OpenShift Web Console + description: The server for the OpenShift web console. + iconClass: icon-openshift + tags: openshift,infra + openshift.io/documentation-url: https://github.com/openshift/origin-web-console-server + openshift.io/support-url: https://access.redhat.com + openshift.io/provider-display-name: Red Hat, Inc. +parameters: +- name: IMAGE + value: openshift/origin-web-console:latest +- name: NAMESPACE + # This namespace cannot be changed. Only `openshift-web-console` is supported. + value: openshift-web-console +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG +- name: NODE_SELECTOR + value: "{}" +- name: REPLICA_COUNT + value: "1" +objects: + +# to create the web console server +- apiVersion: apps/v1beta1 + kind: Deployment + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + webconsole: "true" + spec: + replicas: "${{REPLICA_COUNT}}" + strategy: + type: Recreate + template: + metadata: + name: webconsole + labels: + webconsole: "true" + spec: + serviceAccountName: webconsole + containers: + - name: webconsole + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/origin-web-console" + - "--audit-log-path=-" + - "-v=${LOGLEVEL}" + - "--config=/var/webconsole-config/webconsole-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/webconsole-config + name: webconsole-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + livenessProbe: + httpGet: + path: / + port: 8443 + scheme: HTTPS + resources: + requests: + cpu: 100m + memory: 100Mi + nodeSelector: "${{NODE_SELECTOR}}" + volumes: + - name: serving-cert + secret: + defaultMode: 400 + secretName: webconsole-serving-cert + - name: webconsole-config + configMap: + defaultMode: 440 + name: webconsole-config + +# to create the config for the web console +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: webconsole-config + labels: + app: openshift-web-console + data: + webconsole-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + +# to be able to 
expose web console inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + annotations: + service.alpha.openshift.io/serving-cert-secret-name: webconsole-serving-cert + prometheus.io/scrape: "true" + prometheus.io/scheme: https + spec: + selector: + webconsole: "true" + ports: + - name: https + port: 443 + targetPort: 8443 diff --git a/roles/openshift_web_console/meta/main.yaml b/roles/openshift_web_console/meta/main.yaml new file mode 100644 index 000000000..033c1e3a3 --- /dev/null +++ b/roles/openshift_web_console/meta/main.yaml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: OpenShift Development <dev@lists.openshift.redhat.com> + description: Deploy OpenShift web console + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.4 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - all + categories: + - openshift +dependencies: +- role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml new file mode 100644 index 000000000..f79a05c94 --- /dev/null +++ b/roles/openshift_web_console/tasks/install.yml @@ -0,0 +1,164 @@ +--- +# Fact setting +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set openshift_web_console facts + set_fact: + openshift_web_console_prefix: "{{ openshift_web_console_prefix | default(__openshift_web_console_prefix) }}" + openshift_web_console_version: "{{ openshift_web_console_version | default(__openshift_web_console_version) }}" + openshift_web_console_image_name: "{{ openshift_web_console_image_name | default(__openshift_web_console_image_name) }}" + # Default the replica count to the number of masters. + openshift_web_console_replica_count: "{{ openshift_web_console_replica_count | default(groups.oo_masters_to_config | length) }}" + +- name: Ensure openshift-web-console project exists + oc_project: + name: openshift-web-console + state: present + node_selector: + - "" + +- name: Make temp directory for web console templates + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp + changed_when: False + +- name: Copy admin client config + command: > + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: false + +- name: Copy web console templates to temp directory + copy: + src: "{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __console_template_file }}" + - "{{ __console_rbac_file }}" + - "{{ __console_config_file }}" + +# Check if an existing webconsole-config config map exists. If so, use those +# contents so we don't overwrite changes. +- name: Read the existing web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: list + register: webconsole_config_map + +- set_fact: + existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}" + +- name: Copy the existing web console config to temp directory + copy: + content: "{{ existing_config_map_data['webconsole-config.yaml'] }}" + dest: "{{ mktemp.stdout }}/{{ __console_config_file }}" + when: existing_config_map_data['webconsole-config.yaml'] is defined + +# Generate a new config when a config map is not defined. 
+- when: existing_config_map_data['webconsole-config.yaml'] is not defined + block: + # Migrate the previous master-config.yaml asset config if it exists into the new + # web console config config map. + - name: Read existing assetConfig in master-config.yaml + slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: master_config_output + + - set_fact: + config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}" + + - set_fact: + cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}" + + # Update properties in the config template based on inventory vars when the + # asset config does not exist. + - name: Set web console config properties from inventory variables + yedit: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + edits: + - key: clusterInfo#consolePublicURL + # Must have a trailing slash + value: "{{ openshift.master.public_console_url }}/" + - key: clusterInfo#masterPublicURL + value: "{{ openshift.master.public_api_url }}" + - key: clusterInfo#logoutPublicURL + value: "{{ openshift.master.logout_url | default('') }}" + - key: features#inactivityTimeoutMinutes + value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}" + - key: features#clusterResourceOverridesEnabled + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" + - key: extensions#scriptURLs + value: "{{ openshift_web_console_extension_script_urls | default([]) }}" + - key: extensions#stylesheetURLs + value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}" + - key: extensions#properties + value: "{{ openshift_web_console_extension_properties | default({}) }}" + separator: '#' + state: present + when: config_to_migrate.assetConfig is not defined + + - name: Migrate assetConfig from master-config.yaml + yedit: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + edits: + - key: clusterInfo#consolePublicURL + value: "{{ config_to_migrate.assetConfig.publicURL }}" + - key: clusterInfo#masterPublicURL + value: "{{ config_to_migrate.assetConfig.masterPublicURL }}" + - key: clusterInfo#logoutPublicURL + value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}" + - key: clusterInfo#metricsPublicURL + value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}" + - key: clusterInfo#loggingPublicURL + value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}" + - key: servingInfo#maxRequestsInFlight + value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}" + - key: servingInfo#requestTimeoutSeconds + value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}" + - key: features#clusterResourceOverridesEnabled + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" + separator: '#' + state: present + when: config_to_migrate.assetConfig is defined + +- slurp: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + register: updated_console_config + +- name: Reconcile with the web console RBAC file + shell: > + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" --config={{ mktemp.stdout }}/admin.kubeconfig + | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f - + +- name: Apply 
the web console template file + shell: > + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}" + --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}" + --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}" + --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }} + --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}" + --config={{ mktemp.stdout }}/admin.kubeconfig + | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f - + +- name: Verify that the web console is running + command: > + curl -k https://webconsole.openshift-web-console.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: console_health + until: console_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- name: Remove temp directory + file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False diff --git a/roles/openshift_web_console/tasks/main.yml b/roles/openshift_web_console/tasks/main.yml new file mode 100644 index 000000000..937bebf25 --- /dev/null +++ b/roles/openshift_web_console/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include_tasks: install.yml + when: openshift_web_console_install | default(true) | bool + +- include_tasks: remove.yml + when: not openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_web_console/tasks/remove.yml b/roles/openshift_web_console/tasks/remove.yml new file mode 100644 index 000000000..f0712a993 --- /dev/null +++ b/roles/openshift_web_console/tasks/remove.yml @@ -0,0 +1,5 @@ +--- +- name: Remove openshift-web-console project + oc_project: + name: openshift-web-console + state: absent diff --git a/roles/openshift_web_console/tasks/remove_old_asset_config.yml b/roles/openshift_web_console/tasks/remove_old_asset_config.yml new file mode 100644 index 000000000..34158150c --- /dev/null +++ b/roles/openshift_web_console/tasks/remove_old_asset_config.yml @@ -0,0 +1,19 @@ +--- +# Remove the obsolete assetConfig stanza from master-config.yaml. Since the +# web console has been split out into a separate deployment, those settings +# are no longer used. +- name: Remove assetConfig from master-config.yaml + yedit: + state: absent + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: assetConfig + +# This file was written by wire_aggregator.yml. It is no longer needed since +# the web console now discovers if the template service broker is running on +# startup. Remove the file if it exists. +- name: Remove obsolete web console / service catalog extension file + file: + state: absent + # Hard-code the path instead of using `openshift.common.config_base` since + # the path is hard-coded in wire_aggregator.yml. 
+ path: /etc/origin/master/openshift-ansible-catalog-console.js diff --git a/roles/openshift_web_console/tasks/rollout_console.yml b/roles/openshift_web_console/tasks/rollout_console.yml new file mode 100644 index 000000000..75682ba1d --- /dev/null +++ b/roles/openshift_web_console/tasks/rollout_console.yml @@ -0,0 +1,20 @@ +--- +- name: Check if console deployment exists + oc_obj: + kind: deployments + name: webconsole + namespace: openshift-web-console + state: list + register: console_deployment + +# There's currently no command to trigger a rollout for a k8s deployment +# without changing the pod spec. Add an annotation to force a rollout. +- name: Rollout updated web console deployment + oc_edit: + kind: deployments + name: webconsole + namespace: openshift-web-console + separator: '#' + content: + spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" + when: console_deployment.results.results.0 | length > 0 diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml new file mode 100644 index 000000000..967222ea4 --- /dev/null +++ b/roles/openshift_web_console/tasks/update_console_config.yml @@ -0,0 +1,67 @@ +--- +# This task updates asset config values in the webconsole-config config map in +# the openshift-web-console namespace. The values to set are passed in the +# variable `console_config_edits`, which is an array of objects with `key` and +# `value` properties in the same format as `yedit` module `edits`. Only +# properties passed are updated. The separator for nested properties is `#`. +# +# Note that this triggers a redeployment of the console and a brief downtime +# since it uses a `Recreate` strategy. +# +# Example usage: +# +# - include_role: +# name: openshift_web_console +# tasks_from: update_console_config.yml +# vars: +# console_config_edits: +# - key: clusterInfo#loggingPublicURL +# value: "https://{{ openshift_logging_kibana_hostname }}" +# when: openshift_web_console_install | default(true) | bool + +- name: Read the existing web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: list + register: webconsole_config_map + +- set_fact: + existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}" + +- when: existing_config_map_data['webconsole-config.yaml'] is defined + block: + - name: Make temp directory + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp_console + changed_when: False + + - name: Copy the existing web console config to temp directory + copy: + content: "{{ existing_config_map_data['webconsole-config.yaml'] }}" + dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml" + + - name: Change web console config properties + yedit: + src: "{{ mktemp_console.stdout }}/webconsole-config.yaml" + edits: "{{ console_config_edits }}" + separator: '#' + state: present + + - name: Update web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: present + from_file: + webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml" + register: update_console_config_map + + - name: Remove temp directory + file: + state: absent + name: "{{ mktemp_console.stdout }}" + changed_when: False + + - include_tasks: rollout_console.yml + when: update_console_config_map.changed | bool diff --git a/roles/openshift_web_console/vars/default_images.yml
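The annotation trick in rollout_console.yml above is roughly what one would do by hand with `oc patch`; a hypothetical one-off equivalent (the timestamp value is illustrative, not part of the role):

```
oc patch deployment webconsole -n openshift-web-console \
  -p '{"spec":{"template":{"metadata":{"annotations":{"installer-triggered-rollout":"2018-01-01T00:00:00.000000Z"}}}}}'
```

Any change to the pod template, including a new annotation value, is enough to make the Deployment roll out fresh pods.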
b/roles/openshift_web_console/vars/default_images.yml new file mode 100644 index 000000000..42d331ac5 --- /dev/null +++ b/roles/openshift_web_console/vars/default_images.yml @@ -0,0 +1,4 @@ +--- +__openshift_web_console_prefix: "docker.io/openshift/origin-" +__openshift_web_console_version: "latest" +__openshift_web_console_image_name: "web-console" diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml new file mode 100644 index 000000000..72bff5d01 --- /dev/null +++ b/roles/openshift_web_console/vars/main.yml @@ -0,0 +1,4 @@ +--- +__console_template_file: "console-template.yaml" +__console_rbac_file: "console-rbac-template.yaml" +__console_config_file: "console-config.yaml" diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml new file mode 100644 index 000000000..375c22067 --- /dev/null +++ b/roles/openshift_web_console/vars/openshift-enterprise.yml @@ -0,0 +1,4 @@ +--- +__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-" +__openshift_web_console_version: "v3.9" +__openshift_web_console_image_name: "web-console" diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index be0b8291a..5ee11f7bd 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -32,7 +32,7 @@ Use iptables: --- - hosts: servers task: - - include_role: + - import_role: name: os_firewall vars: os_firewall_use_firewalld: false @@ -44,7 +44,7 @@ Use firewalld: - hosts: servers vars: tasks: - - include_role: + - import_role: name: os_firewall vars: os_firewall_use_firewalld: true diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml index 1e27ebaf9..fa933da51 100644 --- a/roles/os_firewall/tasks/firewalld.yml +++ b/roles/os_firewall/tasks/firewalld.yml @@ -2,14 +2,17 @@ - name: Fail - Firewalld is not supported on Atomic Host fail: msg: "Firewalld is not supported on Atomic Host" - when: r_os_firewall_is_atomic | bool + when: + - r_os_firewall_is_atomic | bool + - not openshift_enable_unsupported_configurations | default(false) - name: Install firewalld packages package: name: firewalld state: present register: result - until: result | success + until: result is succeeded + when: not r_os_firewall_is_atomic | bool - name: Ensure iptables services are not enabled systemd: @@ -21,12 +24,14 @@ - iptables - ip6tables register: task_result - failed_when: task_result|failed and 'could not' not in task_result.msg|lower + failed_when: + - task_result is failed + - ('could not' not in task_result.msg|lower) - name: Wait 10 seconds after disabling iptables pause: seconds: 10 - when: task_result | changed + when: task_result is changed - name: Start and enable firewalld service systemd: @@ -40,13 +45,13 @@ - name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail pause: seconds: 10 - when: result | changed + when: result is changed - name: Restart polkitd systemd: name: polkit state: restarted - when: result | changed + when: result is changed # Fix suspected race between firewalld and polkit BZ1436964 - name: Wait for polkit action to have been created diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml index a7c13e487..49d658d37 100644 --- a/roles/os_firewall/tasks/iptables.yml +++ b/roles/os_firewall/tasks/iptables.yml @@ -7,12 +7,14 @@ enabled: no masked: yes register: task_result - failed_when: task_result|failed and 'could not' not 
in task_result.msg|lower + failed_when: + - task_result is failed + - ('could not' not in task_result.msg|lower) - name: Wait 10 seconds after disabling firewalld pause: seconds: 10 - when: task_result | changed + when: task_result is changed - name: Install iptables packages package: @@ -23,7 +25,7 @@ - iptables-services when: not r_os_firewall_is_atomic | bool register: result - until: result | success + until: result is succeeded - name: Start and enable iptables service systemd: @@ -40,4 +42,4 @@ - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail pause: seconds: 10 - when: result | changed + when: result is changed diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml index 60d665587..9d8ec7887 100644 --- a/roles/os_update_latest/tasks/main.yml +++ b/roles/os_update_latest/tasks/main.yml @@ -2,4 +2,4 @@ - name: Update all packages package: name=* state=latest register: result - until: result | success + until: result is succeeded diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml deleted file mode 100644 index 8acdfb969..000000000 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- set_fact: - openshift_release: "{{ openshift_release[1:] }}" - when: - - openshift_release is defined - - openshift_release[0] == 'v' - -- name: Disable all repositories - command: subscription-manager repos --disable="*" - -- name: Enable RHEL repositories - command: subscription-manager repos \ - --enable="rhel-7-server-rpms" \ - --enable="rhel-7-server-extras-rpms" \ - --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \ - --enable="rhel-7-fast-datapath-rpms" - register: subscribe_repos - until: subscribe_repos | succeeded diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index 3466b7e44..e7eb6c572 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -1,68 +1,48 @@ --- -# TODO: Enhance redhat_subscription module -# to make it able to attach to a pool -# to make it able to enable repositories - - fail: msg: "This role is only supported for Red Hat hosts" when: ansible_distribution != 'RedHat' -- fail: - msg: The rhsub_user variable is required for this role. - when: rhsub_user is not defined or not rhsub_user - -- fail: - msg: The rhsub_pass variable is required for this role. - when: rhsub_pass is not defined or not rhsub_pass - -- name: Detecting Atomic Host Operating System - stat: - path: /run/ostree-booted - register: ostree_booted - -- name: Satellite preparation - command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm" - args: - creates: /etc/rhsm/ca/katello-server-ca.pem - when: rhsub_server is defined and rhsub_server - - name: Install Red Hat Subscription manager yum: name: subscription-manager state: present register: result - until: result | success + until: result is succeeded + +- name: Is host already registered? 
+ command: "subscription-manager version" + register: rh_subscribed + changed_when: False -- name: RedHat subscriptions +- name: Register host redhat_subscription: username: "{{ rhsub_user }}" password: "{{ rhsub_pass }}" register: rh_subscription - until: rh_subscription | succeeded + until: rh_subscription is succeeded + when: + - "'not registered' in rh_subscribed.stdout" -- name: Retrieve the OpenShift Pool ID - command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only - register: openshift_pool_id - until: openshift_pool_id | succeeded - changed_when: False +- fail: + msg: 'Unable to register host with Red Hat Subscription Manager' + when: + - "'not registered' in rh_subscribed.stdout" + - rh_subscription.failed - name: Determine if OpenShift Pool Already Attached - command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only + command: "subscription-manager list --consumed --pool-only --matches '*OpenShift*'" register: openshift_pool_attached - until: openshift_pool_attached | succeeded changed_when: False - when: openshift_pool_id.stdout == '' - -- fail: - msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools" - when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == '' + ignore_errors: yes - name: Attach to OpenShift Pool - command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }} - register: subscribe_pool - until: subscribe_pool | succeeded - when: openshift_pool_id.stdout != '' + command: "subscription-manager attach --pool {{ rhsub_pool }}" + register: openshift_pool_attached + changed_when: "'Successfully attached a subscription' in openshift_pool_attached.stdout" + when: rhsub_pool not in openshift_pool_attached.stdout -- include_tasks: enterprise.yml +- include_tasks: satellite.yml when: - - not ostree_booted.stat.exists | bool + - rhsub_server is defined + - rhsub_server diff --git a/roles/rhel_subscribe/tasks/satellite.yml b/roles/rhel_subscribe/tasks/satellite.yml new file mode 100644 index 000000000..dadbe3487 --- /dev/null +++ b/roles/rhel_subscribe/tasks/satellite.yml @@ -0,0 +1,5 @@ +--- +- name: Satellite preparation + command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + args: + creates: /etc/rhsm/ca/katello-server-ca.pem diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml index 421b4ecf9..3465832cc 100644 --- a/roles/template_service_broker/defaults/main.yml +++ b/roles/template_service_broker/defaults/main.yml @@ -3,3 +3,4 @@ template_service_broker_remove: False template_service_broker_install: True openshift_template_service_broker_namespaces: ['openshift'] +template_service_broker_selector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}" diff --git a/roles/template_service_broker/files/apiserver-config.yaml b/roles/template_service_broker/files/apiserver-config.yaml new file mode 100644 index 000000000..e4048d1da --- /dev/null +++ b/roles/template_service_broker/files/apiserver-config.yaml @@ -0,0 +1,4 @@ +kind: TemplateServiceBrokerConfig +apiVersion: config.templateservicebroker.openshift.io/v1 +templateNamespaces: +- openshift diff --git a/roles/template_service_broker/files/apiserver-template.yaml b/roles/template_service_broker/files/apiserver-template.yaml new file mode 100644 index 000000000..4dd9395d0 --- /dev/null +++ 
b/roles/template_service_broker/files/apiserver-template.yaml @@ -0,0 +1,125 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-apiserver +parameters: +- name: IMAGE + value: openshift/origin-template-service-broker:latest +- name: NAMESPACE + value: openshift-template-service-broker +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG + value: | + kind: TemplateServiceBrokerConfig + apiVersion: config.templateservicebroker.openshift.io/v1 + templateNamespaces: + - openshift +- name: NODE_SELECTOR + value: "{}" +objects: + +# to create the tsb server +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + namespace: ${NAMESPACE} + name: apiserver + labels: + apiserver: "true" + spec: + template: + metadata: + name: apiserver + labels: + apiserver: "true" + spec: + serviceAccountName: apiserver + containers: + - name: c + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/template-service-broker" + - "start" + - "template-service-broker" + - "--secure-port=8443" + - "--audit-log-path=-" + - "--tls-cert-file=/var/serving-cert/tls.crt" + - "--tls-private-key-file=/var/serving-cert/tls.key" + - "--v=${LOGLEVEL}" + - "--config=/var/apiserver-config/apiserver-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/apiserver-config + name: apiserver-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + nodeSelector: "${{NODE_SELECTOR}}" + volumes: + - name: serving-cert + secret: + defaultMode: 420 + secretName: apiserver-serving-cert + - name: apiserver-config + configMap: + defaultMode: 420 + name: apiserver-config + +# to create the config for the TSB +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: apiserver-config + data: + apiserver-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: apiserver + +# to be able to expose TSB inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: apiserver + annotations: + service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert + spec: + selector: + apiserver: "true" + ports: + - port: 443 + targetPort: 8443 + +# This service account will be granted permission to call the TSB. +# The token for this SA will be provided to the service catalog for +# use when calling the TSB. +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# This secret will be populated with a copy of the templateservicebroker-client SA's +# auth token. Since this secret has a static name, it can be referenced more +# easily than the auto-generated secret for the service account. 
+- apiVersion: v1 + kind: Secret + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + annotations: + kubernetes.io/service-account.name: templateservicebroker-client + type: kubernetes.io/service-account-token diff --git a/roles/template_service_broker/files/rbac-template.yaml b/roles/template_service_broker/files/rbac-template.yaml new file mode 100644 index 000000000..0937a9065 --- /dev/null +++ b/roles/template_service_broker/files/rbac-template.yaml @@ -0,0 +1,92 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-rbac +parameters: +- name: NAMESPACE + value: openshift-template-service-broker +- name: KUBE_SYSTEM + value: kube-system +objects: + +# Grant the service account permission to call the TSB +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: templateservicebroker-client + roleRef: + kind: ClusterRole + name: system:openshift:templateservicebroker-client + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# to delegate authentication and authorization +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: auth-delegator-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to have the template service broker powers +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: tsb-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:openshift:controller:template-service-broker + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to read the config for terminating authentication +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${KUBE_SYSTEM} + name: extension-apiserver-authentication-reader-${NAMESPACE} + roleRef: + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# allow the kube service catalog's SA to read the static secret defined +# above, which will contain the token for the SA that can call the TSB. 
+- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + name: templateservicebroker-auth-reader + namespace: ${NAMESPACE} + rules: + - apiGroups: + - "" + resourceNames: + - templateservicebroker-client + resources: + - secrets + verbs: + - get +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-auth-reader + roleRef: + kind: Role + name: templateservicebroker-auth-reader + subjects: + - kind: ServiceAccount + namespace: kube-service-catalog + name: service-catalog-controller diff --git a/roles/template_service_broker/files/template-service-broker-registration.yaml b/roles/template_service_broker/files/template-service-broker-registration.yaml new file mode 100644 index 000000000..95fb72924 --- /dev/null +++ b/roles/template_service_broker/files/template-service-broker-registration.yaml @@ -0,0 +1,25 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-registration +parameters: +- name: TSB_NAMESPACE + value: openshift-template-service-broker +- name: CA_BUNDLE + required: true +objects: +# register the tsb with the service catalog +- apiVersion: servicecatalog.k8s.io/v1beta1 + kind: ClusterServiceBroker + metadata: + name: template-service-broker + spec: + url: https://apiserver.${TSB_NAMESPACE}.svc:443/brokers/template.openshift.io + insecureSkipTLSVerify: false + caBundle: ${CA_BUNDLE} + authInfo: + bearer: + secretRef: + kind: Secret + name: templateservicebroker-client + namespace: ${TSB_NAMESPACE} diff --git a/roles/template_service_broker/meta/main.yml b/roles/template_service_broker/meta/main.yml index ab5a0cf08..f1b56b771 100644 --- a/roles/template_service_broker/meta/main.yml +++ b/roles/template_service_broker/meta/main.yml @@ -11,3 +11,5 @@ galaxy_info: - 7 categories: - cloud +dependencies: +- role: lib_utils diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 99a58baff..d0a07c48d 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -1,9 +1,9 @@ --- # Fact setting -- name: Set default image variables based on deployment type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: set template_service_broker facts @@ -15,14 +15,20 @@ - oc_project: name: openshift-template-service-broker state: present + node_selector: + - "" - command: mktemp -d /tmp/tsb-ansible-XXXXXX register: mktemp changed_when: False - become: no + +- name: Copy admin client config + command: > + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: false - copy: - src: "{{ __tsb_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __tsb_template_file }}" @@ -42,15 +48,18 @@ - name: Apply template file shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig + -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ 
diff --git a/roles/template_service_broker/meta/main.yml b/roles/template_service_broker/meta/main.yml
index ab5a0cf08..f1b56b771 100644
--- a/roles/template_service_broker/meta/main.yml
+++ b/roles/template_service_broker/meta/main.yml
@@ -11,3 +11,5 @@ galaxy_info:
     - 7
   categories:
   - cloud
+dependencies:
+- role: lib_utils
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 99a58baff..d0a07c48d 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -1,9 +1,9 @@
 ---
 # Fact setting
-- name: Set default image variables based on deployment type
+- name: Set default image variables based on openshift_deployment_type
   include_vars: "{{ item }}"
   with_first_found:
-  - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+  - "{{ openshift_deployment_type }}.yml"
   - "default_images.yml"

 - name: set template_service_broker facts
@@ -15,14 +15,20 @@
 - oc_project:
     name: openshift-template-service-broker
     state: present
+    node_selector:
+      - ""

 - command: mktemp -d /tmp/tsb-ansible-XXXXXX
   register: mktemp
   changed_when: False
-  become: no
+
+- name: Copy admin client config
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: false

 - copy:
-    src: "{{ __tsb_files_location }}/{{ item }}"
+    src: "{{ item }}"
     dest: "{{ mktemp.stdout }}/{{ item }}"
   with_items:
   - "{{ __tsb_template_file }}"
@@ -42,15 +48,18 @@

 - name: Apply template file
   shell: >
-    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+    {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig
+    -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
     --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
     --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
-    | {{ openshift.common.client_binary }} apply -f -
+    --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
+    | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -

 # reconcile with rbac
 - name: Reconcile with RBAC file
   shell: >
-    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f -
+    {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}"
+    | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -

 # Check that the TSB is running
 - name: Verify that TSB is running
@@ -77,10 +86,15 @@
 # Register with broker
 - name: Register TSB with broker
   shell: >
-    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift.common.client_binary }} apply -f -
+    {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -

 - file:
     state: absent
     name: "{{ mktemp.stdout }}"
   changed_when: False
-  become: no
+
+- name: Roll out console so it discovers the template service broker is installed
+  include_role:
+    name: openshift_web_console
+    tasks_from: rollout_console.yml
+  when: openshift_web_console_install | default(true) | bool
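Two patterns recur in the rewritten install tasks above: every oc call now pins --config={{ mktemp.stdout }}/admin.kubeconfig so the result no longer depends on whichever context the remote user happens to have, and the node selector is passed through to_json | quote so the whole JSON map survives the shell as a single --param argument. A small illustration of the latter, assuming a selector of {"region": "infra"} (the variable's real default is defined elsewhere in the role):

- name: Show how NODE_SELECTOR is serialized for oc process (illustrative)
  # With this input, oc receives the single argument
  #   --param NODE_SELECTOR='{"region": "infra"}'
  # because to_json renders the dict as a JSON string and quote
  # shell-escapes it.
  debug:
    msg: "--param NODE_SELECTOR={{ {'region': 'infra'} | to_json | quote }}"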
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index 8b5593ff9..b46dd4771 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -2,10 +2,14 @@
 - command: mktemp -d /tmp/tsb-ansible-XXXXXX
   register: mktemp
   changed_when: False
-  become: no
+
+- name: Copy admin client config
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: false

 - copy:
-    src: "{{ __tsb_files_location }}/{{ item }}"
+    src: "{{ item }}"
     dest: "{{ mktemp.stdout }}/{{ item }}"
   with_items:
   - "{{ __tsb_template_file }}"
@@ -13,11 +17,11 @@

 - name: Delete TSB broker
   shell: >
-    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f -
+    {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -

 - name: Delete TSB objects
   shell: >
-    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f -
+    {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -

 - name: empty out tech preview extension file for service console UI
   copy:
@@ -32,4 +36,9 @@
     state: absent
     name: "{{ mktemp.stdout }}"
   changed_when: False
-  become: no
+
+- name: Roll out console so it discovers the template service broker is removed
+  include_role:
+    name: openshift_web_console
+    tasks_from: rollout_console.yml
+  when: openshift_web_console_install | default(true) | bool
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 77afe1f43..dc164a4db 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
 ---
-__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_prefix: "docker.io/openshift/origin-"
 __template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml
index a65340f16..7dec24a79 100644
--- a/roles/template_service_broker/vars/main.yml
+++ b/roles/template_service_broker/vars/main.yml
@@ -1,6 +1,4 @@
 ---
-__tsb_files_location: "../../../files/origin-components/"
-
 __tsb_template_file: "apiserver-template.yaml"
 __tsb_config_file: "apiserver-config.yaml"
 __tsb_rbac_file: "rbac-template.yaml"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index dfab1e01b..b65b97691 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
 ---
-__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-"
 __template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml
index e95d274d5..5129f4471 100644
--- a/roles/tuned/tasks/main.yml
+++ b/roles/tuned/tasks/main.yml
@@ -11,7 +11,7 @@
   block:
   - name: Set tuned OpenShift variables
     set_fact:
-      openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
+      openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift_is_atomic else 'virtual-guest' }}"

   - name: Ensure directory structure exists
     file:
@@ -28,7 +28,12 @@
     when: item.state == 'file'

   - name: Make tuned use the recommended tuned profile on restart
-    file: path=/etc/tuned/active_profile state=absent
+    file:
+      path: '{{ item }}'
+      state: absent
+    with_items:
+    - /etc/tuned/active_profile
+    - /etc/tuned/profile_mode

   - name: Restart tuned service
     systemd: