From 801779eeb6f6308f81ae7c48409de7686c04a0aa Mon Sep 17 00:00:00 2001
From: Michael Gugino
Date: Wed, 13 Dec 2017 12:42:32 -0500
Subject: Relocate filter plugins to lib_utils

This commit relocates filter_plugins to lib_utils and changes
the namespacing to prevent unintended use of older versions
that may be present in the filter_plugins/ directory on
existing installs.

Add lib_utils to meta depends for roles.

Also consolidate some plugins into lib_utils from various
other areas.

Update rpm spec, obsolete plugin rpms.
---
 roles/openshift_persistent_volumes/meta/main.yml | 1 +
 .../templates/persistent-volume-claim.yml.j2 | 2 +-
 roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 4 ++--
 3 files changed, 4 insertions(+), 3 deletions(-)

(limited to 'roles/openshift_persistent_volumes')

diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 48b0699ab..aea7616bf 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
   - 7
 dependencies:
 - role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
index d40417a9a..fac589a92 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
@@ -8,7 +8,7 @@ items:
   metadata:
     name: "{{ claim.name }}"
   spec:
-    accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }}
+    accessModes: {{ claim.access_modes | lib_utils_to_padded_yaml(2, 2) }}
     resources:
       requests:
         storage: "{{ claim.capacity }}"
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 9ec14208b..354561432 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -16,6 +16,6 @@ items:
   spec:
     capacity:
       storage: "{{ volume.capacity }}"
-    accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
-    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
+    accessModes: {{ volume.access_modes | lib_utils_to_padded_yaml(2, 2) }}
+    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | lib_utils_to_padded_yaml(3, 2) }}
 {% endfor %}
--
cgit v1.2.3

From e6c159afb4ba39a7266c750d43d6a5e911cc8f21 Mon Sep 17 00:00:00 2001
From: Michael Gugino
Date: Mon, 18 Dec 2017 16:13:36 -0500
Subject: Remove openshift.common.{is_atomic|is_containerized}

We set these variables using facts in init, no need
to duplicate the logic all around the codebase.
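The shape of the change is a mechanical rename in conditionals: the
dotted fact becomes a flat host variable set once in
playbooks/init/facts.yml. A minimal before/after sketch (illustrative
only, not a hunk from this patch; it mirrors the logrotate hunk below):

    # Before: dotted fact populated by openshift_facts
    - name: Install logrotate
      package: name=logrotate state=present
      when: not openshift.common.is_atomic | bool

    # After: flat host variable set during init
    - name: Install logrotate
      package: name=logrotate state=present
      when: not openshift_is_atomic | bool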
---
 playbooks/adhoc/uninstall.yml | 15 ++++----
 .../upgrades/create_service_signer_cert.yml | 2 +-
 .../upgrades/docker/docker_upgrade.yml | 4 +--
 .../upgrades/docker/tasks/restart.yml | 2 +-
 .../upgrades/docker/tasks/upgrade.yml | 2 +-
 .../upgrades/pre/verify_cluster.yml | 2 +-
 .../upgrades/pre/verify_upgrade_targets.yml | 4 +--
 .../upgrades/upgrade_control_plane.yml | 18 +++++-----
 .../openshift-cluster/upgrades/upgrade_nodes.yml | 2 +-
 .../upgrades/upgrade_scale_group.yml | 2 +-
 .../openshift-cluster/upgrades/v3_7/validator.yml | 2 +-
 playbooks/init/facts.yml | 27 ++++++++------
 .../private/upgrade_image_members.yml | 4 +--
 .../openshift-etcd/private/upgrade_rpm_members.yml | 4 +--
 playbooks/openshift-etcd/private/upgrade_step.yml | 2 +-
 .../private/redeploy-registry-certificates.yml | 8 ++---
 .../private/redeploy-router-certificates.yml | 12 +++----
 .../openshift-master/private/additional_config.yml | 2 +-
 .../private/tasks/wire_aggregator.yml | 4 +--
 playbooks/openshift-node/private/restart.yml | 2 +-
 playbooks/openshift-node/private/setup.yml | 2 +-
 roles/calico_master/tasks/main.yml | 2 +-
 roles/cockpit-ui/meta/main.yml | 1 +
 roles/cockpit-ui/tasks/main.yml | 2 +-
 roles/cockpit/tasks/main.yml | 4 +--
 .../tasks/common/syscontainer_packages.yml | 6 ++--
 .../tasks/docker_upgrade_check.yml | 6 ++--
 roles/container_runtime/tasks/package_docker.yml | 4 +--
 .../tasks/systemcontainer_crio.yml | 2 +-
 .../tasks/systemcontainer_docker.yml | 2 +-
 roles/contiv/defaults/main.yml | 1 -
 roles/contiv/tasks/packageManagerInstall.yml | 2 +-
 roles/contiv_facts/tasks/main.yml | 15 --------
 roles/etcd/defaults/main.yaml | 2 +-
 roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2 +-
 roles/etcd/tasks/migration/add_ttls.yml | 2 +-
 roles/etcd/tasks/migration/migrate.yml | 2 +-
 roles/etcd/tasks/version_detect.yml | 4 +--
 roles/flannel/tasks/main.yml | 2 +-
 roles/nickhammond.logrotate/tasks/main.yml | 2 +-
 roles/nuage_ca/tasks/main.yaml | 2 +-
 roles/nuage_common/tasks/main.yml | 6 ++--
 roles/nuage_master/tasks/main.yaml | 18 +++++-----
 roles/nuage_master/tasks/serviceaccount.yml | 2 +-
 roles/nuage_node/tasks/main.yaml | 8 ++---
 roles/openshift_ca/tasks/main.yml | 8 ++---
 roles/openshift_cli/tasks/main.yml | 8 ++---
 roles/openshift_etcd_facts/vars/main.yml | 4 +--
 roles/openshift_examples/defaults/main.yml | 2 +-
 roles/openshift_examples/tasks/main.yml | 16 ++++-----
 roles/openshift_excluder/tasks/install.yml | 2 +-
 roles/openshift_expand_partition/tasks/main.yml | 4 +--
 roles/openshift_facts/defaults/main.yml | 2 ++
 roles/openshift_facts/library/openshift_facts.py | 34 +-----------------
 .../openshift_checks/docker_image_availability.py | 2 +-
 .../openshift_checks/etcd_traffic.py | 4 +--
 .../openshift_checks/mixins.py | 8 ++---
 .../test/docker_image_availability_test.py | 41 ++++++++--------------
 .../test/docker_storage_test.py | 8 ++---
 .../test/etcd_traffic_test.py | 12 +++----
 roles/openshift_health_checker/test/mixins_test.py | 6 ++--
 .../test/ovs_version_test.py | 6 ++--
 .../test/package_availability_test.py | 6 ++--
 .../test/package_version_test.py | 6 ++--
 .../tasks/storage/glusterfs_endpoints.yml | 2 +-
 roles/openshift_hosted/tasks/wait_for_pod.yml | 6 ++--
 roles/openshift_hosted_templates/defaults/main.yml | 2 +-
 roles/openshift_hosted_templates/tasks/main.yml | 2 +-
 roles/openshift_loadbalancer/tasks/main.yml | 14 ++++----
 .../templates/haproxy.cfg.j2 | 2 +-
 .../tasks/annotate_ops_projects.yaml | 2 +-
 roles/openshift_logging/tasks/delete_logging.yaml | 4 +--
 roles/openshift_logging/tasks/generate_certs.yaml | 2 +-
 roles/openshift_logging/tasks/install_logging.yaml | 2 +-
 .../tasks/procure_server_certs.yaml | 2 +-
 .../tasks/main.yaml | 2 +-
 roles/openshift_manage_node/tasks/main.yml | 2 +-
 roles/openshift_master/tasks/main.yml | 12 +++----
 roles/openshift_master/tasks/registry_auth.yml | 2 +-
 .../tasks/set_loopback_context.yml | 8 ++---
 roles/openshift_master/tasks/systemd_units.yml | 4 +--
 roles/openshift_master/tasks/upgrade.yml | 2 +-
 .../templates/atomic-openshift-master.j2 | 2 +-
 .../native-cluster/atomic-openshift-master-api.j2 | 2 +-
 .../atomic-openshift-master-controllers.j2 | 2 +-
 roles/openshift_master_certificates/tasks/main.yml | 4 +--
 .../tasks/generate_certificates.yaml | 2 +-
 .../openshift_metrics/tasks/install_cassandra.yaml | 2 +-
 .../openshift_metrics/tasks/install_hawkular.yaml | 2 +-
 .../openshift_metrics/tasks/install_heapster.yaml | 2 +-
 roles/openshift_metrics/tasks/install_metrics.yaml | 2 +-
 roles/openshift_metrics/tasks/oc_apply.yaml | 6 ++--
 roles/openshift_metrics/tasks/pre_install.yaml | 2 +-
 .../openshift_metrics/tasks/setup_certificate.yaml | 2 +-
 roles/openshift_metrics/tasks/start_metrics.yaml | 6 ++--
 roles/openshift_metrics/tasks/stop_metrics.yaml | 6 ++--
 roles/openshift_metrics/tasks/uninstall_hosa.yaml | 4 +--
 .../openshift_metrics/tasks/uninstall_metrics.yaml | 4 +--
 roles/openshift_node/handlers/main.yml | 2 +-
 roles/openshift_node/tasks/config.yml | 6 ++--
 roles/openshift_node/tasks/dnsmasq_install.yml | 2 +-
 roles/openshift_node/tasks/install.yml | 4 +--
 roles/openshift_node/tasks/registry_auth.yml | 2 +-
 .../openshift_node/tasks/storage_plugins/ceph.yml | 2 +-
 .../tasks/storage_plugins/glusterfs.yml | 2 +-
 .../openshift_node/tasks/storage_plugins/iscsi.yml | 2 +-
 roles/openshift_node/tasks/storage_plugins/nfs.yml | 2 +-
 roles/openshift_node/tasks/systemd_units.yml | 4 +--
 roles/openshift_node/tasks/upgrade.yml | 6 ++--
 .../tasks/upgrade/config_changes.yml | 2 +-
 roles/openshift_node/tasks/upgrade/restart.yml | 2 +-
 roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 2 +-
 .../tasks/upgrade/rpm_upgrade_install.yml | 2 +-
 .../openshift_node/tasks/upgrade/stop_services.yml | 4 +--
 roles/openshift_node/tasks/upgrade_pre.yml | 6 ++--
 roles/openshift_node_certificates/tasks/main.yml | 4 +--
 roles/openshift_persistent_volumes/tasks/pv.yml | 2 +-
 roles/openshift_persistent_volumes/tasks/pvc.yml | 2 +-
 .../tasks/main.yml | 4 +--
 .../tasks/install_prometheus.yaml | 6 ++--
 .../openshift_provisioners/tasks/install_efs.yaml | 6 ++--
 roles/openshift_provisioners/tasks/oc_apply.yaml | 10 +++---
 .../tasks/uninstall_provisioners.yaml | 6 ++--
 .../tasks/generate_certs.yml | 4 +--
 roles/openshift_service_catalog/tasks/install.yml | 8 ++---
 roles/openshift_service_catalog/tasks/remove.yml | 6 ++--
 .../tasks/glusterfs_common.yml | 6 ++--
 .../tasks/heketi_deploy_part2.yml | 6 ++--
 roles/openshift_storage_nfs_lvm/tasks/main.yml | 2 +-
 roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 2 +-
 roles/openshift_version/tasks/main.yml | 20 +++++------
 roles/template_service_broker/tasks/install.yml | 8 ++---
 roles/template_service_broker/tasks/remove.yml | 4 +--
 roles/tuned/tasks/main.yml | 2 +-
 134 files changed, 310 insertions(+), 372 deletions(-)

(limited to 'roles/openshift_persistent_volumes')

diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 584117e6b..0e0e2b425 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -18,9 +18,8 @@
 
     # Since we're not calling openshift_facts we'll do this for now
     - set_fact:
-        is_atomic: "{{ ostree_output.rc == 0 }}"
-    - set_fact:
-        is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+        openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+        openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
 
 # Stop services on all hosts prior to removing files.
 - hosts: nodes
@@ -133,7 +132,7 @@
       when: openshift_use_flannel | default(false) | bool
     register: result
     until: result is succeeded
-    when: not is_atomic | bool
+    when: not openshift_is_atomic | bool
 
   - shell: systemctl reset-failed
     changed_when: False
@@ -363,7 +362,7 @@
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
    with_items:
     - atomic-openshift
     - atomic-openshift-clients
@@ -487,14 +486,14 @@
 
   - name: Stop additional atomic services
     service: name={{ item }} state=stopped
-    when: is_containerized | bool
+    when: openshift_is_containerized | bool
    with_items:
     - etcd_container
     failed_when: false
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - etcd
     - etcd3
@@ -554,7 +553,7 @@
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - haproxy
     register: result
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 23cf8cf76..372a39e74 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -22,7 +22,7 @@
 
   - name: Create service signer certificate
     command: >
-      {{ openshift.common.client_binary }} adm ca create-signer-cert
+      {{ openshift_client_binary }} adm ca create-signer-cert
       --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
       --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
       --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 42cd51bd9..5b8746f2a 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -17,7 +17,7 @@
 
     - fail:
         msg: Cannot upgrade Docker on Atomic operating systems.
-      when: openshift.common.is_atomic | bool
+      when: openshift_is_atomic | bool
 
     - include_role:
         name: container_runtime
@@ -54,7 +54,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 385a141ea..3b47a11e0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -15,7 +15,7 @@
   - "{{ openshift_service_type }}-master-controllers"
   - "{{ openshift_service_type }}-node"
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Wait for master API to come back online
   wait_for:
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index b5000d3a1..54eeb2ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -10,7 +10,7 @@
   - etcd_container
   - openvswitch
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Check Docker image count
   shell: "docker images -aq | wc -l"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 3fc18c9b7..4713f8633 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -71,7 +71,7 @@
       local_facts:
         ha: "{{ groups.oo_masters_to_config | length > 1 }}"
 
-  - when: openshift.common.is_containerized | bool
+  - when: openshift_is_containerized | bool
     block:
     - set_fact:
         master_services:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 065a9a8ab..95c37c38c 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -15,9 +15,9 @@
     docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
   register: pull_result
   changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
   block:
   - name: Check latest available OpenShift RPM version
     repoquery:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0ddccfa98..0263e721d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -25,7 +25,7 @@
   tasks:
   - name: Upgrade all storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -86,7 +86,7 @@
 
   - name: Post master upgrade - Upgrade clusterpolicies storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=clusterpolicies --confirm
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when:
@@ -133,7 +133,7 @@
   tasks:
   - name: Reconcile Cluster Roles
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --additive-only=true --confirm -o name
     register: reconcile_cluster_role_result
     when: openshift_version is version_compare('3.7','<')
@@ -144,7 +144,7 @@
 
   - name: Reconcile Cluster Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-role-bindings
       --exclude-groups=system:authenticated
       --exclude-groups=system:authenticated:oauth
@@ -160,7 +160,7 @@
 
   - name: Reconcile Jenkins Pipeline Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
     run_once: true
     register: reconcile_jenkins_role_binding_result
     changed_when:
@@ -214,7 +214,7 @@
 
   - name: Reconcile Security Context Constraints
     command: >
-      {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
     register: reconcile_scc_result
     changed_when:
     - reconcile_scc_result.stdout != ''
@@ -223,7 +223,7 @@
 
   - name: Migrate storage post policy reconciliation
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     run_once: true
     register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -262,7 +262,7 @@
   - openshift_facts
   tasks:
   - include_tasks: docker/tasks/upgrade.yml
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
 
 - name: Drain and upgrade master nodes
   hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -291,7 +291,7 @@
 
     - name: Drain Node for Kubelet upgrade
       command: >
-        {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+        {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
      delegate_to: "{{ groups.oo_first_master.0 }}"
       register: l_upgrade_control_plane_drain_result
       until: not (l_upgrade_control_plane_drain_result is failed)
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 956ad0d53..ece69a3d5 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -35,7 +35,7 @@
 
     - name: Drain Node for Kubelet upgrade
       command: >
-        {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+        {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
       delegate_to: "{{ groups.oo_first_master.0 }}"
       register: l_upgrade_nodes_drain_result
       until: not (l_upgrade_nodes_drain_result is failed)
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index e8c0f361a..a90082760 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -43,7 +43,7 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index c8c87a9c3..49e691352 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -14,7 +14,7 @@
   # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
   - name: Confirm OpenShift authorization objects are in sync
     command: >
-      {{ openshift.common.client_binary }} adm migrate authorization
+      {{ openshift_client_binary }} adm migrate authorization
     when:
     - openshift_currently_installed_version is version_compare('3.7','<')
    - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 9fec95b17..ac4429b23 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -21,14 +21,10 @@
         path: /run/ostree-booted
       register: ostree_booted
 
-    # Locally setup containerized facts for now
-    - name: initialize_facts set fact l_is_atomic
+    - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
       set_fact:
-        l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
-    - name: initialize_facts set fact for containerized and l_is_*_system_container
-      set_fact:
-        l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+        openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
+        openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
 
     # TODO: Should this be moved into health checks??
     # Seems as though any check that happens with a corresponding fail should move into health_checks
@@ -54,7 +50,7 @@
     # Seems as though any check that happens with a corresponding fail should move into health_checks
     # Fail as early as possible if Atomic and old version of Docker
     - when:
-      - l_is_atomic | bool
+      - openshift_is_atomic | bool
       block:
 
       # See https://access.redhat.com/articles/2317361
@@ -73,7 +69,7 @@
           msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
 
     - when:
-      - not l_is_atomic | bool
+      - not openshift_is_atomic | bool
       block:
       - name: Ensure openshift-ansible installer package deps are installed
         package:
@@ -105,7 +101,7 @@
         register: result
         until: result is succeeded
 
-    - name: Gather Cluster facts and set is_containerized if needed
+    - name: Gather Cluster facts
       openshift_facts:
         role: common
         local_facts:
@@ -113,7 +109,6 @@
           deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
           hostname: "{{ openshift_hostname | default(None) }}"
           ip: "{{ openshift_ip | default(None) }}"
-          is_containerized: "{{ l_is_containerized | default(None) }}"
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
           public_ip: "{{ openshift_public_ip | default(None) }}"
           portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
@@ -145,3 +140,13 @@
     set_fact:
       repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
       repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
+
+- name: Initialize special first-master variables
+  hosts: oo_first_master
+  roles:
+  - role: openshift_facts
+  tasks:
+  - set_fact:
+      # We need to setup openshift_client_binary here for special uses of delegate_to in
+      # later roles and plays.
+      first_master_client_binary: "{{ openshift_client_binary }}"
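[Editor's note: the first_master_client_binary variable set above exists because
a play that delegates an oc command to the first master cannot assume its own
host's openshift_client_binary is correct for that host. A minimal consumer in
the same pattern as the drain tasks earlier in this patch (a sketch, not part
of the commit):

    - name: Run oc against the first master from any host
      command: >
        {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} get nodes
        --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      delegate_to: "{{ groups.oo_first_master.0 }}"
      changed_when: false
]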
diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index 339fc6b74..d4386249e 100644
--- a/playbooks/openshift-etcd/private/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -1,7 +1,7 @@
 ---
 # INPUT etcd_upgrade_version
 # INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
 - name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
@@ -14,4 +14,4 @@
     etcd_peer: "{{ openshift.common.hostname }}"
   when:
   - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index 327a35b09..f7fe6cd9c 100644
--- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -1,7 +1,7 @@
 ---
 # INPUT etcd_upgrade_version
 # INPUT etcd_rpm_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
 - name: Upgrade to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
@@ -15,4 +15,4 @@
   when:
   - etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<')
   - ansible_distribution == 'RedHat'
-  - not openshift.common.is_containerized | bool
+  - not openshift_is_containerized | bool
diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index 60127fc68..05c543d62 100644
--- a/playbooks/openshift-etcd/private/upgrade_step.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -61,4 +61,4 @@
     etcd_peer: "{{ openshift.common.hostname }}"
   when:
   - ansible_distribution == 'Fedora'
-  - not openshift.common.is_containerized | bool
+  - not openshift_is_containerized | bool
diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index 3943720e3..b817221b8 100644
--- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
@@ -17,7 +17,7 @@
 
   - name: Determine if docker-registry exists
     command: >
-      {{ openshift.common.client_binary }} get dc/docker-registry -o json
+      {{ openshift_client_binary }} get dc/docker-registry -o json
       --config={{ mktemp.stdout }}/admin.kubeconfig
       -n default
     register: l_docker_registry_dc
@@ -38,7 +38,7 @@
   # Replace dc/docker-registry environment variable certificate data if set.
   - name: Update docker-registry environment variables
     shell: >
-      {{ openshift.common.client_binary }} env dc/docker-registry
+      {{ openshift_client_binary }} env dc/docker-registry
       OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
       OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
       OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"
@@ -62,7 +62,7 @@
 
   - name: Generate registry certificate
     command: >
-      {{ openshift.common.client_binary }} adm ca create-server-cert
+      {{ openshift_client_binary }} adm ca create-server-cert
       --signer-cert={{ openshift.common.config_base }}/master/ca.crt
       --signer-key={{ openshift.common.config_base }}/master/ca.key
       --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
@@ -88,7 +88,7 @@
 
   - name: Redeploy docker registry
     command: >
-      {{ openshift.common.client_binary }} deploy dc/docker-registry
+      {{ openshift_client_binary }} deploy dc/docker-registry
       --latest
       --config={{ mktemp.stdout }}/admin.kubeconfig
       -n default
diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index b1f60f1ae..c19147d41 100644
--- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -17,7 +17,7 @@
 
   - name: Determine if router exists
     command: >
-      {{ openshift.common.client_binary }} get dc/router -o json
+      {{ openshift_client_binary }} get dc/router -o json
       --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
       -n default
     register: l_router_dc
@@ -26,7 +26,7 @@
 
   - name: Determine if router service exists
     command: >
-      {{ openshift.common.client_binary }} get svc/router -o json
+      {{ openshift_client_binary }} get svc/router -o json
       --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
       -n default
     register: l_router_svc
@@ -52,7 +52,7 @@
 
   - name: Update router environment variables
     shell: >
-      {{ openshift.common.client_binary }} env dc/router
+      {{ openshift_client_binary }} env dc/router
       OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
       OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
       OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
@@ -78,7 +78,7 @@
 
     - name: Remove router service annotations
       command: >
-        {{ openshift.common.client_binary }} annotate service/router
+        {{ openshift_client_binary }} annotate service/router
        service.alpha.openshift.io/serving-cert-secret-name-
         service.alpha.openshift.io/serving-cert-signed-by-
         --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
@@ -86,7 +86,7 @@
 
     - name: Add serving-cert-secret annotation to router service
      command: >
-        {{ openshift.common.client_binary }} annotate service/router
+        {{ openshift_client_binary }} annotate service/router
         service.alpha.openshift.io/serving-cert-secret-name=router-certs
         --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
         -n default
@@ -129,7 +129,7 @@
 
   - name: Redeploy router
     command: >
-      {{ openshift.common.client_binary }} deploy dc/router
+      {{ openshift_client_binary }} deploy dc/router
       --latest
       --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
       -n default
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index a90cd6b22..81bb8cc5c 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -30,7 +30,7 @@
     when: openshift_use_manageiq | default(true) | bool
   - role: cockpit
     when:
-    - not openshift.common.is_atomic | bool
+    - not openshift_is_atomic | bool
    - deployment_type == 'openshift-enterprise'
     - osm_use_cockpit is undefined or osm_use_cockpit | bool
     - openshift.common.deployment_subtype != 'registry'
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 4f55d5c82..59e2b515c 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -21,7 +21,7 @@
 # TODO: this currently has a bug where hostnames are required
 - name: Creating First Master Aggregator signer certs
   command: >
-    {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+    {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm ca create-signer-cert
     --cert=/etc/origin/master/front-proxy-ca.crt
     --key=/etc/origin/master/front-proxy-ca.key
     --serial=/etc/origin/master/ca.serial.txt
@@ -84,7 +84,7 @@
 - block:
   - name: Create first master api-client config for Aggregator
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm create-api-client-config
       --certificate-authority=/etc/origin/master/front-proxy-ca.crt
       --signer-cert=/etc/origin/master/front-proxy-ca.crt
       --signer-key=/etc/origin/master/front-proxy-ca.key
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index c2092b23c..7249ced70 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -28,7 +28,7 @@
     - "{{ openshift_service_type }}-master-controllers"
     - "{{ openshift_service_type }}-node"
     failed_when: false
-    when: openshift.common.is_containerized | bool
+    when: openshift_is_containerized | bool
 
   - name: Wait for master API to come back online
     wait_for:
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 541913aef..802dce37e 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -21,6 +21,6 @@
     when:
     - hostvars[item].openshift is defined
     - hostvars[item].openshift.common is defined
-    - hostvars[item].openshift.common.is_containerized | bool
+    - hostvars[item].openshift_is_containerized | bool
     - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
     changed_when: False
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 16d960d8b..05415a4d6 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -19,7 +19,7 @@
 
 - name: Calico Master | Launch Calico Policy Controller
   command: >
-    {{ openshift.common.client_binary }} create
+    {{ openshift_client_binary }} create
     -f {{ mktemp.stdout }}/calico-policy-controller.yml
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
   register: calico_create_output
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
index 2250fe4cb..372c29c28 100644
--- a/roles/cockpit-ui/meta/main.yml
+++ b/roles/cockpit-ui/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
 dependencies:
 - role: lib_utils
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index f60912033..d4174d879 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -39,7 +39,7 @@
 
 - name: Deploy registry-console
   command: >
-    {{ openshift.common.client_binary }} new-app --template=registry-console
+    {{ openshift_client_binary }} new-app --template=registry-console
     {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
     {% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
     {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index fc13afed3..577cd7daf 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -10,7 +10,7 @@
     - cockpit-bridge
     - cockpit-docker
     - "{{ cockpit_plugins }}"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -19,4 +19,4 @@
     name: cockpit.socket
     enabled: true
     state: started
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
index b41122880..d429047e6 100644
--- a/roles/container_runtime/tasks/common/syscontainer_packages.yml
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -4,7 +4,7 @@
   package:
     name: container-selinux
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
@@ -13,7 +13,7 @@
   package:
     name: atomic
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
@@ -23,6 +23,6 @@
   package:
     name: runc
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
index 6731963dd..7831f4c7d 100644
--- a/roles/container_runtime/tasks/docker_upgrade_check.yml
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -61,14 +61,14 @@
 
 - name: Determine available Docker
   shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
   register: g_atomic_docker_version_result
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - set_fact:
     l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - fail:
     msg: This playbook requires access to Docker 1.12 or later
   when:
-  - openshift.common.is_atomic | bool
+  - openshift_is_atomic | bool
   - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 6604e6ad5..d6e7e7fed 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -3,7 +3,7 @@
 
 - name: Get current installed Docker version
   command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: curr_docker_version
   retries: 4
   until: curr_docker_version is succeeded
@@ -20,7 +20,7 @@
     name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
     state: present
   when:
-  - not (openshift.common.is_atomic | bool)
+  - not (openshift_is_atomic | bool)
   - not (curr_docker_version is skipped)
   - not (curr_docker_version.stdout != '')
   register: result
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 61f122f3c..6a195a938 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -3,7 +3,7 @@
 - name: Check we are not using node as a Docker container with CRI-O
   fail: msg='Cannot use CRI-O with node configured as a Docker container'
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_is_node_system_container | bool
 
 - include_tasks: common/pre.yml
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 639585367..dc0452553 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -18,7 +18,7 @@
 
 # Make sure Docker is installed so we are able to use the client
 - name: Install Docker so we can use the client
   package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index aa976d921..8d06a5e96 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -101,7 +101,6 @@ apic_epg_bridge_domain: not_specified
 apic_configure_default_policy: false
 apic_default_external_contract: "uni/tn-common/brc-default"
 apic_default_app_profile: "contiv-infra-app-profile"
-is_atomic: False
 kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
 master_name: "{{ groups['masters'][0] }}"
 contiv_etcd_port: 22379
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index d5726476c..3367844a8 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -5,7 +5,7 @@
 
 - include_tasks: pkgMgrInstallers/centos-install.yml
   when: (ansible_os_family == "RedHat") and
-        not is_atomic
+        not openshift_is_atomic
 
 - name: Package Manager | Set fact saying we did CentOS package install
   set_fact:
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 3267a4ab0..c6f8ad1d6 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -1,19 +1,4 @@
 ---
-- name: Determine if Atomic
-  stat: path=/run/ostree-booted
-  register: s
-  changed_when: false
-  check_mode: no
-
-- name: Init the is_atomic fact
-  set_fact:
-    is_atomic: false
-
-- name: Set the is_atomic fact
-  set_fact:
-    is_atomic: true
-  when: s.stat.exists
-
 - name: Determine if CoreOS
   raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
   register: distro
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 86cea5c46..337727e47 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -5,7 +5,7 @@ r_etcd_common_backup_sufix_name: ''
 l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
 
 # runc, docker, host
-r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}"
 r_etcd_common_embedded_etcd: false
 
 osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index ccfd9da14..881a8c270 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
 ---
 - name: Install etcd for etcdctl
   package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index a4b0ff31d..3d945344c 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
 
 - name: Re-introduce leases (as a replacement for key TTLs)
   command: >
-    {{ openshift.common.client_binary }} adm migrate etcd-ttl \
+    {{ openshift_client_binary }} adm migrate etcd-ttl \
     --cert {{ r_etcd_common_master_peer_cert_file }} \
     --key {{ r_etcd_common_master_peer_key_file }} \
     --cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..847b1d722 100644
--- a/roles/etcd/tasks/migration/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
@@ -1,7 +1,7 @@
 ---
 # Should this be run in a serial manner?
 - set_fact:
-    l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+    l_etcd_service: "{{ 'etcd_container' if openshift_is_containerized else 'etcd' }}"
 
 - name: Migrate etcd data
   command: >
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
index fe1e418d8..ab3626cec 100644
--- a/roles/etcd/tasks/version_detect.yml
+++ b/roles/etcd/tasks/version_detect.yml
@@ -12,7 +12,7 @@
   - debug:
       msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
   when:
-  - not openshift.common.is_containerized | bool
+  - not openshift_is_containerized | bool
 
 - block:
   - name: Record containerized etcd version (docker)
@@ -52,4 +52,4 @@
   - debug:
       msg: "Etcd containerized version {{ etcd_container_version }} detected"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index 9b9250f31..4627bf69c 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Install flannel
   become: yes
   package: name=flannel state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 677f206ea..50ad7e373 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: nickhammond.logrotate | Install logrotate
   package: name=logrotate state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index d96d0d802..cb7844bc5 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Install openssl
   package: name=openssl state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml
index 6c8c9f8d2..ec42518ff 100644
--- a/roles/nuage_common/tasks/main.yml
+++ b/roles/nuage_common/tasks/main.yml
@@ -2,17 +2,17 @@
 - name: Set the Nuage plugin openshift directory fact to handle Atomic host install
   set_fact:
     nuage_node_plugin_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI network config directory fact to handle Atomic host install
   set_fact:
     nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI binary directory fact to handle Atomic host install
   set_fact:
     nuage_node_cni_bin_dir: /var/opt/cni/bin/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Assure CNI plugin config dir exists before daemon set install
   become: yes
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index c264427de..29e16b6f8 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -5,22 +5,22 @@
 - name: Set the Nuage certificate directory fact for Atomic hosts
   set_fact:
     cert_output_dir: /var/usr/share/nuage-openshift-monitor
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage kubeconfig file path fact for Atomic hosts
   set_fact:
     kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage monitor yaml location fact for Atomic hosts
   set_fact:
     kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage monitor certs location fact for Atomic hosts
   set_fact:
     nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage master config directory for daemon sets install
   set_fact:
@@ -35,27 +35,27 @@
 - name: Set the Nuage CNI plugin binary directory for daemon sets install
   set_fact:
     nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create directory /usr/share/nuage-openshift-monitor
   become: yes
   file: path=/usr/share/nuage-openshift-monitor state=directory
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
 
 - name: Create directory /var/usr/share/nuage-openshift-monitor
   become: yes
   file: path=/var/usr/share/nuage-openshift-monitor state=directory
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create directory /var/usr/bin for monitor binary on atomic
   become: yes
   file: path=/var/usr/bin state=directory
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create CNI bin directory /var/opt/cni/bin
   become: yes
   file: path=/var/opt/cni/bin state=directory
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create the log directory
   become: yes
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
index fbf2c4f8d..9127b33d6 100644
--- a/roles/nuage_master/tasks/serviceaccount.yml
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -19,7 +19,7 @@
 
 - name: Generate the node client config
   command: >
-    {{ openshift.common.client_binary }} adm create-api-client-config
+    {{ openshift_client_binary }} adm create-api-client-config
     --certificate-authority={{ openshift_master_ca_cert }}
     --client-dir={{ cert_output_dir }}
     --master={{ openshift.master.api_url }}
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index c6b7a9b10..1f1bd1653 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -2,17 +2,17 @@
 - name: Set the Nuage plugin openshift directory fact for Atomic hosts
   set_fact:
     vsp_openshift_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI binary directory fact for Atomic hosts
   set_fact:
     cni_bin_dir: /var/opt/cni/bin/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage plugin certs directory fact for Atomic hosts
   set_fact:
     nuage_plugin_crt_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Assure CNI conf dir exists
   become: yes
@@ -36,7 +36,7 @@
 - name: Add additional Docker mounts for Nuage for atomic hosts
   become: yes
   lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}"
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Restart node services
   command: /bin/true
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 358b8528f..b94cd9fba 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -11,7 +11,7 @@
   package:
     name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: install_result
   until: install_result is succeeded
   delegate_to: "{{ openshift_ca_host }}"
@@ -87,7 +87,7 @@
 # This should NOT replace the CA due to --overwrite=false when a CA already exists.
 - name: Create the master certificates if they do not already exist
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs
    {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
     {% endfor %}
@@ -137,7 +137,7 @@
 
 - name: Test local loopback context
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view
     --config={{ openshift_master_loopback_config }}
   changed_when: false
   register: loopback_config
@@ -154,7 +154,7 @@
   register: openshift_ca_loopback_tmpdir
 - name: Generate the loopback master client config
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
     --certificate-authority={{ openshift_ca_cert }}
     {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 68d82e436..37bed9dbe 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Install clients
   package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -18,7 +18,7 @@
       tag: "{{ openshift_image_tag }}"
       backend: "docker"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_use_cli_atomic_image | bool
 
 - block:
@@ -34,7 +34,7 @@
       tag: "{{ openshift_image_tag }}"
       backend: "atomic"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - l_use_cli_atomic_image | bool
 
 - name: Reload facts to pick up installed OpenShift version
@@ -42,6 +42,6 @@
 
 - name: Install bash completion for oc tools
   package: name=bash-completion state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 0c072b64a..9e635b34f 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -1,6 +1,6 @@
 ---
-etcd_is_containerized: "{{ openshift.common.is_containerized }}"
-etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_is_containerized: "{{ openshift_is_containerized }}"
+etcd_is_atomic: "{{ openshift_is_atomic }}"
 etcd_hostname: "{{ openshift.common.hostname }}"
 etcd_ip: "{{ openshift.common.ip }}"
 etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index e623b33f3..0a6e8f20c 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true
 
 content_version: "{{ openshift.common.examples_content_version }}"
 
-examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples"
+examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples"
 image_streams_base: "{{ examples_base }}/image-streams"
 centos_image_streams:
 - "{{ image_streams_base }}/image-streams-centos7.json"
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 356317431..a09a598bd 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -53,7 +53,7 @@
 # RHEL and Centos image streams are mutually exclusive
 - name: Import RHEL streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
   when: openshift_examples_load_rhel | bool
   with_items:
   - "{{ rhel_image_streams }}"
@@ -63,7 +63,7 @@
 
 - name: Import Centos Image streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
   when: openshift_examples_load_centos | bool
   with_items:
   - "{{ centos_image_streams }}"
@@ -73,7 +73,7 @@
 
 - name: Import db templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
   when: openshift_examples_load_db_templates | bool
   register: oex_import_db_templates
   failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -90,7 +90,7 @@
   - "{{ quickstarts_base }}/django.json"
 
 - name: Remove defunct quickstart templates from openshift namespace
-  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+  command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
   with_items:
   - nodejs-example
   - cakephp-example
@@ -102,7 +102,7 @@
 
 - name: Import quickstart-templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
   when: openshift_examples_load_quickstarts | bool
   register: oex_import_quickstarts
   failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -116,7 +116,7 @@
   - "{{ xpaas_templates_base }}/sso70-basic.json"
 
 - name: Remove old xPaas templates from openshift namespace
-  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+  command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
   with_items:
   - sso70-basic
   register: oex_delete_old_xpaas_templates
@@ -125,7 +125,7 @@
 
 - name: Import xPaas image streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
   when: openshift_examples_load_xpaas | bool
   register: oex_import_xpaas_streams
   failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -133,7 +133,7 @@
 
 - name: Import xPaas templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
   when: openshift_examples_load_xpaas | bool
   register: oex_import_xpaas_templates
   failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 12fecaff5..6532d7fe2 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,7 +1,7 @@
 ---
 
 - when:
-  - not openshift.common.is_atomic | bool
+  - not openshift_is_atomic | bool
   - r_openshift_excluder_install_ran is not defined
 
   block:
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index c7e21ba99..5ae863871 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Ensure growpart is installed
   package: name=cloud-utils-growpart state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -10,7 +10,7 @@
   register: has_growpart
   failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
   changed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Grow the partitions
   command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 804b274a2..af0a72737 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -1,4 +1,6 @@
 ---
+openshift_client_binary: "{{ openshift_is_containerized | ternary('/usr/local/bin/oc', 'oc') }}"
+
 openshift_cli_image_dict:
   origin: 'openshift/origin'
   openshift-enterprise: 'openshift3/ose'
'version']) # noqa: F405 version = parse_openshift_version(output) - elif 'common' in facts and 'is_containerized' in facts['common']: + else: version = get_container_openshift_version(facts) # Handle containerized masters that have not yet been configured as a node. @@ -1278,36 +1278,7 @@ def set_container_facts_if_unset(facts): dict: the facts dict updated with the generated containerization facts """ - facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') - - if 'is_containerized' not in facts['common']: - facts['common']['is_containerized'] = facts['common']['is_atomic'] - - if safe_get_bool(facts['common']['is_containerized']): - facts['common']['client_binary'] = '/usr/local/bin/oc' - - return facts - -def set_installed_variant_rpm_facts(facts): - """ Set RPM facts of installed variant - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with installed_variant_rpms - """ - installed_rpms = [] - for base_rpm in ['openshift', 'atomic-openshift', 'origin']: - optional_rpms = ['master', 'node', 'clients', 'sdn-ovs'] - variant_rpms = [base_rpm] + \ - ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \ - ['tuned-profiles-%s-node' % base_rpm] - for rpm in variant_rpms: - exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405 - if exit_code == 0: - installed_rpms.append(rpm) - - facts['common']['installed_variant_rpms'] = installed_rpms return facts @@ -1430,8 +1401,6 @@ class OpenShiftFacts(object): facts = set_proxy_facts(facts) facts = set_builddefaults_facts(facts) facts = set_buildoverrides_facts(facts) - if not safe_get_bool(facts['common']['is_containerized']): - facts = set_installed_variant_rpm_facts(facts) facts = set_nodename(facts) return dict(openshift=facts) @@ -1459,7 +1428,6 @@ class OpenShiftFacts(object): hostname=hostname, public_hostname=hostname, portal_net='172.30.0.0/16', - client_binary='oc', dns_domain='cluster.local', config_base='/etc/origin') diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 4f91f6bb3..744b79c1a 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -160,7 +160,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): required.add(self._registry_console_image(image_tag, image_info)) # images for containerized components - if self.get_var("openshift", "common", "is_containerized"): + if self.get_var("openshift_is_containerized"): components = set() if 'oo_nodes_to_config' in host_groups: components.update(["node", "openvswitch"]) diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py index 8b20ccb49..b56d2092b 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py @@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck): return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version def run(self): - is_containerized = self.get_var("openshift", "common", "is_containerized") - unit = "etcd_container" if is_containerized else "etcd" + openshift_is_containerized = self.get_var("openshift_is_containerized") + unit = "etcd_container" if openshift_is_containerized else "etcd" log_matchers = [{ "start_regexp": r"Starting Etcd Server", diff --git 
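[Reviewer note: the new openshift_client_binary default in roles/openshift_facts/defaults/main.yml above replaces the removed openshift.common.client_binary fact. A minimal sketch of how the Jinja2 ternary filter resolves it, assuming the init plays have already set openshift_is_containerized; the play and host group below are illustrative only and not part of this commit:

- hosts: masters
  gather_facts: no
  tasks:
  - name: Show which oc binary the new default resolves to
    debug:
      msg: "{{ openshift_is_containerized | ternary('/usr/local/bin/oc', 'oc') }}"
    # true  -> /usr/local/bin/oc, the client wrapper present on containerized hosts
    # false -> oc, the RPM-installed client found on PATH

The same expression is what every task below picks up via {{ openshift_client_binary }}.]

diff --git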
a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index cfbdea303..567162be1 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -10,8 +10,8 @@ class NotContainerizedMixin(object):

     def is_active(self):
         """Only run on non-containerized hosts."""
-        is_containerized = self.get_var("openshift", "common", "is_containerized")
-        return super(NotContainerizedMixin, self).is_active() and not is_containerized
+        openshift_is_containerized = self.get_var("openshift_is_containerized")
+        return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized


 class DockerHostMixin(object):
@@ -23,7 +23,7 @@ class DockerHostMixin(object):
         """Only run on hosts that depend on Docker."""
         group_names = set(self.get_var("group_names", default=[]))
         needs_docker = set(["oo_nodes_to_config"])
-        if self.get_var("openshift.common.is_containerized"):
+        if self.get_var("openshift_is_containerized"):
             needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
         return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))

@@ -33,7 +33,7 @@ class DockerHostMixin(object):
         (which would not be able to install but should already have them).
         Returns: msg, failed
         """
-        if self.get_var("openshift", "common", "is_atomic"):
+        if self.get_var("openshift_is_atomic"):
             return "", False

         # NOTE: we would use the "package" module but it's actually an action plugin
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index fc333dfd4..9fd6e049d 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -6,13 +6,8 @@ from openshift_checks.docker_image_availability import DockerImageAvailability,
 @pytest.fixture()
 def task_vars():
     return dict(
-        openshift=dict(
-            common=dict(
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(),
-        ),
+        openshift_is_atomic=False,
+        openshift_is_containerized=False,
         openshift_service_type='origin',
         openshift_deployment_type='origin',
         openshift_image_tag='',
@@ -20,7 +15,7 @@ def task_vars():
     )


-@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [
     ("invalid", True, [], False),
     ("", True, [], False),
     ("origin", False, [], False),
@@ -30,20 +25,20 @@
     ("origin", True, ["nfs"], False),
     ("openshift-enterprise", True, ["lb"], False),
 ])
-def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active):
     task_vars['openshift_deployment_type'] = deployment_type
-    task_vars['openshift']['common']['is_containerized'] = is_containerized
+    task_vars['openshift_is_containerized'] = openshift_is_containerized
     task_vars['group_names'] = group_names
     assert DockerImageAvailability(None, task_vars).is_active() == expect_active


-@pytest.mark.parametrize("is_containerized,is_atomic", [
+@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [
     (True, True),
     (False, False),
     (True, False),
     (False, True),
 ])
-def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic):
     def execute_module(module_name, module_args, *_):
         if module_name == "yum":
             return {}
@@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
             'images': [module_args['name']],
         }

-    task_vars['openshift']['common']['is_containerized'] = is_containerized
-    task_vars['openshift']['common']['is_atomic'] = is_atomic
+    task_vars['openshift_is_containerized'] = openshift_is_containerized
+    task_vars['openshift_is_atomic'] = openshift_is_atomic
     result = DockerImageAvailability(execute_module, task_vars).run()

     assert not result.get('failed', False)
@@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
     assert expect_registries_reached == check.reachable_registries


-@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [
     (  # standard set of stuff required on nodes
         "origin", False, ['oo_nodes_to_config'], "",
         set([
@@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
     ),
 ])
-def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected):
     task_vars = dict(
-        openshift=dict(
-            common=dict(
-                is_containerized=is_containerized,
-                is_atomic=False,
-            ),
-        ),
+        openshift_is_containerized=openshift_is_containerized,
+        openshift_is_atomic=False,
         openshift_deployment_type=deployment_type,
         group_names=groups,
         oreg_url=oreg_url,
@@ -287,11 +278,7 @@ def test_registry_console_image(task_vars, expected):

 def test_containerized_etcd():
     task_vars = dict(
-        openshift=dict(
-            common=dict(
-                is_containerized=True,
-            ),
-        ),
+        openshift_is_containerized=True,
         openshift_deployment_type="origin",
         group_names=['oo_etcd_to_config'],
     )
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 8fa68c378..33a5dd90a 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -4,21 +4,21 @@
 from openshift_checks import OpenShiftCheckException
 from openshift_checks.docker_storage import DockerStorage


-@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [
     (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
     (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
     (True, ["oo_etcd_to_config"], True),
 ])
-def test_is_active(is_containerized, group_names, is_active):
+def test_is_active(openshift_is_containerized, group_names, is_active):
     task_vars = dict(
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
         group_names=group_names,
     )
     assert DockerStorage(None, task_vars).is_active() == is_active


 def non_atomic_task_vars():
-    return {"openshift": {"common": {"is_atomic": False}}}
+    return {"openshift_is_atomic": False}


 @pytest.mark.parametrize('docker_info, failed, expect_msg', [
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index a29dc166b..583c4c8dd 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(
-            common=dict(is_containerized=False),
-        ),
+        openshift_is_containerized=False,
         openshift_service_type="origin"
     )

@@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
     assert result.get("failed", False) == failed


-@pytest.mark.parametrize('is_containerized,expected_unit_value', [
+@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [
     (False, "etcd"),
     (True, "etcd_container"),
 ])
-def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value):
+def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):
     task_vars = dict(
-        openshift=dict(
-            common=dict(is_containerized=is_containerized),
-        )
+        openshift_is_containerized=openshift_is_containerized
     )

     def execute_module(module_name, args, *_):
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
index b1a41ca3c..b5d6f2e95 100644
--- a/roles/openshift_health_checker/test/mixins_test.py
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -10,8 +10,8 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):


 @pytest.mark.parametrize('task_vars,expected', [
-    (dict(openshift=dict(common=dict(is_containerized=False))), True),
-    (dict(openshift=dict(common=dict(is_containerized=True))), False),
+    (dict(openshift_is_containerized=False), True),
+    (dict(openshift_is_containerized=True), False),
 ])
 def test_is_active(task_vars, expected):
     assert NotContainerizedCheck(None, task_vars).is_active() == expected
@@ -20,4 +20,4 @@
 def test_is_active_missing_task_vars():
     with pytest.raises(OpenShiftCheckException) as excinfo:
         NotContainerizedCheck().is_active()
-    assert 'is_containerized' in str(excinfo.value)
+    assert 'openshift_is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index dd98ff4d8..0238f49d5 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -70,7 +70,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
     assert result is return_value


-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
     (['oo_masters_to_config'], False, True),
     # ensure check is skipped on containerized installs
     (['oo_masters_to_config'], True, False),
@@ -82,9 +82,9 @@
     (['lb'], False, False),
     (['nfs'], False, False),
 ])
-def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
     )
     assert OvsVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index a1e6e0879..52740093d 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -3,16 +3,16 @@ import pytest

 from openshift_checks.package_availability import PackageAvailability


-@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [
     ('yum', False, True),
     ('yum', True, False),
     ('dnf', True, False),
     ('dnf', False, False),
 ])
-def test_is_active(pkg_mgr, is_containerized, is_active):
+def test_is_active(pkg_mgr, openshift_is_containerized, is_active):
     task_vars = dict(
         ansible_pkg_mgr=pkg_mgr,
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
     )
     assert PackageAvailability(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index ea8e02b97..d2916f617 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -99,7 +99,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
     assert result == return_value


-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
     (['oo_masters_to_config'], False, True),
     # ensure check is skipped on containerized installs
     (['oo_masters_to_config'], True, False),
@@ -111,9 +111,9 @@
     (['lb'], False, False),
     (['nfs'], False, False),
 ])
-def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
     )
     assert PackageVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index bd7181c17..77f020357 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -10,7 +10,7 @@
     dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"

 - name: Create GlusterFS registry service and endpoint
-  command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
+  command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
   with_items:
   - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
   - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml
index 056c79334..f4b9939cc 100644
--- a/roles/openshift_hosted/tasks/wait_for_pod.yml
+++ b/roles/openshift_hosted/tasks/wait_for_pod.yml
@@ -3,7 +3,7 @@
   block:
   - name: Ensure OpenShift pod correctly rolls out (best-effort today)
     command: |
-      {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+      {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \
       --namespace {{ item.namespace | default('default') }} \
       --config {{ openshift_master_config_dir }}/admin.kubeconfig
     async: 600
@@ -13,7 +13,7 @@
   - name: Determine the latest version of the OpenShift pod deployment
     command: |
-      {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+      {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \
       --namespace {{ item.namespace }} \
       --config {{ openshift_master_config_dir }}/admin.kubeconfig \
       -o jsonpath='{ .status.latestVersion }'
@@ -22,7 +22,7 @@
   - name: Poll for OpenShift pod deployment success
     command: |
-      {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+      {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
       --namespace {{ item.0.namespace }} \
       --config {{ openshift_master_config_dir }}/admin.kubeconfig \
       -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml
index f4fd15089..48d62c8df 100644
--- a/roles/openshift_hosted_templates/defaults/main.yml
+++ b/roles/openshift_hosted_templates/defaults/main.yml
@@ -1,5 +1,5 @@
 ---
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
+hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted"
 hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}"

 content_version: "{{ openshift.common.examples_content_version }}"
diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml
index 89b92dfcc..b2313c297 100644
--- a/roles/openshift_hosted_templates/tasks/main.yml
+++ b/roles/openshift_hosted_templates/tasks/main.yml
@@ -52,7 +52,7 @@

 - name: Create or update hosted templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }}
+    {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command }}
    -f {{ hosted_base }}
    --config={{ openshift_hosted_templates_kubeconfig }}
    -n openshift
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 3ca6c8cbe..4a11029ab 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -4,33 +4,33 @@

 - name: Install haproxy
   package: name=haproxy state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded

 - name: Pull haproxy image
   command: >
     docker pull {{ openshift_router_image }}:{{ openshift_image_tag }}
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool

 - name: Create config directory for haproxy
   file:
     path: /etc/haproxy
     state: directory
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool

 - name: Create the systemd unit files
   template:
     src: "haproxy.docker.service.j2"
     dest: "/etc/systemd/system/haproxy.service"
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
   notify: restart haproxy

 - name: Configure systemd service directory for haproxy
   file:
     path: /etc/systemd/system/haproxy.service.d
     state: directory
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create limits.conf file
@@ -41,7 +41,7 @@
     owner: root
     group: root
   changed_when: false
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 - name: Configure the nofile limits for haproxy
   ini_file:
@@ -50,7 +50,7 @@
     option: LimitNOFILE
     value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
   notify: restart haproxy
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 - name: Configure haproxy
   template:
diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index 24fd635ec..de5a8d7c2 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -3,7 +3,7 @@ global
     maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }}
     log /dev/log local0 info
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
     stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin
 {% else %}
     chroot /var/lib/haproxy
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 59d6098d4..4a2ee64f0 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index af36d67c6..51d6d0efd 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -109,14 +109,14 @@

 # remove annotations added by logging
 - command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects

 - name: Remove Annotation of Operations Projects
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    annotate {{ project }} openshift.io/logging.ui.hostname-
   with_items: "{{ __logging_ops_projects.stdout_lines }}"
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index d5cfacae3..0d7f8c056 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@

 - name: Generate certificates
   command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+    {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
    --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
    --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
   check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index bb8ebec6b..11f59652c 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Gather OpenShift Logging Facts
   openshift_logging_facts:
-    oc_bin: "{{openshift.common.client_binary}}"
+    oc_bin: "{{openshift_client_binary}}"
     openshift_logging_namespace: "{{openshift_logging_namespace}}"

 - name: Set logging project
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 00de0ca06..bc817075d 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@

 - name: Creating signed server cert and key for {{ cert_info.procure_component }}
   command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+    {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
    --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
    --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
    --signer-serial={{generated_certs_dir}}/ca.serial.txt
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index e91248d08..7790dc435 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -111,7 +111,7 @@

 - name: Create logging-metrics-reader-role
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    -n "{{ openshift_logging_elasticsearch_namespace }}"
    create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 9f315b9af..9251d380b 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -18,7 +18,7 @@
     retries: 120
     delay: 1
     changed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
   delegate_to: "{{ openshift_master_host }}"
   run_once: true
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 1c43d335f..eea1401b8 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -19,7 +19,7 @@
     name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
   when:
-  - not openshift.common.is_containerized | bool
+  - not openshift_is_containerized | bool
   register: result
   until: result is succeeded
@@ -31,12 +31,12 @@
     owner: root
     group: root
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool

 - name: Reload systemd units
   command: systemctl daemon-reload
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool

 - name: Re-gather package dependent master facts
   openshift_facts:
@@ -48,7 +48,7 @@

 - name: Create the policy file if it does not already exist
   command: >
-    {{ openshift.common.client_binary }} adm create-bootstrap-policy-file
+    {{ openshift_client_binary }} adm create-bootstrap-policy-file
    --filename={{ openshift_master_policy }}
   args:
     creates: "{{ openshift_master_policy }}"
@@ -69,7 +69,7 @@
     package: name=httpd-tools state=present
   when:
   - item.kind == 'HTPasswdPasswordIdentityProvider'
-  - not openshift.common.is_atomic | bool
+  - not openshift_is_atomic | bool
   with_items: "{{ openshift.master.identity_providers }}"
   register: result
   until: result is succeeded
@@ -164,7 +164,7 @@
 - name: Install Master system container
   include_tasks: system_container.yml
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - l_is_master_system_container | bool

 - name: Create session secrets file
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 8b342a5b4..911a9bd3d 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -43,7 +43,7 @@
   set_fact:
     l_bind_docker_reg_auth: True
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - oreg_auth_user is defined
   - >
       (master_oreg_auth_credentials_stat.stat.exists
diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml
index 487fefb63..7e013a699 100644
--- a/roles/openshift_master/tasks/set_loopback_context.yml
+++ b/roles/openshift_master/tasks/set_loopback_context.yml
@@ -1,13 +1,13 @@
 ---
 - name: Test local loopback context
   command: >
-    {{ openshift.common.client_binary }} config view
+    {{ openshift_client_binary }} config view
    --config={{ openshift_master_loopback_config }}
   changed_when: false
   register: l_loopback_config

 - command: >
-    {{ openshift.common.client_binary }} config set-cluster
+    {{ openshift_client_binary }} config set-cluster
    --certificate-authority={{ openshift_master_config_dir }}/ca.crt
    --embed-certs=true --server={{ openshift.master.loopback_api_url }}
    {{ openshift.master.loopback_cluster_name }}
@@ -17,7 +17,7 @@
   register: set_loopback_cluster

 - command: >
-    {{ openshift.common.client_binary }} config set-context
+    {{ openshift_client_binary }} config set-context
    --cluster={{ openshift.master.loopback_cluster_name }}
    --namespace=default --user={{ openshift.master.loopback_user }}
    {{ openshift.master.loopback_context_name }}
@@ -27,7 +27,7 @@
   register: l_set_loopback_context

 - command: >
-    {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+    {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }}
    --config={{ openshift_master_loopback_config }}
   when:
   - l_set_loopback_context is changed
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 906ecf277..870ab7c57 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -7,7 +7,7 @@
     containerized_svc_dir: "/etc/systemd/system"
     ha_svc_template_path: "docker-cluster"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool

 - include_tasks: registry_auth.yml
@@ -34,7 +34,7 @@
   register: l_pull_result
   changed_when: "'Downloaded newer image' in l_pull_result.stdout"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_is_master_system_container | bool

 - name: Create the ha systemd unit files
diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml
index f84cf2f6e..f143673cf 100644
--- a/roles/openshift_master/tasks/upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade.yml
@@ -1,6 +1,6 @@
 ---
 - include_tasks: upgrade/rpm_upgrade.yml
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 - include_tasks: upgrade/upgrade_scheduler.yml
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 3f7a528a9..4c68155ea 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
 {% elif openshift_push_via_dns | default(false) %}
 OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
 {% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
 IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index cc21b37af..bff32b2e3 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
 {% elif openshift_push_via_dns | default(false) %}
 OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
 {% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
 IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 493fc510e..b8a519baa 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
 {% elif openshift_push_via_dns | default(false) %}
 OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
 {% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
 IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 273414f8d..00cabe574 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -47,7 +47,7 @@

 - name: Create the master server certificate
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
    {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
    --certificate-authority {{ named_ca_certificate }}
    {% endfor %}
@@ -71,7 +71,7 @@

 - name: Generate the loopback master client config
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
    --certificate-authority={{ openshift_ca_cert }}
    {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
    --certificate-authority {{ named_ca_certificate }}
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index bb842d710..b71e35263 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
 ---
 - name: generate ca certificate chain
   command: >
-    {{ openshift.common.client_binary }} adm ca create-signer-cert
+    {{ openshift_client_binary }} adm ca create-signer-cert
    --config={{ mktemp.stdout }}/admin.kubeconfig
    --key='{{ mktemp.stdout }}/ca.key'
    --cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 48584bd64..9026cc897 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -1,6 +1,6 @@
 ---
 - shell: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
    --config={{ mktemp.stdout }}/admin.kubeconfig
    get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0
   vars:
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index a4ffa1890..f45e7a042 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
    --config={{ mktemp.stdout }}/admin.kubeconfig
    get rc hawkular-metrics -o jsonpath='{.spec.replicas}'
   register: hawkular_metrics_replica_count
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index a33b28ba7..73e7454f0 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
    --config={{ mktemp.stdout }}/admin.kubeconfig
    get rc heapster -o jsonpath='{.spec.replicas}'
   register: heapster_replica_count
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 49d1d8cf1..106909941 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -70,7 +70,7 @@
 - include_tasks: update_master_config.yaml

 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
    --config={{mktemp.stdout}}/admin.kubeconfig
    get rc
    -l metrics-infra
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 1e1af40e8..8ccfb7192 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
    --config={{ kubeconfig }}
    get {{file_content.kind}} {{file_content.metadata.name}}
    -o jsonpath='{.metadata.resourceVersion}'
@@ -12,7 +12,7 @@

 - name: Applying {{file_name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    apply -f {{ file_name }}
    -n {{namespace}}
   register: generation_apply
@@ -21,7 +21,7 @@

 - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    get {{file_content.kind}} {{file_content.metadata.name}}
    -o jsonpath='{.metadata.resourceVersion}'
    -n {{namespace}}
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index d6756f9b9..976763236 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -14,7 +14,7 @@

 - name: list existing secrets
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project }}
    --config={{ mktemp.stdout }}/admin.kubeconfig
    get secrets -o name
   register: metrics_secrets
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index e6081c0d3..223bd975e 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
 ---
 - name: generate {{ component }} keys
   command: >
-    {{ openshift.common.client_binary }} adm ca create-server-cert
+    {{ openshift_client_binary }} adm ca create-server-cert
    --config={{ mktemp.stdout }}/admin.kubeconfig
    --key='{{ mktemp.stdout }}/{{ component }}.key'
    --cert='{{ mktemp.stdout }}/{{ component }}.crt'
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index 2037e8dc3..899251727 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
    --config={{mktemp.stdout}}/admin.kubeconfig
    get rc
    -l metrics-infra=hawkular-cassandra
@@ -23,7 +23,7 @@
   changed_when: metrics_cassandra_rc | length > 0

 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
    --config={{mktemp.stdout}}/admin.kubeconfig
    get rc
    -l metrics-infra=hawkular-metrics
@@ -45,7 +45,7 @@
   changed_when: metrics_metrics_rc | length > 0

 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
    --config={{mktemp.stdout}}/admin.kubeconfig
    get rc
    -l metrics-infra=heapster
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index 9a2ce9267..4b1d7119d 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
    --config={{mktemp.stdout}}/admin.kubeconfig
    get rc
    -l metrics-infra=heapster
@@ -22,7 +22,7 @@
     loop_var: object

 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
    --config={{mktemp.stdout}}/admin.kubeconfig
    get rc
    -l metrics-infra=hawkular-metrics
@@ -44,7 +44,7 @@
   changed_when: metrics_hawkular_rc | length > 0

 - command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
+    {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
    get rc -o name
    -l metrics-infra=hawkular-cassandra
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
index 42ed02460..ae3306496 100644
--- a/roles/openshift_metrics/tasks/uninstall_hosa.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -1,7 +1,7 @@
 ---
 - name: remove Hawkular Agent (HOSA) components
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
    delete --ignore-not-found --selector=metrics-infra=agent
    all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
   register: delete_metrics
@@ -9,7 +9,7 @@

 - name: remove rolebindings
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
    delete --ignore-not-found
    clusterrolebinding/hawkular-openshift-agent-rb
   changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 1265c7bfd..0ab0eec4b 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -4,7 +4,7 @@

 - name: remove metrics components
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
    delete --ignore-not-found --selector=metrics-infra
    all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
   register: delete_metrics
@@ -12,7 +12,7 @@

 - name: remove rolebindings
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
    delete --ignore-not-found
    rolebinding/hawkular-view
    clusterrolebinding/heapster-cluster-reader
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 62e0e1341..779916335 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
   pause: seconds=15
   when:
   - (not skip_node_svc_handlers | default(False) | bool)
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool

 - name: restart node
   systemd:
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8f38a47aa..1103fe4c9 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -4,7 +4,7 @@

 - name: Pull container images
   include_tasks: container_images.yml
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool

 - name: Start and enable openvswitch service
   systemd:
@@ -13,7 +13,7 @@
     state: started
     daemon_reload: yes
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - openshift_node_use_openshift_sdn | default(true) | bool
   register: ovs_start_result
   until: not (ovs_start_result is failed)
@@ -58,7 +58,7 @@
 #  restarted after the node restarts docker and it will take up to 60 seconds for
 #  systemd to start the master again
 - when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not openshift_node_bootstrap
   block:
   - name: Wait for master API to become available before proceeding
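[Reviewer note: with the nested openshift.common.is_* facts gone, every role touched above assumes the flat booleans are defined before it runs, which makes the ordering against the init plays (playbooks/init/facts.yml is updated in this same commit) load-bearing. A minimal guard task for locally carried roles, assuming the init plays remain the single place that sets both variables; this task is an illustrative sketch, not part of the commit:

- name: Fail early if the flat containerization facts are missing
  assert:
    that:
    - openshift_is_containerized is defined
    - openshift_is_atomic is defined
    msg: "openshift_is_containerized / openshift_is_atomic replace openshift.common.is_* and must be set by the init plays before this role runs"

Roles that skip the init plays entirely would otherwise fail later with an undefined-variable error inside a when: condition.]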
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
index 0c8857b11..5e06ba032 100644
--- a/roles/openshift_node/tasks/dnsmasq_install.yml
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -12,7 +12,7 @@

 - name: Install dnsmasq
   package: name=dnsmasq state=installed
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index fb98b7550..55738d759 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,5 +1,5 @@
 ---
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
   block:
   - name: Install Node package
     package:
@@ -25,7 +25,7 @@
     until: result is succeeded

 - when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_is_node_system_container | bool
   block:
   - name: Pre-pull node image when containerized
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index ab43ec049..92650e6b7 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -41,7 +41,7 @@
   set_fact:
     l_bind_docker_reg_auth: True
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - oreg_auth_user is defined
   - >
       (node_oreg_auth_credentials_stat.stat.exists
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index 52d80357e..e30f58a9a 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -1,6 +1,6 @@
 ---
 - name: Install Ceph storage plugin dependencies
   package: name=ceph-common state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index e60f57ae7..c04a6922a 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -1,7 +1,7 @@
 ---
 - name: Install GlusterFS storage plugin dependencies
   package: name=glusterfs-fuse state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index d3a3668d5..a8048c42f 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,6 @@
 ---
 - name: Install iSCSI storage plugin dependencies
   package: name=iscsi-initiator-utils state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 1484aa076..c2922644f 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,7 +1,7 @@
 ---
 - name: Install NFS storage plugin dependencies
   package: name=nfs-utils state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 262ee698b..e33a4999f 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -2,13 +2,13 @@
 - name: Install Node service file
   template:
     dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
-    src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+    src: "{{ openshift_is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
   when: not l_is_node_system_container | bool
   notify:
   - reload systemd units
   - restart node

-- when: openshift.common.is_containerized | bool
+- when: openshift_is_containerized | bool
   block:
   - name: include node deps docker service file
     include_tasks: config/install-node-deps-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index f0a013e45..f62bde784 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -1,10 +1,10 @@
 ---
 # input variables:
 # - l_docker_upgrade
-# - openshift.common.is_atomic
+# - openshift_is_atomic
 # - node_config_hook
 # - openshift_pkg_version
-# - openshift.common.is_containerized
+# - openshift_is_containerized
 # - deployment_type
 # - openshift_release
@@ -26,7 +26,7 @@
   include_tasks: upgrade/rpm_upgrade_install.yml
   vars:
     openshift_version: "{{ openshift_pkg_version | default('') }}"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 - include_tasks: "{{ node_config_hook }}"
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
index 439700df6..50044eb3e 100644
--- a/roles/openshift_node/tasks/upgrade/config_changes.yml
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -1,7 +1,7 @@
 ---
 - name: Update systemd units
   include_tasks: ../systemd_units.yml
-  when: openshift.common.is_containerized
+  when: openshift_is_containerized

 - name: Update oreg value
   yedit:
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 45b0be0a0..bd6f42182 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,7 +1,7 @@
 ---
 # input variables:
 # - openshift_service_type
-# - openshift.common.is_containerized
+# - openshift_is_containerized
 # - openshift.common.hostname
 # - openshift.master.api_port
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index cc9a8f2d9..91a358095 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -3,7 +3,7 @@
 # - openshift_service_type
 # - component
 # - openshift_pkg_version
-# - openshift.common.is_atomic
+# - openshift_is_atomic

 # Pre-pull new node rpm, but don't install
 - name: download new node packages
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
index 32eeb76c6..c9094e05a 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -3,7 +3,7 @@
 # - openshift_service_type
 # - component
 # - openshift_pkg_version
-# - openshift.common.is_atomic
+# - openshift_is_atomic

 # Install the pre-pulled RPM
 # Note: dnsmasq is covered in it's own play. openvswitch is included here
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
index 2fff556e5..6d92516c3 100644
--- a/roles/openshift_node/tasks/upgrade/stop_services.yml
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -19,7 +19,7 @@
   - "{{ openshift_service_type }}-master-controllers"
   - "{{ openshift_service_type }}-node"
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool

 - service:
     name: docker
@@ -40,4 +40,4 @@
   - "{{ openshift_service_type }}-node"
   - openvswitch
   failed_when: false
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
index 7f591996c..3ae7dc6b6 100644
--- a/roles/openshift_node/tasks/upgrade_pre.yml
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -11,7 +11,7 @@
   command: "{{ ansible_pkg_mgr }} makecache"
   register: result
   until: result is succeeded
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 - name: Check Docker image count
   shell: "docker images -aq | wc -l"
@@ -26,7 +26,7 @@
   - l_docker_upgrade | bool

 - include_tasks: upgrade/containerized_upgrade_pull.yml
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool

 # Prepull the rpms for docker upgrade, but don't install
 - name: download docker upgrade rpm
@@ -40,7 +40,7 @@

 - include_tasks: upgrade/rpm_upgrade.yml
   vars:
     openshift_version: "{{ openshift_pkg_version | default('') }}"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

 # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
 - name: Check for swap usage
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 1e5ebe98e..e95e38fdf 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -51,7 +51,7 @@

 - name: Generate the node client config
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
    {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
    --certificate-authority {{ named_ca_certificate }}
    {% endfor %}
@@ -77,7 +77,7 @@

 - name: Generate the node server certificate
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
    --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
    --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
    --expire-days={{ openshift_node_cert_expire_days }}
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index 346605ff7..ef9ab7f5f 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -8,7 +8,7 @@

 - name: Create PersistentVolumes
   command: >
-    {{ openshift.common.client_binary }} create
+    {{ openshift_client_binary }} create
    -f {{ mktemp.stdout }}/persistent-volumes.yml
    --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pv_create_output
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index e44f9b18f..2c5519192 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -8,7 +8,7 @@

 - name: Create PersistentVolumeClaims
   command: >
-    {{ openshift.common.client_binary }} create
+    {{ openshift_client_binary }} create
    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
    --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pvc_create_output
diff --git a/roles/openshift_project_request_template/tasks/main.yml b/roles/openshift_project_request_template/tasks/main.yml
index c31ee5795..3403840fb 100644
--- a/roles/openshift_project_request_template/tasks/main.yml
+++ b/roles/openshift_project_request_template/tasks/main.yml
@@ -6,7 +6,7 @@

 - name: Generate default project template
   command: |
-    {{ openshift.common.client_binary | quote }} \
+    {{ openshift_client_binary | quote }} \
      --config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \
      --output yaml \
      adm create-bootstrap-project-template \
@@ -28,7 +28,7 @@

 - name: Create or update project request template
   command: |
-    {{ openshift.common.client_binary }} \
+    {{ openshift_client_binary }} \
      --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
      --namespace {{ openshift_project_request_template_namespace | quote }} \
      apply --filename {{ mktemp.stdout }}
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 2fb1c08e5..749df5152 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -39,7 +39,7 @@
 # TODO remove this when annotations are supported by oc_serviceaccount
 - name: annotate serviceaccount
   command: >
-    {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
    serviceaccount prometheus
    serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
    serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
@@ -97,7 +97,7 @@
 # TODO remove this when annotations are supported by oc_service
 - name: annotate prometheus service
   command: >
-    {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
    service prometheus
    prometheus.io/scrape='true'
    prometheus.io/scheme=https
@@ -105,7 +105,7 @@

 - name: annotate alerts service
   command: >
-    {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
    service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'

 # create prometheus and alerts routes
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index e543d753c..de763f6cf 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Check efs current replica count
   command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
    -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}
   register: efs_replica_count
   when: not ansible_check_mode
@@ -58,7 +58,7 @@
 # anyuid in order to run as root & chgrp shares with allocated gids
 - name: "Check efs anyuid permissions"
   command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
    get scc/anyuid -o jsonpath='{.users}'
   register: efs_anyuid
   check_mode: no
@@ -66,7 +66,7 @@

 - name: "Set anyuid permissions for efs"
   command: >
-    {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
+    {{ openshift_client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
    add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
   register: efs_output
   failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index 49d03f203..a4ce53eae 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    get {{file_content.kind}} {{file_content.metadata.name}}
    -o jsonpath='{.metadata.resourceVersion}'
    -n {{namespace}}
@@ -11,7 +11,7 @@

 - name: Applying {{file_name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    apply -f {{ file_name }}
    -n {{ namespace }}
   register: generation_apply
@@ -20,7 +20,7 @@

 - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    get {{file_content.kind}} {{file_content.metadata.name}}
    -o jsonpath='{.metadata.resourceVersion}'
    -n {{namespace}}
@@ -32,7 +32,7 @@

 - name: Removing previous {{file_name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    delete -f {{ file_name }}
    -n {{ namespace }}
   register: generation_delete
@@ -42,7 +42,7 @@

 - name: Recreating {{file_name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
    apply -f {{ file_name }}
    -n {{ namespace }}
   register: generation_apply
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
index 602dee773..ac12087ec 100644
--- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -5,7 +5,7 @@

 # delete the deployment objects that we had created
 - name: delete provisioner api objects
   command: >
-    {{ openshift.common.client_binary }}
--config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - dc @@ -15,7 +15,7 @@ # delete our old secrets - name: delete provisioner secrets command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - provisioners-efs @@ -26,7 +26,7 @@ # delete cluster role bindings - name: delete cluster role bindings command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - run-provisioners-efs diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index cd7bda2c6..e478023f8 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -12,7 +12,7 @@ - name: Generate signing cert command: > - {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert + {{ openshift_client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer @@ -60,7 +60,7 @@ register: apiserver_ca - shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" register: get_apiservices changed_when: no diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 4b842c166..452d869f6 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -38,7 +38,7 @@ - name: Make kube-service-catalog project network global command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog - include_tasks: generate_certs.yml @@ -93,7 +93,7 @@ # only do this if we don't already have the updated role info - name: update edit role for service catalog and pod preset access command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml when: - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | 
lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) @@ -116,7 +116,7 @@ # only do this if we don't already have the updated role info - name: update admin role for service catalog and pod preset access command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml when: - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) @@ -139,7 +139,7 @@ # only do this if we don't already have the updated role info - name: update view role for service catalog access command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml when: - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index a832e1f85..aa32d0513 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@ --- - name: Remove Service Catalog APIServer command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog # TODO: this module doesn't currently remove this #- name: Remove service catalog api service @@ -48,7 +48,7 @@ - name: Remove Service Catalog kube-system Role Bindings shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template @@ -58,7 +58,7 @@ - name: Remove Service Catalog kube-service-catalog Role Bindings shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template diff --git
a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 9307cb957..001578406 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -2,7 +2,7 @@ - name: Make sure heketi-client is installed package: name=heketi-client state=present when: - - not openshift.common.is_atomic | bool + - not openshift_is_atomic | bool - not glusterfs_heketi_is_native | bool register: result until: result is succeeded @@ -238,14 +238,14 @@ - name: Set heketi-cli command set_fact: - glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" + glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" - name: Verify heketi service command: "{{ glusterfs_heketi_client }} cluster list" changed_when: False - name: Place heketi topology on heketi Pod - shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" + shell: "{{ openshift_client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" when: - glusterfs_heketi_is_native diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 60b9ca497..c0a8c53de 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -4,7 +4,7 @@ register: setup_storage - name: Copy heketi-storage list - shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" + shell: "{{ 
openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" # This is used in the subsequent task - name: Copy the admin client config @@ -15,7 +15,7 @@ # Need `command` here because heketi-storage.json contains multiple objects. - name: Copy heketi DB to GlusterFS volume - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" + command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" when: setup_storage.rc == 0 - name: Wait for copy job to finish @@ -126,7 +126,7 @@ - name: Set heketi-cli command set_fact: - glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'" + glusterfs_heketi_client: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'" - name: Verify heketi service command: "{{ glusterfs_heketi_client }} cluster list" diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml index c8e7b6d7c..ff92e59e5 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/main.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml @@ -2,7 +2,7 @@ # TODO -- this may actually work on atomic hosts - fail: msg: "openshift_storage_nfs_lvm is not compatible with atomic host" - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create lvm volumes lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index 94dc63bd2..9a72adbdc 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,7 +1,7 @@ --- - name: Install NFS server package: name=nfs-utils state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index e50d5371e..97e58ffac 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -1,10 +1,6 @@ --- # Determine the openshift_version to configure if none has been specified or set previously. -- set_fact: - is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}" - is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" - # Block attempts to install origin without specifying some kind of version information. # This is because the latest tags for origin are usually alpha builds, which should not # be used by default. Users must indicate what they want. @@ -16,7 +12,7 @@ component images to use. You may want the latest (usually alpha) releases or a more stable release. (Suggestion: add openshift_release="x.y" to inventory.) 
when: - - is_containerized | bool + - openshift_is_containerized | bool - openshift.common.deployment_type == 'origin' - openshift_release is not defined - openshift_image_tag is not defined @@ -94,11 +90,11 @@ block: - name: Set openshift_version for rpm installation include_tasks: set_version_rpm.yml - when: not is_containerized | bool + when: not openshift_is_containerized | bool - name: Set openshift_version for containerized installation include_tasks: set_version_containerized.yml - when: is_containerized | bool + when: openshift_is_containerized | bool - block: - name: Get available {{ openshift_service_type}} version @@ -121,8 +117,8 @@ - openshift_pkg_version is not defined - openshift_image_tag is not defined when: - - is_containerized | bool - - not is_atomic | bool + - openshift_is_containerized | bool + - not openshift_is_atomic | bool # Warn if the user has provided an openshift_image_tag but is not doing a containerized install # NOTE: This will need to be modified/removed for future container + rpm installations work. @@ -132,7 +128,7 @@ openshift_image_tag is used for containerized installs. If you are trying to specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node. when: - - not is_containerized | bool + - not openshift_is_containerized | bool - openshift_image_tag is defined # At this point we know openshift_version is set appropriately. Now we set @@ -182,14 +178,14 @@ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." name: Abort if openshift_pkg_version was not set when: - - not is_containerized | bool + - not openshift_is_containerized | bool - openshift_version == '0.0' # We can't map an openshift_release to full rpm version like we can with containers; make sure # the rpm version we looked up matches the release requested and error out if not. - name: For an RPM install, abort when the release requested does not match the available version. 
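# e.g. a requested openshift_release of 3.7 must match the 3.7.* rpm version detected above, or the install aborts here.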
when: - - not is_containerized | bool + - not openshift_is_containerized | bool - openshift_release is defined assert: that: diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 1253c1133..2fc9779d6 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -44,16 +44,16 @@ - name: Apply template file shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }} - | {{ openshift.common.client_binary }} apply -f - + | {{ openshift_client_binary }} apply -f - # reconcile with rbac - name: Reconcile with RBAC file shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f - # Check that the TSB is running - name: Verify that TSB is running @@ -80,7 +80,7 @@ # Register with broker - name: Register TSB with broker shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift.common.client_binary }} apply -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f - - file: state: absent diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 8b5593ff9..8b4d798db 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -13,11 +13,11 @@ - name: Delete TSB broker shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f - - name: Delete TSB objects shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f - - name: empty out tech preview extension file for service console UI copy: diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml index e95d274d5..4a28d47b2 100644 --- a/roles/tuned/tasks/main.yml +++ b/roles/tuned/tasks/main.yml @@ -11,7 +11,7 @@ block: - name: Set tuned OpenShift variables set_fact: - openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}" + openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift_is_atomic else 'virtual-guest' }}" - name: Ensure directory structure exists file: -- cgit v1.2.3 From d3fefc32a727fe3c13159c4e9fe4399f35b487a8 Mon Sep 17 00:00:00 2001 From: Michael Gugino 
Date: Thu, 4 Jan 2018 23:55:34 -0500 Subject: Move more plugins to lib_utils This commit continues moving plugins into lib_utils. This commit does not move any plugins for add-on roles such as logging and metrics. --- .../private/certificates-backup.yml | 1 + roles/etcd/library/delegated_serial_command.py | 274 ------- .../fetch_client_certificates_from_ca.yml | 1 + .../fetch_server_certificates_from_ca.yml | 2 + .../action_plugins/generate_pv_pvcs_list.py | 157 ++++ roles/lib_utils/filter_plugins/oo_cert_expiry.py | 66 ++ roles/lib_utils/filter_plugins/oo_filters.py | 9 + .../filter_plugins/openshift_aws_filters.py | 74 ++ .../filter_plugins/openshift_hosted_filters.py | 42 ++ roles/lib_utils/filter_plugins/openshift_master.py | 532 +++++++++++++ .../lib_utils/library/delegated_serial_command.py | 274 +++++++ roles/lib_utils/library/openshift_cert_expiry.py | 839 +++++++++++++++++++++ .../library/openshift_container_binary_sync.py | 205 +++++ .../openshift_master_facts_default_predicates.py | 143 ++++ .../openshift_master_facts_default_priorities.py | 117 +++ roles/lib_utils/test/conftest.py | 172 +++++ .../test/openshift_master_facts_bad_input_tests.py | 57 ++ .../test/openshift_master_facts_conftest.py | 54 ++ ...nshift_master_facts_default_predicates_tests.py | 193 +++++ ...nshift_master_facts_default_priorities_tests.py | 167 ++++ roles/lib_utils/test/test_fakeopensslclasses.py | 90 +++ roles/lib_utils/test/test_load_and_handle_cert.py | 67 ++ roles/openshift_aws/defaults/main.yml | 2 + .../filter_plugins/openshift_aws_filters.py | 74 -- roles/openshift_aws/tasks/build_node_group.yml | 1 + roles/openshift_aws/tasks/wait_for_groups.yml | 1 + .../filter_plugins/oo_cert_expiry.py | 66 -- .../library/openshift_cert_expiry.py | 839 --------------------- roles/openshift_certificate_expiry/tasks/main.yml | 4 +- .../openshift_certificate_expiry/test/conftest.py | 119 --- .../test/test_fakeopensslclasses.py | 90 --- .../test/test_load_and_handle_cert.py | 67 -- .../library/openshift_container_binary_sync.py | 205 ----- roles/openshift_cli/tasks/main.yml | 2 + .../openshift_checks/disk_availability.py | 2 +- .../filter_plugins/openshift_hosted_filters.py | 42 -- roles/openshift_hosted/tasks/router.yml | 1 + .../filter_plugins/openshift_logging.py | 9 - roles/openshift_logging_fluentd/defaults/main.yml | 1 + roles/openshift_logging_mux/defaults/main.yml | 1 + roles/openshift_master/tasks/main.yml | 1 + .../tasks/upgrade/upgrade_scheduler.yml | 2 + roles/openshift_master_certificates/tasks/main.yml | 1 + .../filter_plugins/openshift_master.py | 532 ------------- roles/openshift_master_facts/tasks/main.yml | 3 + roles/openshift_master_facts/test/conftest.py | 54 -- .../test/openshift_master_facts_bad_input_tests.py | 57 -- ...nshift_master_facts_default_predicates_tests.py | 193 ----- ...nshift_master_facts_default_priorities_tests.py | 167 ---- .../filter_plugins/openshift_named_certificates.py | 21 - .../action_plugins/generate_pv_pvcs_list.py | 157 ---- roles/openshift_persistent_volumes/tasks/main.yml | 3 +- .../filter_plugins/openshift_sanitize_inventory.py | 10 - .../filter_plugins/openshift_storage_glusterfs.py | 23 - .../tasks/glusterfs_config.yml | 1 + .../tasks/glusterfs_registry.yml | 1 + 56 files changed, 3286 insertions(+), 3002 deletions(-) delete mode 100755 roles/etcd/library/delegated_serial_command.py create mode 100644 roles/lib_utils/action_plugins/generate_pv_pvcs_list.py create mode 100644 roles/lib_utils/filter_plugins/oo_cert_expiry.py create mode 100644 
roles/lib_utils/filter_plugins/openshift_aws_filters.py create mode 100644 roles/lib_utils/filter_plugins/openshift_hosted_filters.py create mode 100644 roles/lib_utils/filter_plugins/openshift_master.py create mode 100755 roles/lib_utils/library/delegated_serial_command.py create mode 100644 roles/lib_utils/library/openshift_cert_expiry.py create mode 100644 roles/lib_utils/library/openshift_container_binary_sync.py create mode 100644 roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py create mode 100644 roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py create mode 100644 roles/lib_utils/test/conftest.py create mode 100644 roles/lib_utils/test/openshift_master_facts_bad_input_tests.py create mode 100644 roles/lib_utils/test/openshift_master_facts_conftest.py create mode 100644 roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py create mode 100644 roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py create mode 100644 roles/lib_utils/test/test_fakeopensslclasses.py create mode 100644 roles/lib_utils/test/test_load_and_handle_cert.py delete mode 100644 roles/openshift_aws/filter_plugins/openshift_aws_filters.py delete mode 100644 roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py delete mode 100644 roles/openshift_certificate_expiry/library/openshift_cert_expiry.py delete mode 100644 roles/openshift_certificate_expiry/test/conftest.py delete mode 100644 roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py delete mode 100644 roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py delete mode 100644 roles/openshift_cli/library/openshift_container_binary_sync.py delete mode 100644 roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py delete mode 100644 roles/openshift_master_facts/filter_plugins/openshift_master.py delete mode 100644 roles/openshift_master_facts/test/conftest.py delete mode 100644 roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py delete mode 100644 roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py delete mode 100644 roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py delete mode 100644 roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py delete mode 100644 roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py delete mode 100644 roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py (limited to 'roles/openshift_persistent_volumes') diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml index 4dbc041b0..56af18ca7 100644 --- a/playbooks/openshift-master/private/certificates-backup.yml +++ b/playbooks/openshift-master/private/certificates-backup.yml @@ -28,6 +28,7 @@ path: "{{ openshift.common.config_base }}/master/{{ item }}" state: absent with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}" - "etcd.server.crt" - "etcd.server.key" diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py deleted file mode 100755 index 0cab1ca88..000000000 --- a/roles/etcd/library/delegated_serial_command.py +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# (c) 
2016, Andrew Butcher -# -# This module is derrived from the Ansible command module. -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin - -''' delegated_serial_command ''' - -import datetime -import errno -import glob -import shlex -import os -import fcntl -import time - -DOCUMENTATION = ''' ---- -module: delegated_serial_command -short_description: Executes a command on a remote node -version_added: historical -description: - - The M(command) module takes the command name followed by a list - of space-delimited arguments. - - The given command will be executed on all selected nodes. It - will not be processed through the shell, so variables like - C($HOME) and operations like C("<"), C(">"), C("|"), and C("&") - will not work (use the M(shell) module if you need these - features). - - Creates and maintains a lockfile such that this module will - wait for other invocations to proceed. -options: - command: - description: - - the command to run - required: true - default: null - creates: - description: - - a filename or (since 2.0) glob pattern, when it already - exists, this step will B(not) be run. - required: no - default: null - removes: - description: - - a filename or (since 2.0) glob pattern, when it does not - exist, this step will B(not) be run. - version_added: "0.8" - required: no - default: null - chdir: - description: - - cd into this directory before running the command - version_added: "0.6" - required: false - default: null - executable: - description: - - change the shell used to execute the command. Should be an - absolute path to the executable. - required: false - default: null - version_added: "0.9" - warn: - version_added: "1.8" - default: yes - description: - - if command warnings are on in ansible.cfg, do not warn about - this particular line if set to no/false. - required: false - lockfile: - default: yes - description: - - the lockfile that will be created - timeout: - default: yes - description: - - time in milliseconds to wait to obtain the lock -notes: - - If you want to run a command through the shell (say you are using C(<), - C(>), C(|), etc), you actually want the M(shell) module instead. The - M(command) module is much more secure as it's not affected by the user's - environment. - - " C(creates), C(removes), and C(chdir) can be specified after - the command. For instance, if you only want to run a command if - a certain file does not exist, use this." -author: - - Ansible Core Team - - Michael DeHaan - - Andrew Butcher -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks. -- delegated_serial_command: - command: /sbin/shutdown -t now - -# Run the command if the specified file does not exist. 
-- delegated_serial_command: - command: /usr/bin/make_database.sh arg1 arg2 - creates: /path/to/database -''' - -# Dict of options and their defaults -OPTIONS = {'chdir': None, - 'creates': None, - 'command': None, - 'executable': None, - 'NO_LOG': None, - 'removes': None, - 'warn': True, - 'lockfile': None, - 'timeout': None} - - -def check_command(commandline): - ''' Check provided command ''' - arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group', - 'ln': 'state=link', 'mkdir': 'state=directory', - 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'} - commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri', - 'svn': 'subversion', 'service': 'service', - 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt', - 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile', - 'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'} - become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas'] - warnings = list() - command = os.path.basename(commandline.split()[0]) - # pylint: disable=line-too-long - if command in arguments: - warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command)) - if command in commands: - warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command)) - if command in become: - warnings.append( - "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,)) - return warnings - - -# pylint: disable=too-many-statements,too-many-branches,too-many-locals -def main(): - ''' Main module function ''' - module = AnsibleModule( # noqa: F405 - argument_spec=dict( - _uses_shell=dict(type='bool', default=False), - command=dict(required=True), - chdir=dict(), - executable=dict(), - creates=dict(), - removes=dict(), - warn=dict(type='bool', default=True), - lockfile=dict(default='/tmp/delegated_serial_command.lock'), - timeout=dict(type='int', default=30) - ) - ) - - shell = module.params['_uses_shell'] - chdir = module.params['chdir'] - executable = module.params['executable'] - command = module.params['command'] - creates = module.params['creates'] - removes = module.params['removes'] - warn = module.params['warn'] - lockfile = module.params['lockfile'] - timeout = module.params['timeout'] - - if command.strip() == '': - module.fail_json(rc=256, msg="no command given") - - iterated = 0 - lockfd = open(lockfile, 'w+') - while iterated < timeout: - try: - fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) - break - # pylint: disable=invalid-name - except IOError as e: - if e.errno != errno.EAGAIN: - module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror)) - else: - iterated += 1 - time.sleep(0.1) - - if chdir: - chdir = os.path.abspath(os.path.expanduser(chdir)) - os.chdir(chdir) - - if creates: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of command executions. - path = os.path.expanduser(creates) - if glob.glob(path): - module.exit_json( - cmd=command, - stdout="skipped, since %s exists" % path, - changed=False, - stderr=False, - rc=0 - ) - - if removes: - # do not run the command if the line contains removes=filename - # and the filename does not exist. This allows idempotence - # of command executions. 
- path = os.path.expanduser(removes) - if not glob.glob(path): - module.exit_json( - cmd=command, - stdout="skipped, since %s does not exist" % path, - changed=False, - stderr=False, - rc=0 - ) - - warnings = list() - if warn: - warnings = check_command(command) - - if not shell: - command = shlex.split(command) - startd = datetime.datetime.now() - - # pylint: disable=invalid-name - rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell) - - fcntl.flock(lockfd, fcntl.LOCK_UN) - lockfd.close() - - endd = datetime.datetime.now() - delta = endd - startd - - if out is None: - out = '' - if err is None: - err = '' - - module.exit_json( - cmd=command, - stdout=out.rstrip("\r\n"), - stderr=err.rstrip("\r\n"), - rc=rc, - start=str(startd), - end=str(endd), - delta=str(delta), - changed=True, - warnings=warnings, - iterated=iterated - ) - - -# import module snippets -# pylint: disable=wrong-import-position -from ansible.module_utils.basic import * # noqa: F402,F403 - -main() diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 78578a055..ce295d2f5 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -57,6 +57,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the client crt delegated_serial_command: command: > diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 987380d0c..7c8b87d99 100644 --- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -50,6 +50,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the server crt delegated_serial_command: command: > @@ -83,6 +84,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. 
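# The module serializes callers by taking an exclusive flock() on a shared lockfile (/tmp/delegated_serial_command.lock by default), so concurrent signing tasks queue rather than race.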
+# delegated_serial_command is a custom module in lib_utils - name: Sign and create the peer crt delegated_serial_command: command: > diff --git a/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py new file mode 100644 index 000000000..eb13a58ba --- /dev/null +++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py @@ -0,0 +1,157 @@ +""" +Ansible action plugin to generate lists of pv and pvc dictionaries +""" + +from ansible.plugins.action import ActionBase +from ansible import errors + + +class ActionModule(ActionBase): + """Action plugin to build persistent volume and claim lists.""" + + def get_templated(self, var_to_template): + """Return a properly templated ansible variable""" + return self._templar.template(self.task_vars.get(var_to_template)) + + def build_common(self, varname=None): + """Retrieve common variables for each pv and pvc type""" + volume = self.get_templated(str(varname) + '_volume_name') + size = self.get_templated(str(varname) + '_volume_size') + labels = self.task_vars.get(str(varname) + '_labels') + if labels: + labels = self._templar.template(labels) + else: + labels = dict() + access_modes = self.get_templated(str(varname) + '_access_modes') + return (volume, size, labels, access_modes) + + def build_pv_nfs(self, varname=None): + """Build pv dictionary for nfs storage type""" + host = self.task_vars.get(str(varname) + '_host') + if host: + host = self._templar.template(host) + elif host is None: + groups = self.task_vars.get('groups') + default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group') + if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0: + host = groups[default_group_name][0] + else: + raise errors.AnsibleModuleError("|failed no storage host detected") + volume, size, labels, access_modes = self.build_common(varname=varname) + directory = self.get_templated(str(varname) + '_nfs_directory') + path = directory + '/' + volume + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + + def build_pv_openstack(self, varname=None): + """Build pv dictionary for openstack storage type""" + volume, size, labels, access_modes = self.build_common(varname=varname) + filesystem = self.get_templated(str(varname) + '_openstack_filesystem') + volume_id = self.get_templated(str(varname) + '_openstack_volumeID') + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + + def build_pv_glusterfs(self, varname=None): + """Build pv dictionary for glusterfs storage type""" + volume, size, labels, access_modes = self.build_common(varname=varname) + endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints') + path = self.get_templated(str(varname) + '_glusterfs_path') + read_only = self.get_templated(str(varname) + '_glusterfs_readOnly') + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + + def build_pv_dict(self, varname=None): + """Check for the existence of PV variables""" + kind = self.task_vars.get(str(varname) + '_kind') + if kind: + kind = self._templar.template(kind) + create_pv = self.task_vars.get(str(varname) + '_create_pv') + if
create_pv and self._templar.template(create_pv): + if kind == 'nfs': + return self.build_pv_nfs(varname=varname) + + elif kind == 'openstack': + return self.build_pv_openstack(varname=varname) + + elif kind == 'glusterfs': + return self.build_pv_glusterfs(varname=varname) + + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + varname) + raise errors.AnsibleModuleError(msg) + return None + + def build_pvc_dict(self, varname=None): + """Check for the existence of PVC variables""" + kind = self.task_vars.get(str(varname) + '_kind') + if kind: + kind = self._templar.template(kind) + create_pv = self.task_vars.get(str(varname) + '_create_pv') + if create_pv: + create_pv = self._templar.template(create_pv) + create_pvc = self.task_vars.get(str(varname) + '_create_pvc') + if create_pvc: + create_pvc = self._templar.template(create_pvc) + if kind != 'object' and create_pv and create_pvc: + volume, size, _, access_modes = self.build_common(varname=varname) + return dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + return None + + def run(self, tmp=None, task_vars=None): + """Run generate_pv_pvcs_list action plugin""" + result = super(ActionModule, self).run(tmp, task_vars) + # Ignore setting self.task_vars outside of init. + # pylint: disable=W0201 + self.task_vars = task_vars or {} + + result["changed"] = False + result["failed"] = False + result["msg"] = "persistent_volumes list and persistent_volume_claims list created" + vars_to_check = ['openshift_hosted_registry_storage', + 'openshift_hosted_router_storage', + 'openshift_hosted_etcd_storage', + 'openshift_logging_storage', + 'openshift_loggingops_storage', + 'openshift_metrics_storage', + 'openshift_prometheus_storage', + 'openshift_prometheus_alertmanager_storage', + 'openshift_prometheus_alertbuffer_storage'] + persistent_volumes = [] + persistent_volume_claims = [] + for varname in vars_to_check: + pv_dict = self.build_pv_dict(varname) + if pv_dict: + persistent_volumes.append(pv_dict) + pvc_dict = self.build_pvc_dict(varname) + if pvc_dict: + persistent_volume_claims.append(pvc_dict) + result["persistent_volumes"] = persistent_volumes + result["persistent_volume_claims"] = persistent_volume_claims + return result diff --git a/roles/lib_utils/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py new file mode 100644 index 000000000..58b228fee --- /dev/null +++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +""" +Custom filters for use in openshift-ansible +""" + + +# Disabling too-many-public-methods, since filter methods are necessarily +# public +# pylint: disable=too-many-public-methods +class FilterModule(object): + """ Custom ansible filters """ + + @staticmethod + def oo_cert_expiry_results_to_json(hostvars, play_hosts): + """Takes results (`hostvars`) from the openshift_cert_expiry role +check and serializes them into proper machine-readable JSON +output. This filter parameter **MUST** be the playbook `hostvars` +variable. The `play_hosts` parameter is so we know what to loop over +when we're extracting the values.
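+(Each host's entry is read from that host's `check_results` fact, registered by the openshift_cert_expiry check.)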
+ +Returns: + +Results are collected into two top-level keys under the `json_results` +dict: + +* `json_results.data` [dict] - Each individual host check result, keys are hostnames +* `json_results.summary` [dict] - Summary of number of `warning` and `expired` +certificates + +Example playbook usage: + + - name: Generate expiration results JSON + run_once: yes + delegate_to: localhost + when: openshift_certificate_expiry_save_json_results|bool + copy: + content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" + dest: "{{ openshift_certificate_expiry_json_results_path }}" + + """ + json_result = { + 'data': {}, + 'summary': {}, + } + + for host in play_hosts: + json_result['data'][host] = hostvars[host]['check_results']['check_results'] + + total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts]) + total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts]) + total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts]) + total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts]) + + json_result['summary']['warning'] = total_warnings + json_result['summary']['expired'] = total_expired + json_result['summary']['ok'] = total_ok + json_result['summary']['total'] = total_total + + return json_result + + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json, + } diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py index a2ea287cf..fc14b5633 100644 --- a/roles/lib_utils/filter_plugins/oo_filters.py +++ b/roles/lib_utils/filter_plugins/oo_filters.py @@ -589,6 +589,14 @@ that result to this filter plugin. return secret_name +def map_from_pairs(source, delim="="): + ''' Returns a dict parsed from a comma-separated string of delim-joined key/value pairs, e.g. 'a=1,b=2' -> {'a': '1', 'b': '2'} ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + class FilterModule(object): """ Custom ansible filter mapping """ @@ -618,4 +626,5 @@ class FilterModule(object): "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule, "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list, "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets, + "map_from_pairs": map_from_pairs } diff --git a/roles/lib_utils/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py new file mode 100644 index 000000000..dfcb11da3 --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_aws +''' + +from ansible import errors + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_aws role''' + + @staticmethod + def scale_groups_serial(scale_group_info, upgrade=False): + ''' This function will determine what the deployment serial should be and return it + + Search through the tags and find the deployment_serial tag. Once found, + determine if an increment is needed during an upgrade.
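+ (For example, a tag of {'key': 'deployment_serial', 'value': '3'} yields 3, or 4 when upgrade is true.)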
+ if upgrade is true then increment the serial and return it + else return the serial + ''' + if scale_group_info == []: + return 1 + + scale_group_info = scale_group_info[0] + + if not isinstance(scale_group_info, dict): + raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict") + + serial = None + + for tag in scale_group_info['tags']: + if tag['key'] == 'deployment_serial': + serial = int(tag['value']) + if upgrade: + serial += 1 + break + else: + raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found") + + return serial + + @staticmethod + def scale_groups_match_capacity(scale_group_info): + ''' This function will verify that the scale group instance count matches + the scale group desired capacity + + ''' + for scale_group in scale_group_info: + if scale_group['desired_capacity'] != len(scale_group['instances']): + return False + + return True + + @staticmethod + def build_instance_tags(clusterid): + ''' This function will return a dictionary of the instance tags. + + The main desire to have this inside of a filter_plugin is that we + need to build the following key. + + {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} + + ''' + tags = {'clusterid': clusterid, + 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} + + return tags + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'build_instance_tags': self.build_instance_tags, + 'scale_groups_match_capacity': self.scale_groups_match_capacity, + 'scale_groups_serial': self.scale_groups_serial} diff --git a/roles/lib_utils/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py new file mode 100644 index 000000000..003ce5f9e --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py @@ -0,0 +1,42 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_hosted +''' + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_hosted role''' + + @staticmethod + def get_router_replicas(replicas=None, router_nodes=None): + ''' This function will return the number of replicas + based on the results from the defined + openshift_hosted_router_replicas OR + the query from oc_obj on openshift nodes with a selector OR + default to 1 + + ''' + # We always use what they've specified if they've specified a value + if replicas is not None: + return replicas + + replicas = 1 + + # Ignore boolean expression limit of 5. 
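+ # router_nodes, when supplied, is an oc_obj query result shaped like + # {'results': {'results': [{'items': [...]}]}}; the replica count then + # defaults to the number of items (matching nodes) found.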
+ # pylint: disable=too-many-boolean-expressions + if (isinstance(router_nodes, dict) and + 'results' in router_nodes and + 'results' in router_nodes['results'] and + isinstance(router_nodes['results']['results'], list) and + len(router_nodes['results']['results']) > 0 and + 'items' in router_nodes['results']['results'][0]): + + if len(router_nodes['results']['results'][0]['items']) > 0: + replicas = len(router_nodes['results']['results'][0]['items']) + + return replicas + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'get_router_replicas': self.get_router_replicas} diff --git a/roles/lib_utils/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py new file mode 100644 index 000000000..ff15f693b --- /dev/null +++ b/roles/lib_utils/filter_plugins/openshift_master.py @@ -0,0 +1,532 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift-master +''' +import copy +import sys + +from ansible import errors +from ansible.parsing.yaml.dumper import AnsibleDumper +from ansible.plugins.filter.core import to_bool as ansible_bool + +# ansible.compat.six goes away with Ansible 2.4 +try: + from ansible.compat.six import string_types, u +except ImportError: + from ansible.module_utils.six import string_types, u + +import yaml + + +class IdentityProviderBase(object): + """ IdentityProviderBase + + Attributes: + name (str): Identity provider Name + login (bool): Is this identity provider a login provider? + challenge (bool): Is this identity provider a challenge provider? + provider (dict): Provider specific config + _idp (dict): internal copy of the IDP dict passed in + _required (list): List of lists of strings for required attributes + _optional (list): List of lists of strings for optional attributes + _allow_additional (bool): Does this provider support attributes + not in _required and _optional + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + # disabling this check since the number of instance attributes are + # necessary for this class + # pylint: disable=too-many-instance-attributes + def __init__(self, api_version, idp): + if api_version not in ['v1']: + raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version)) + + self._idp = copy.deepcopy(idp) + + if 'name' not in self._idp: + raise errors.AnsibleFilterError("|failed identity provider missing a name") + + if 'kind' not in self._idp: + raise errors.AnsibleFilterError("|failed identity provider missing a kind") + + self.name = self._idp.pop('name') + self.login = ansible_bool(self._idp.pop('login', False)) + self.challenge = ansible_bool(self._idp.pop('challenge', False)) + self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind')) + + mm_keys = ('mappingMethod', 'mapping_method') + mapping_method = None + for key in mm_keys: + if key in self._idp: + mapping_method = self._idp.pop(key) + if mapping_method is None: + mapping_method = self.get_default('mappingMethod') + self.mapping_method = mapping_method + + valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] + if self.mapping_method not in valid_mapping_methods: + raise errors.AnsibleFilterError("|failed unknown mapping method " + "for provider {0}".format(self.__class__.__name__)) + self._required = [] + self._optional = [] + self._allow_additional = True + + @staticmethod + def validate_idp_list(idp_list): + ''' validates a list of idps ''' + names = [x.name for x 
in idp_list] + if len(set(names)) != len(names): + raise errors.AnsibleFilterError("|failed more than one provider configured with the same name") + + for idp in idp_list: + idp.validate() + + def validate(self): + ''' validate an instance of this idp class ''' + pass + + @staticmethod + def get_default(key): + ''' get a default value for a given key ''' + if key == 'mappingMethod': + return 'claim' + else: + return None + + def set_provider_item(self, items, required=False): + ''' set a provider item based on the list of item names provided. ''' + for item in items: + provider_key = items[0] + if item in self._idp: + self.provider[provider_key] = self._idp.pop(item) + break + else: + default = self.get_default(provider_key) + if default is not None: + self.provider[provider_key] = default + elif required: + raise errors.AnsibleFilterError("|failed provider {0} missing " + "required key {1}".format(self.__class__.__name__, provider_key)) + + def set_provider_items(self): + ''' set the provider items for this idp ''' + for items in self._required: + self.set_provider_item(items, True) + for items in self._optional: + self.set_provider_item(items) + if self._allow_additional: + for key in self._idp.keys(): + self.set_provider_item([key]) + else: + if len(self._idp) > 0: + raise errors.AnsibleFilterError("|failed provider {0} " + "contains unknown keys " + "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys()))) + + def to_dict(self): + ''' translate this idp to a dictionary ''' + return dict(name=self.name, challenge=self.challenge, + login=self.login, mappingMethod=self.mapping_method, + provider=self.provider) + + +class LDAPPasswordIdentityProvider(IdentityProviderBase): + """ LDAPPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['attributes'], ['url'], ['insecure']] + self._optional += [['ca'], + ['bindDN', 'bind_dn'], + ['bindPassword', 'bind_password']] + + self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False)) + + if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']: + pref_user = self._idp['attributes'].pop('preferred_username') + self._idp['attributes']['preferredUsername'] = pref_user + + def validate(self): + ''' validate this idp instance ''' + if not isinstance(self.provider['attributes'], dict): + raise errors.AnsibleFilterError("|failed attributes for provider " + "{0} must be a dictionary".format(self.__class__.__name__)) + + attrs = ['id', 'email', 'name', 'preferredUsername'] + for attr in attrs: + if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list): + raise errors.AnsibleFilterError("|failed {0} attribute for " + "provider {1} must be a list".format(attr, self.__class__.__name__)) + + unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs) + if len(unknown_attrs) > 0: + raise errors.AnsibleFilterError("|failed provider {0} has unknown " + "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs))) + + +class KeystonePasswordIdentityProvider(IdentityProviderBase): + """ KeystoneIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, 
idp): + super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['url'], ['domainName', 'domain_name']] + self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] + + +class RequestHeaderIdentityProvider(IdentityProviderBase): + """ RequestHeaderIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(RequestHeaderIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['headers']] + self._optional += [['challengeURL', 'challenge_url'], + ['loginURL', 'login_url'], + ['clientCA', 'client_ca'], + ['clientCommonNames', 'client_common_names'], + ['emailHeaders', 'email_headers'], + ['nameHeaders', 'name_headers'], + ['preferredUsernameHeaders', 'preferred_username_headers']] + + def validate(self): + ''' validate this idp instance ''' + if not isinstance(self.provider['headers'], list): + raise errors.AnsibleFilterError("|failed headers for provider {0} " + "must be a list".format(self.__class__.__name__)) + + +class AllowAllPasswordIdentityProvider(IdentityProviderBase): + """ AllowAllPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + + +class DenyAllPasswordIdentityProvider(IdentityProviderBase): + """ DenyAllPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + + +class HTPasswdPasswordIdentityProvider(IdentityProviderBase): + """ HTPasswdPasswordIdentity + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['file', 'filename', 'fileName', 'file_name']] + + @staticmethod + def get_default(key): + if key == 'file': + return '/etc/origin/htpasswd' + else: + return IdentityProviderBase.get_default(key) + + +class BasicAuthPasswordIdentityProvider(IdentityProviderBase): + """ BasicAuthPasswordIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['url']] + self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] + + +class IdentityProviderOauthBase(IdentityProviderBase): + """ IdentityProviderOauthBase + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + super(IdentityProviderOauthBase, self).__init__(api_version, idp) + self._allow_additional = False + self._required += [['clientID', 'client_id'], ['clientSecret', 
'client_secret']]
+
+    def validate(self):
+        ''' validate an instance of this idp class '''
+        pass
+
+
+class OpenIDIdentityProvider(IdentityProviderOauthBase):
+    """ OpenIDIdentityProvider
+
+        Attributes:
+
+        Args:
+            api_version(str): OpenShift config version
+            idp (dict): idp config dict
+
+        Raises:
+            AnsibleFilterError:
+    """
+    def __init__(self, api_version, idp):
+        IdentityProviderOauthBase.__init__(self, api_version, idp)
+        self._required += [['claims'], ['urls']]
+        self._optional += [['ca'],
+                           ['extraScopes'],
+                           ['extraAuthorizeParameters']]
+        if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
+            pref_user = self._idp['claims'].pop('preferred_username')
+            self._idp['claims']['preferredUsername'] = pref_user
+        if 'urls' in self._idp and 'user_info' in self._idp['urls']:
+            user_info = self._idp['urls'].pop('user_info')
+            self._idp['urls']['userInfo'] = user_info
+        if 'extra_scopes' in self._idp:
+            self._idp['extraScopes'] = self._idp.pop('extra_scopes')
+        if 'extra_authorize_parameters' in self._idp:
+            self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
+
+    def validate(self):
+        ''' validate this idp instance '''
+        if not isinstance(self.provider['claims'], dict):
+            raise errors.AnsibleFilterError("|failed claims for provider {0} "
+                                            "must be a dictionary".format(self.__class__.__name__))
+
+        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
+            if var in self.provider and not isinstance(self.provider[var], var_type):
+                raise errors.AnsibleFilterError("|failed {1} for provider "
+                                                "{0} must be a {2}".format(self.__class__.__name__,
+                                                                           var,
+                                                                           var_type.__name__))
+
+        required_claims = ['id']
+        optional_claims = ['email', 'name', 'preferredUsername']
+        all_claims = required_claims + optional_claims
+
+        for claim in required_claims:
+            if claim not in self.provider['claims']:
+                raise errors.AnsibleFilterError("|failed {0} claim missing "
+                                                "for provider {1}".format(claim, self.__class__.__name__))
+
+        for claim in all_claims:
+            if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
+                raise errors.AnsibleFilterError("|failed {0} claims for "
+                                                "provider {1} must be a list".format(claim, self.__class__.__name__))
+
+        unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
+        if len(unknown_claims) > 0:
+            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+                                            "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
+
+        if not isinstance(self.provider['urls'], dict):
+            raise errors.AnsibleFilterError("|failed urls for provider {0} "
+                                            "must be a dictionary".format(self.__class__.__name__))
+
+        required_urls = ['authorize', 'token']
+        optional_urls = ['userInfo']
+        all_urls = required_urls + optional_urls
+
+        for url in required_urls:
+            if url not in self.provider['urls']:
+                raise errors.AnsibleFilterError("|failed {0} url missing for "
+                                                "provider {1}".format(url, self.__class__.__name__))
+
+        unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
+        if len(unknown_urls) > 0:
+            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+                                            "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
+
+
+class GoogleIdentityProvider(IdentityProviderOauthBase):
+    """ GoogleIdentityProvider
+
+        Attributes:
+
+        Args:
+            api_version(str): OpenShift config version
+            idp (dict): idp config dict
+
+        Raises:
+            AnsibleFilterError:
+    """
+    def __init__(self, api_version, idp):
+ IdentityProviderOauthBase.__init__(self, api_version, idp) + self._optional += [['hostedDomain', 'hosted_domain']] + + def validate(self): + ''' validate this idp instance ''' + if self.challenge: + raise errors.AnsibleFilterError("|failed provider {0} does not " + "allow challenge authentication".format(self.__class__.__name__)) + + +class GitHubIdentityProvider(IdentityProviderOauthBase): + """ GitHubIdentityProvider + + Attributes: + + Args: + api_version(str): OpenShift config version + idp (dict): idp config dict + + Raises: + AnsibleFilterError: + """ + def __init__(self, api_version, idp): + IdentityProviderOauthBase.__init__(self, api_version, idp) + self._optional += [['organizations'], + ['teams']] + + def validate(self): + ''' validate this idp instance ''' + if self.challenge: + raise errors.AnsibleFilterError("|failed provider {0} does not " + "allow challenge authentication".format(self.__class__.__name__)) + + +class FilterModule(object): + ''' Custom ansible filters for use by the openshift_master role''' + + @staticmethod + def translate_idps(idps, api_version): + ''' Translates a list of dictionaries into a valid identityProviders config ''' + idp_list = [] + + if not isinstance(idps, list): + raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers") + for idp in idps: + if not isinstance(idp, dict): + raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries") + + cur_module = sys.modules[__name__] + idp_class = getattr(cur_module, idp['kind'], None) + idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp) + idp_inst.set_provider_items() + idp_list.append(idp_inst) + + IdentityProviderBase.validate_idp_list(idp_list) + return u(yaml.dump([idp.to_dict() for idp in idp_list], + allow_unicode=True, + default_flow_style=False, + width=float("inf"), + Dumper=AnsibleDumper)) + + @staticmethod + def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True): + ''' Return certificates to synchronize based on facts. 
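+
+        A minimal illustrative call (editor's sketch, assuming a populated
+        hostvars dict; not part of the original filter):
+
+            certs = FilterModule.certificates_to_synchronize(
+                hostvars, include_keys=False, include_ca=True)
+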
+        '''
+        if not issubclass(type(hostvars), dict):
+            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+        certs = ['admin.crt',
+                 'admin.key',
+                 'admin.kubeconfig',
+                 'master.kubelet-client.crt',
+                 'master.kubelet-client.key',
+                 'master.proxy-client.crt',
+                 'master.proxy-client.key',
+                 'service-signer.crt',
+                 'service-signer.key']
+        if bool(include_ca):
+            certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
+        if bool(include_keys):
+            certs += ['serviceaccounts.private.key',
+                      'serviceaccounts.public.key']
+        return certs
+
+    @staticmethod
+    def oo_htpasswd_users_from_file(file_contents):
+        ''' return a dictionary of htpasswd users from htpasswd file contents '''
+        htpasswd_entries = {}
+        if not isinstance(file_contents, string_types):
+            raise errors.AnsibleFilterError("failed, expects to filter on a string")
+        for line in file_contents.splitlines():
+            user = None
+            passwd = None
+            if len(line) == 0:
+                continue
+            if ':' in line:
+                user, passwd = line.split(':', 1)
+
+            if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
+                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
+                raise errors.AnsibleFilterError(error_msg)
+            htpasswd_entries[user] = passwd
+        return htpasswd_entries
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {"translate_idps": self.translate_idps,
+                "certificates_to_synchronize": self.certificates_to_synchronize,
+                "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/lib_utils/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
new file mode 100755
index 000000000..0cab1ca88
--- /dev/null
+++ b/roles/lib_utils/library/delegated_serial_command.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan , and others
+# (c) 2016, Andrew Butcher 
+#
+# This module is derived from the Ansible command module.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
+
+''' delegated_serial_command '''
+
+import datetime
+import errno
+import glob
+import shlex
+import os
+import fcntl
+import time
+
+DOCUMENTATION = '''
+---
+module: delegated_serial_command
+short_description: Executes a command on a remote node
+version_added: historical
+description:
+  - The M(command) module takes the command name followed by a list
+    of space-delimited arguments.
+  - The given command will be executed on all selected nodes. It
+    will not be processed through the shell, so variables like
+    C($HOME) and operations like C("<"), C(">"), C("|"), and C("&")
+    will not work (use the M(shell) module if you need these
+    features).
+  - Creates and maintains a lockfile such that this module will
+    wait for other invocations to proceed.
+options:
+  command:
+    description:
+      - the command to run
+    required: true
+    default: null
+  creates:
+    description:
+      - a filename or (since 2.0) glob pattern, when it already
+        exists, this step will B(not) be run.
+    required: no
+    default: null
+  removes:
+    description:
+      - a filename or (since 2.0) glob pattern, when it does not
+        exist, this step will B(not) be run.
+    version_added: "0.8"
+    required: no
+    default: null
+  chdir:
+    description:
+      - cd into this directory before running the command
+    version_added: "0.6"
+    required: false
+    default: null
+  executable:
+    description:
+      - change the shell used to execute the command. Should be an
+        absolute path to the executable.
+    required: false
+    default: null
+    version_added: "0.9"
+  warn:
+    version_added: "1.8"
+    default: yes
+    description:
+      - if command warnings are on in ansible.cfg, do not warn about
+        this particular line if set to no/false.
+    required: false
+  lockfile:
+    default: /tmp/delegated_serial_command.lock
+    description:
+      - the lockfile that will be created
+  timeout:
+    default: 30
+    description:
+      - number of 0.1 second intervals to wait to obtain the lock
+notes:
+  - If you want to run a command through the shell (say you are using C(<),
+    C(>), C(|), etc), you actually want the M(shell) module instead. The
+    M(command) module is much more secure as it's not affected by the user's
+    environment.
+  - " C(creates), C(removes), and C(chdir) can be specified after
+    the command. For instance, if you only want to run a command if
+    a certain file does not exist, use this."
+author:
+    - Ansible Core Team
+    - Michael DeHaan
+    - Andrew Butcher
+'''
+
+EXAMPLES = '''
+# Example from Ansible Playbooks.
+- delegated_serial_command:
+    command: /sbin/shutdown -t now
+
+# Run the command if the specified file does not exist.
+- delegated_serial_command:
+    command: /usr/bin/make_database.sh arg1 arg2
+    creates: /path/to/database
+'''
+
+# Dict of options and their defaults
+OPTIONS = {'chdir': None,
+           'creates': None,
+           'command': None,
+           'executable': None,
+           'NO_LOG': None,
+           'removes': None,
+           'warn': True,
+           'lockfile': None,
+           'timeout': None}
+
+
+def check_command(commandline):
+    ''' Check provided command '''
+    arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
+                 'ln': 'state=link', 'mkdir': 'state=directory',
+                 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
+    commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri',
+                'svn': 'subversion', 'service': 'service',
+                'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
+                'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
+                'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'}
+    become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']
+    warnings = list()
+    command = os.path.basename(commandline.split()[0])
+    # pylint: disable=line-too-long
+    if command in arguments:
+        warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command))
+    if command in commands:
+        warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command))
+    if command in become:
+        warnings.append(
+            "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,))
+    return warnings
+
+
+# pylint: disable=too-many-statements,too-many-branches,too-many-locals
+def main():
+    ''' Main module function '''
+    module = AnsibleModule(  # noqa: F405
+        argument_spec=dict(
+            _uses_shell=dict(type='bool',
default=False), + command=dict(required=True), + chdir=dict(), + executable=dict(), + creates=dict(), + removes=dict(), + warn=dict(type='bool', default=True), + lockfile=dict(default='/tmp/delegated_serial_command.lock'), + timeout=dict(type='int', default=30) + ) + ) + + shell = module.params['_uses_shell'] + chdir = module.params['chdir'] + executable = module.params['executable'] + command = module.params['command'] + creates = module.params['creates'] + removes = module.params['removes'] + warn = module.params['warn'] + lockfile = module.params['lockfile'] + timeout = module.params['timeout'] + + if command.strip() == '': + module.fail_json(rc=256, msg="no command given") + + iterated = 0 + lockfd = open(lockfile, 'w+') + while iterated < timeout: + try: + fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + break + # pylint: disable=invalid-name + except IOError as e: + if e.errno != errno.EAGAIN: + module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror)) + else: + iterated += 1 + time.sleep(0.1) + + if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) + os.chdir(chdir) + + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. + path = os.path.expanduser(creates) + if glob.glob(path): + module.exit_json( + cmd=command, + stdout="skipped, since %s exists" % path, + changed=False, + stderr=False, + rc=0 + ) + + if removes: + # do not run the command if the line contains removes=filename + # and the filename does not exist. This allows idempotence + # of command executions. + path = os.path.expanduser(removes) + if not glob.glob(path): + module.exit_json( + cmd=command, + stdout="skipped, since %s does not exist" % path, + changed=False, + stderr=False, + rc=0 + ) + + warnings = list() + if warn: + warnings = check_command(command) + + if not shell: + command = shlex.split(command) + startd = datetime.datetime.now() + + # pylint: disable=invalid-name + rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell) + + fcntl.flock(lockfd, fcntl.LOCK_UN) + lockfd.close() + + endd = datetime.datetime.now() + delta = endd - startd + + if out is None: + out = '' + if err is None: + err = '' + + module.exit_json( + cmd=command, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + rc=rc, + start=str(startd), + end=str(endd), + delta=str(delta), + changed=True, + warnings=warnings, + iterated=iterated + ) + + +# import module snippets +# pylint: disable=wrong-import-position +from ansible.module_utils.basic import * # noqa: F402,F403 + +main() diff --git a/roles/lib_utils/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py new file mode 100644 index 000000000..e355266b0 --- /dev/null +++ b/roles/lib_utils/library/openshift_cert_expiry.py @@ -0,0 +1,839 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=line-too-long,invalid-name + +"""For details on this module see DOCUMENTATION (below)""" + +import base64 +import datetime +import io +import os +import subprocess +import yaml + +# pylint import-error disabled because pylint cannot find the package +# when installed in a virtualenv +from ansible.module_utils.six.moves import configparser # pylint: disable=import-error +from ansible.module_utils.basic import AnsibleModule + +try: + # You can comment this import out and include a 'pass' in this + # block if you're manually testing this module on a NON-ATOMIC + # HOST (or any host that 
just doesn't have PyOpenSSL
+    # available). That will force the `load_and_handle_cert` function
+    # to use the Fake OpenSSL classes.
+    import OpenSSL.crypto
+    HAS_OPENSSL = True
+except ImportError:
+    # Some platforms (such as RHEL Atomic) may not have the Python
+    # OpenSSL library installed. In this case we will use a manual
+    # work-around to parse each certificate.
+    #
+    # Check for 'OpenSSL.crypto' in `sys.modules` later.
+    HAS_OPENSSL = False

DOCUMENTATION = '''
+---
+module: openshift_cert_expiry
+short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
+description:
+  - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
+  - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
+  - C(ok) - not expired, and outside of the expiration C(warning_days) window.
+  - C(warning) - not expired, but will expire between now and the C(warning_days) window.
+  - C(expired) - an expired certificate.
+  - Certificate flagging follows this logic:
+  - If the expiration date is before now then the certificate is classified as C(expired).
+  - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days) the certificate is classified as C(warning).
+  - All other conditions are classified as C(ok).
+  - The following keys are ALSO present in the certificate summary:
+  - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
+  - C(days_remaining) - The number of days until the certificate expires.
+  - C(expiry) - The date the certificate expires on.
+  - C(path) - The full path to the certificate on the examined host.
+version_added: "1.0"
+options:
+  config_base:
+    description:
+      - Base path to OCP system settings.
+    required: false
+    default: /etc/origin
+  warning_days:
+    description:
+      - Flag certificates which will expire in C(warning_days) days from now.
+    required: false
+    default: 30
+  show_all:
+    description:
+      - Enable this option to show analysis of ALL certificates examined by this module.
+      - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported.
+    required: false
+    default: false
+
+author: "Tim Bielawa (@tbielawa) "
+'''
+
+EXAMPLES = '''
+# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
+- openshift_cert_expiry:
+
+# Expand the warning window to show certificates expiring within a year from now
+- openshift_cert_expiry: warning_days=365
+
+# Show expired, soon to expire (now + 30 days), and all other certificates examined
+- openshift_cert_expiry: show_all=true
+'''
+
+
+class FakeOpenSSLCertificate(object):
+    """This provides a rough mock of what you get from
+`OpenSSL.crypto.load_certificate()`. This is a work-around for
+platforms missing the Python OpenSSL library.
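+
+    Illustrative use (editor's sketch, with pem_text holding the output of
+    `openssl x509 -in cert.crt -text` for some hypothetical cert.crt):
+
+        fake_cert = FakeOpenSSLCertificate(pem_text)
+        fake_cert.get_notAfter()  # e.g. '20190207181935Z'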
+ """ + def __init__(self, cert_string): + """`cert_string` is a certificate in the form you get from running a +.crt through 'openssl x509 -in CERT.cert -text'""" + self.cert_string = cert_string + self.serial = None + self.subject = None + self.extensions = [] + self.not_after = None + self._parse_cert() + + def _parse_cert(self): + """Manually parse the certificate line by line""" + self.extensions = [] + + PARSING_ALT_NAMES = False + PARSING_HEX_SERIAL = False + for line in self.cert_string.split('\n'): + l = line.strip() + if PARSING_ALT_NAMES: + # We're parsing a 'Subject Alternative Name' line + self.extensions.append( + FakeOpenSSLCertificateSANExtension(l)) + + PARSING_ALT_NAMES = False + continue + + if PARSING_HEX_SERIAL: + # Hex serials arrive colon-delimited + serial_raw = l.replace(':', '') + # Convert to decimal + self.serial = int('0x' + serial_raw, base=16) + PARSING_HEX_SERIAL = False + continue + + # parse out the bits that we can + if l.startswith('Serial Number:'): + # Decimal format: + # Serial Number: 11 (0xb) + # => 11 + # Hex Format (large serials): + # Serial Number: + # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf + # => 14449739080294792594019643629255165375 + if l.endswith(':'): + PARSING_HEX_SERIAL = True + continue + self.serial = int(l.split()[-2]) + + elif l.startswith('Not After :'): + # Not After : Feb 7 18:19:35 2019 GMT + # => strptime(str, '%b %d %H:%M:%S %Y %Z') + # => strftime('%Y%m%d%H%M%SZ') + # => 20190207181935Z + not_after_raw = l.partition(' : ')[-1] + # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT') + not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z') + self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ') + + elif l.startswith('X509v3 Subject Alternative Name:'): + PARSING_ALT_NAMES = True + continue + + elif l.startswith('Subject:'): + # O = system:nodes, CN = system:node:m01.example.com + self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1]) + + def get_serial_number(self): + """Return the serial number of the cert""" + return self.serial + + def get_subject(self): + """Subjects must implement get_components() and return dicts or +tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject': + + Subject: Subject: O=system:nodes, CN=system:node:m01.example.com + +might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')] + """ + return self.subject + + def get_extension(self, i): + """Extensions must implement get_short_name() and return the string +'subjectAltName'""" + return self.extensions[i] + + def get_extension_count(self): + """ get_extension_count """ + return len(self.extensions) + + def get_notAfter(self): + """Returns a date stamp as a string in the form +'20180922170439Z'. strptime the result with format param: +'%Y%m%d%H%M%SZ'.""" + return self.not_after + + +class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods + """Mocks what happens when `get_extension` is called on a certificate +object""" + + def __init__(self, san_string): + """With `san_string` as you get from: + + $ openssl x509 -in certificate.crt -text + """ + self.san_string = san_string + self.short_name = 'subjectAltName' + + def get_short_name(self): + """Return the 'type' of this extension. 
It's always the same though +because we only care about subjectAltName's""" + return self.short_name + + def __str__(self): + """Return this extension and the value as a simple string""" + return self.san_string + + +# pylint: disable=too-few-public-methods +class FakeOpenSSLCertificateSubjects(object): + """Mocks what happens when `get_subject` is called on a certificate +object""" + + def __init__(self, subject_string): + """With `subject_string` as you get from: + + $ openssl x509 -in certificate.crt -text + """ + self.subjects = [] + for s in subject_string.split(', '): + name, _, value = s.partition(' = ') + self.subjects.append((name, value)) + + def get_components(self): + """Returns a list of tuples""" + return self.subjects + + +###################################################################### +def filter_paths(path_list): + """`path_list` - A list of file paths to check. Only files which exist +will be returned + """ + return [p for p in path_list if os.path.exists(os.path.realpath(p))] + + +# pylint: disable=too-many-locals,too-many-branches +# +# TODO: Break this function down into smaller chunks +def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None): + """Load a certificate, split off the good parts, and return some +useful data + +Params: + +- `cert_string` (string) - a certificate loaded into a string object +- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `base64decode` (bool) - run base64.b64decode() on the input +- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors) + +Returns: +A tuple of the form: + (cert_subject, cert_expiry_date, time_remaining, cert_serial_number) + """ + if base64decode: + _cert_string = base64.b64decode(cert_string).decode('utf-8') + else: + _cert_string = cert_string + + # Disable this. We 'redefine' the type because we are working + # around a missing library on the target host. + # + # pylint: disable=redefined-variable-type + if HAS_OPENSSL: + # No work-around required + cert_loaded = OpenSSL.crypto.load_certificate( + OpenSSL.crypto.FILETYPE_PEM, _cert_string) + else: + # Missing library, work-around required. Run the 'openssl' + # command on it to decode it + cmd = 'openssl x509 -text' + try: + openssl_proc = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stdin=subprocess.PIPE) + except OSError: + ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.") + else: + openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8') + cert_loaded = FakeOpenSSLCertificate(openssl_decoded) + + ###################################################################### + # Read all possible names from the cert + cert_subjects = [] + for name, value in cert_loaded.get_subject().get_components(): + if isinstance(name, bytes) or isinstance(value, bytes): + name = name.decode('utf-8') + value = value.decode('utf-8') + cert_subjects.append('{}:{}'.format(name, value)) + + # To read SANs from a cert we must read the subjectAltName + # extension from the X509 Object. 
What makes this more difficult
+    # is that pyOpenSSL does not give extensions as an iterable
+    san = None
+    for i in range(cert_loaded.get_extension_count()):
+        ext = cert_loaded.get_extension(i)
+        if ext.get_short_name() == 'subjectAltName':
+            san = ext
+
+    if san is not None:
+        # The X509Extension object for subjectAltName prints as a
+        # string with the alt names separated by a comma and a
+        # space. Split the string by ', ' and then add our new names
+        # to the list of existing names
+        cert_subjects.extend(str(san).split(', '))
+
+    cert_subject = ', '.join(cert_subjects)
+    ######################################################################
+
+    # Grab the expiration date
+    not_after = cert_loaded.get_notAfter()
+    # example get_notAfter() => 20180922170439Z
+    if isinstance(not_after, bytes):
+        not_after = not_after.decode('utf-8')
+
+    cert_expiry_date = datetime.datetime.strptime(
+        not_after,
+        '%Y%m%d%H%M%SZ')
+
+    time_remaining = cert_expiry_date - now
+
+    return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number())
+
+
+def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
+    """Given metadata about a certificate under examination, classify it
+    into one of three categories, 'ok', 'warning', and 'expired'.
+
+Params:
+
+- `cert_meta` dict - A dict with certificate metadata. Required fields
+  include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'.
+- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
+- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires
+- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is
+- `cert_list` list - A list to shove the classified cert into
+
+Return:
+- `cert_list` - The updated list of classified certificates
+    """
+    expiry_str = str(cert_meta['expiry'])
+    # Categorization
+    if cert_meta['expiry'] < now:
+        # This already expired, must NOTIFY
+        cert_meta['health'] = 'expired'
+    elif time_remaining < expire_window:
+        # WARN about this upcoming expiration
+        cert_meta['health'] = 'warning'
+    else:
+        # Not expired or about to expire
+        cert_meta['health'] = 'ok'
+
+    cert_meta['expiry'] = expiry_str
+    cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
+    cert_list.append(cert_meta)
+    return cert_list
+
+
+def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs):
+    """Calculate the summary text for when the module finishes
+running. This includes counts of each classification and what have
+you.
+
+Params:
+
+- `certificates` (list of dicts) - Processed `expire_check_result`
+  dicts with filled in `health` keys for system certificates.
+- `kubeconfigs` - as above for kubeconfigs
+- `etcd_certs` - as above for etcd certs
+- `router_certs` - as above for router certificates
+- `registry_certs` - as above for registry certificates
+
+Return:
+
+- `summary_results` (dict) - Counts of each cert type classification
+  and total items examined.
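+
+An illustrative return value (editor's sketch, not real data):
+
+    {'system_certificates': 2, 'kubeconfig_certificates': 1,
+     'etcd_certificates': 0, 'router_certs': 0, 'registry_certs': 0,
+     'total': 3, 'ok': 2, 'warning': 1, 'expired': 0}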
+ """ + items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs + + summary_results = { + 'system_certificates': len(certificates), + 'kubeconfig_certificates': len(kubeconfigs), + 'etcd_certificates': len(etcd_certs), + 'router_certs': len(router_certs), + 'registry_certs': len(registry_certs), + 'total': len(items), + 'ok': 0, + 'warning': 0, + 'expired': 0 + } + + summary_results['expired'] = len([c for c in items if c['health'] == 'expired']) + summary_results['warning'] = len([c for c in items if c['health'] == 'warning']) + summary_results['ok'] = len([c for c in items if c['health'] == 'ok']) + + return summary_results + + +###################################################################### +# This is our module MAIN function after all, so there's bound to be a +# lot of code bundled up into one block +# +# Reason: These checks are disabled because the issue was introduced +# during a period where the pylint checks weren't enabled for this file +# Status: temporarily disabled pending future refactoring +# pylint: disable=too-many-locals,too-many-statements,too-many-branches +def main(): + """This module examines certificates (in various forms) which compose +an OpenShift Container Platform cluster + """ + + module = AnsibleModule( + argument_spec=dict( + config_base=dict( + required=False, + default="/etc/origin", + type='str'), + warning_days=dict( + required=False, + default=30, + type='int'), + show_all=dict( + required=False, + default=False, + type='bool') + ), + supports_check_mode=True, + ) + + # Basic scaffolding for OpenShift specific certs + openshift_base_config_path = os.path.realpath(module.params['config_base']) + openshift_master_config_path = os.path.join(openshift_base_config_path, + "master", "master-config.yaml") + openshift_node_config_path = os.path.join(openshift_base_config_path, + "node", "node-config.yaml") + openshift_cert_check_paths = [ + openshift_master_config_path, + openshift_node_config_path, + ] + + # Paths for Kubeconfigs. Additional kubeconfigs are conditionally + # checked later in the code + master_kube_configs = ['admin', 'openshift-master', + 'openshift-node', 'openshift-router', + 'openshift-registry'] + + kubeconfig_paths = [] + for m_kube_config in master_kube_configs: + kubeconfig_paths.append( + os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig") + ) + + # Validate some paths we have the ability to do ahead of time + openshift_cert_check_paths = filter_paths(openshift_cert_check_paths) + kubeconfig_paths = filter_paths(kubeconfig_paths) + + # etcd, where do you hide your certs? Used when parsing etcd.conf + etcd_cert_params = [ + "ETCD_CA_FILE", + "ETCD_CERT_FILE", + "ETCD_PEER_CA_FILE", + "ETCD_PEER_CERT_FILE", + ] + + # Expiry checking stuff + now = datetime.datetime.now() + # todo, catch exception for invalid input and return a fail_json + warning_days = int(module.params['warning_days']) + expire_window = datetime.timedelta(days=warning_days) + + # Module stuff + # + # The results of our cert checking to return from the task call + check_results = {} + check_results['meta'] = {} + check_results['meta']['warning_days'] = warning_days + check_results['meta']['checked_at_time'] = str(now) + check_results['meta']['warn_before_date'] = str(now + expire_window) + check_results['meta']['show_all'] = str(module.params['show_all']) + # All the analyzed certs accumulate here + ocp_certs = [] + + ###################################################################### + # Sure, why not? 
Let's enable check mode. + if module.check_mode: + check_results['ocp_certs'] = [] + module.exit_json( + check_results=check_results, + msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'], + rc=0, + changed=False + ) + + ###################################################################### + # Check for OpenShift Container Platform specific certs + ###################################################################### + for os_cert in filter_paths(openshift_cert_check_paths): + # Open up that config file and locate the cert and CA + with io.open(os_cert, 'r', encoding='utf-8') as fp: + cert_meta = {} + cfg = yaml.load(fp) + # cert files are specified in parsed `fp` as relative to the path + # of the original config file. 'master-config.yaml' with certFile + # = 'foo.crt' implies that 'foo.crt' is in the same + # directory. certFile = '../foo.crt' is in the parent directory. + cfg_path = os.path.dirname(fp.name) + cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile']) + cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA']) + + ###################################################################### + # Load the certificate and the CA, parse their expiration dates into + # datetime objects so we can manipulate them later + for v in cert_meta.values(): + with io.open(v, 'r', encoding='utf-8') as fp: + cert = fp.read() + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(cert, now, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs) + + ###################################################################### + # /Check for OpenShift Container Platform specific certs + ###################################################################### + + ###################################################################### + # Check service Kubeconfigs + ###################################################################### + kubeconfigs = [] + + # There may be additional kubeconfigs to check, but their naming + # is less predictable than the ones we've already assembled. + + try: + # Try to read the standard 'node-config.yaml' file to check if + # this host is a node. + with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp: + cfg = yaml.load(fp) + + # OK, the config file exists, therefore this is a + # node. Nodes have their own kubeconfig files to + # communicate with the master API. Let's read the relative + # path to that file from the node config. 
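+        # For orientation, an illustrative node-config.yaml fragment
+        # (editor's sketch, hypothetical kubeconfig file name):
+        #
+        #     masterKubeConfig: node1.example.com.kubeconfig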
+ node_masterKubeConfig = cfg['masterKubeConfig'] + # As before, the path to the 'masterKubeConfig' file is + # relative to `fp` + cfg_path = os.path.dirname(fp.name) + node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig) + + with io.open(node_kubeconfig, 'r', encoding='utf8') as fp: + # Read in the nodes kubeconfig file and grab the good stuff + cfg = yaml.load(fp) + + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + except IOError: + # This is not a node + pass + + for kube in filter_paths(kubeconfig_paths): + with io.open(kube, 'r', encoding='utf-8') as fp: + # TODO: Maybe consider catching exceptions here? + cfg = yaml.load(fp) + + # Per conversation, "the kubeconfigs you care about: + # admin, router, registry should all be single + # value". Following that advice we only grab the data for + # the user at index 0 in the 'users' list. There should + # not be more than one user. + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + + ###################################################################### + # /Check service Kubeconfigs + ###################################################################### + + ###################################################################### + # Check etcd certs + # + # Two things to check: 'external' etcd, and embedded etcd. + ###################################################################### + # FIRST: The 'external' etcd + # + # Some values may be duplicated, make this a set for now so we + # unique them all + etcd_certs_to_check = set([]) + etcd_certs = [] + etcd_cert_params.append('dne') + try: + with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp: + # Add dummy header section. + config = io.StringIO() + config.write(u'[ETCD]\n') + config.write(fp.read().replace('%', '%%')) + config.seek(0, os.SEEK_SET) + + etcd_config = configparser.ConfigParser() + etcd_config.readfp(config) + + for param in etcd_cert_params: + try: + etcd_certs_to_check.add(etcd_config.get('ETCD', param)) + except configparser.NoOptionError: + # That parameter does not exist, oh well... 
+ pass + except IOError: + # No etcd to see here, move along + pass + + for etcd_cert in filter_paths(etcd_certs_to_check): + with io.open(etcd_cert, 'r', encoding='utf-8') as fp: + c = fp.read() + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(c, now, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) + + ###################################################################### + # Now the embedded etcd + ###################################################################### + try: + with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp: + cfg = yaml.load(fp) + except IOError: + # Not present + pass + else: + if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None: + # This is embedded + etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile'] + else: + # Not embedded + etcd_crt_name = None + + if etcd_crt_name is not None: + # etcd_crt_name is relative to the location of the + # master-config.yaml file + cfg_path = os.path.dirname(fp.name) + etcd_cert = os.path.join(cfg_path, etcd_crt_name) + with open(etcd_cert, 'r') as etcd_fp: + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': etcd_fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) + + ###################################################################### + # /Check etcd certs + ###################################################################### + + ###################################################################### + # Check router/registry certs + # + # These are saved as secrets in etcd. That means that we can not + # simply read a file to grab the data. Instead we're going to + # subprocess out to the 'oc get' command. On non-masters this + # command will fail, that is expected so we catch that exception. + ###################################################################### + router_certs = [] + registry_certs = [] + + ###################################################################### + # First the router certs + try: + router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(), + stdout=subprocess.PIPE) + router_ds = yaml.load(router_secrets_raw.communicate()[0]) + router_c = router_ds['data']['tls.crt'] + router_path = router_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. 
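+        # Editor's note (illustrative): the router-certs secret fetched above
+        # stores its PEM data base64-encoded under data['tls.crt'], which is
+        # why the success path below decodes it with base64decode=True.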
+ pass + else: + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': router_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs) + + ###################################################################### + # Now for registry + try: + registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(), + stdout=subprocess.PIPE) + registry_ds = yaml.load(registry_secrets_raw.communicate()[0]) + registry_c = registry_ds['data']['registry.crt'] + registry_path = registry_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. + pass + else: + (cert_subject, + cert_expiry_date, + time_remaining, + cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': registry_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + 'serial': cert_serial + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs) + + ###################################################################### + # /Check router/registry certs + ###################################################################### + + res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs) + + msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format( + count=res['total'], + exp=res['expired'], + warn=res['warning'], + ok=res['ok'], + window=int(module.params['warning_days']), + ) + + # By default we only return detailed information about expired or + # warning certificates. If show_all is true then we will print all + # the certificates examined. + if not module.params['show_all']: + check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']] + check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']] + check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']] + check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']] + check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']] + else: + check_results['ocp_certs'] = ocp_certs + check_results['kubeconfigs'] = kubeconfigs + check_results['etcd'] = etcd_certs + check_results['registry'] = registry_certs + check_results['router'] = router_certs + + # Sort the final results to report in order of ascending safety + # time. That is to say, the certificates which will expire sooner + # will be at the front of the list and certificates which will + # expire later are at the end. Router and registry certs should be + # limited to just 1 result, so don't bother sorting those. 
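+    # Illustrative (editor's note), doctest-style:
+    #     sorted([{'days_remaining': 300}, {'days_remaining': 3}], key=cert_key)
+    #     => [{'days_remaining': 3}, {'days_remaining': 300}]
+    # so the soonest-expiring certificates come first.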
+ def cert_key(item): + ''' return the days_remaining key ''' + return item['days_remaining'] + + check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key) + check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key) + check_results['etcd'] = sorted(check_results['etcd'], key=cert_key) + + # This module will never change anything, but we might want to + # change the return code parameter if there is some catastrophic + # error we noticed earlier + module.exit_json( + check_results=check_results, + summary=res, + msg=msg, + rc=0, + changed=False + ) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py new file mode 100644 index 000000000..440b8ec28 --- /dev/null +++ b/roles/lib_utils/library/openshift_container_binary_sync.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=missing-docstring,invalid-name + +import random +import tempfile +import shutil +import os.path + +# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import +from ansible.module_utils.basic import * # noqa: F403 + + +DOCUMENTATION = ''' +--- +module: openshift_container_binary_sync +short_description: Copies OpenShift binaries out of the given image tag to host system. +''' + + +class BinarySyncError(Exception): + def __init__(self, msg): + super(BinarySyncError, self).__init__(msg) + self.msg = msg + + +# pylint: disable=too-few-public-methods,too-many-instance-attributes +class BinarySyncer(object): + """ + Syncs the openshift, oc, and kubectl binaries/symlinks out of + a container onto the host system. + """ + + def __init__(self, module, image, tag, backend): + self.module = module + self.changed = False + self.output = [] + self.bin_dir = '/usr/local/bin' + self._image = image + self.tag = tag + self.backend = backend + self.temp_dir = None # TBD + + def sync(self): + if self.backend == 'atomic': + return self._sync_atomic() + + return self._sync_docker() + + def _sync_atomic(self): + self.temp_dir = tempfile.mkdtemp() + temp_dir_mount = tempfile.mkdtemp() + try: + image_spec = '%s:%s' % (self.image, self.tag) + rc, stdout, stderr = self.module.run_command(['atomic', 'mount', + '--storage', "ostree", + image_spec, temp_dir_mount]) + if rc: + raise BinarySyncError("Error mounting image. stdout=%s, stderr=%s" % + (stdout, stderr)) + for i in ["openshift", "oc"]: + src_file = os.path.join(temp_dir_mount, "usr/bin", i) + shutil.copy(src_file, self.temp_dir) + + self._sync_binaries() + finally: + self.module.run_command(['atomic', 'umount', temp_dir_mount]) + shutil.rmtree(temp_dir_mount) + shutil.rmtree(self.temp_dir) + + def _sync_docker(self): + container_name = "openshift-cli-%s" % random.randint(1, 100000) + rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name', + container_name, '%s:%s' % (self.image, self.tag)]) + if rc: + raise BinarySyncError("Error creating temporary docker container. 
stdout=%s, stderr=%s" % + (stdout, stderr)) + self.output.append(stdout) + try: + self.temp_dir = tempfile.mkdtemp() + self.output.append("Using temp dir: %s" % self.temp_dir) + + rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name, + self.temp_dir]) + if rc: + raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % + (stdout, stderr)) + + rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name, + self.temp_dir]) + if rc: + raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % + (stdout, stderr)) + + self._sync_binaries() + finally: + shutil.rmtree(self.temp_dir) + self.module.run_command(['docker', 'rm', container_name]) + + def _sync_binaries(self): + self._sync_binary('openshift') + + # In older versions, oc was a symlink to openshift: + if os.path.islink(os.path.join(self.temp_dir, 'oc')): + self._sync_symlink('oc', 'openshift') + else: + self._sync_binary('oc') + + # Ensure correct symlinks created: + self._sync_symlink('kubectl', 'openshift') + + # Remove old oadm binary + if os.path.exists(os.path.join(self.bin_dir, 'oadm')): + os.remove(os.path.join(self.bin_dir, 'oadm')) + + def _sync_symlink(self, binary_name, link_to): + """ Ensure the given binary name exists and links to the expected binary. """ + + # The symlink we are creating: + link_path = os.path.join(self.bin_dir, binary_name) + + # The expected file we should be linking to: + link_dest = os.path.join(self.bin_dir, link_to) + + if not os.path.exists(link_path) or \ + not os.path.islink(link_path) or \ + os.path.realpath(link_path) != os.path.realpath(link_dest): + if os.path.exists(link_path): + os.remove(link_path) + os.symlink(link_to, os.path.join(self.bin_dir, binary_name)) + self.output.append("Symlinked %s to %s." % (link_path, link_dest)) + self.changed = True + + def _sync_binary(self, binary_name): + src_path = os.path.join(self.temp_dir, binary_name) + dest_path = os.path.join(self.bin_dir, binary_name) + incoming_checksum = self.module.run_command(['sha256sum', src_path])[1] + if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum: + + # See: https://github.com/openshift/openshift-ansible/issues/4965 + if os.path.islink(dest_path): + os.unlink(dest_path) + self.output.append('Removed old symlink {} before copying binary.'.format(dest_path)) + shutil.move(src_path, dest_path) + self.output.append("Moved %s to %s." % (src_path, dest_path)) + self.changed = True + + @property + def raw_image(self): + """ + Returns the image as it was originally passed in to the instance. + + .. note:: + This image string will only work directly with the atomic command. + + :returns: The original image passed in. + :rtype: str + """ + return self._image + + @property + def image(self): + """ + Returns the image without atomic prefixes used to map to skopeo args. 
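+
+        Illustrative (editor's sketch, hypothetical image name): an input of
+        'oci:registry.example.com/openshift3/ose' comes back as
+        'registry.example.com/openshift3/ose'.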
+ + :returns: The image string without prefixes + :rtype: str + """ + image = self._image + for remove in ('oci:', 'http:', 'https:'): + if image.startswith(remove): + image = image.replace(remove, '') + return image + + +def main(): + module = AnsibleModule( # noqa: F405 + argument_spec=dict( + image=dict(required=True), + tag=dict(required=True), + backend=dict(required=True), + ), + supports_check_mode=True + ) + + image = module.params['image'] + tag = module.params['tag'] + backend = module.params['backend'] + + if backend not in ["docker", "atomic"]: + module.fail_json(msg="unknown backend") + + binary_syncer = BinarySyncer(module, image, tag, backend) + + try: + binary_syncer.sync() + except BinarySyncError as ex: + module.fail_json(msg=ex.msg) + + return module.exit_json(changed=binary_syncer.changed, + output=binary_syncer.output) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py new file mode 100644 index 000000000..4858c5ec6 --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py @@ -0,0 +1,143 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, regions_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + predicates = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is None: + if 'short_version' in variables['openshift']['common']: + short_version = variables['openshift']['common']['short_version'] + elif 'openshift_release' in variables: + release = variables['openshift_release'] + if release.startswith('v'): + short_version = release[1:] + else: + short_version = release + short_version = '.'.join(short_version.split('.')[0:2]) + elif 'openshift_version' in variables: + version = variables['openshift_version'] + short_version = '.'.join(version.split('.')[0:2]) + else: + # pylint: disable=line-too-long + raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") + if deployment_type == 'origin': + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + elif deployment_type == 'openshift-enterprise': + if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + else: + raise AnsibleError("Unknown deployment_type %s" % deployment_type) + + if deployment_type == 'origin': + # convert short_version to enterprise short_version + short_version = re.sub('^1.', '3.', short_version) + + if short_version == 'latest': + short_version = '3.9' + + # Predicates ordered according to OpenShift Origin source: + # 
origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go + + if short_version == '3.1': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'MatchNodeSelector'}, + ]) + + if short_version == '3.2': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MatchNodeSelector'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'} + ]) + + if short_version == '3.3': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'} + ]) + + if short_version == '3.4': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'MatchInterPodAffinity'} + ]) + + if short_version in ['3.5', '3.6']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + ]) + + if short_version in ['3.7', '3.8', '3.9']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, + ]) + + if regions_enabled: + region_predicate = { + 'name': 'Region', + 'argument': { + 'serviceAffinity': { + 'labels': ['region'] + } + } + } + predicates.append(region_predicate) + + return predicates diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py new file mode 100644 index 000000000..18e1b2e0c --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py @@ -0,0 +1,117 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, zones_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + priorities = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is None: + if 'short_version' in 
variables['openshift']['common']:
+                short_version = variables['openshift']['common']['short_version']
+            elif 'openshift_release' in variables:
+                release = variables['openshift_release']
+                if release.startswith('v'):
+                    short_version = release[1:]
+                else:
+                    short_version = release
+                short_version = '.'.join(short_version.split('.')[0:2])
+            elif 'openshift_version' in variables:
+                version = variables['openshift_version']
+                short_version = '.'.join(version.split('.')[0:2])
+            else:
+                # pylint: disable=line-too-long
+                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+        if deployment_type == 'origin':
+            if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+                raise AnsibleError("Unknown short_version %s" % short_version)
+        elif deployment_type == 'openshift-enterprise':
+            if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+                raise AnsibleError("Unknown short_version %s" % short_version)
+        else:
+            raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+        if deployment_type == 'origin':
+            # convert short_version to enterprise short_version
+            short_version = re.sub('^1.', '3.', short_version)
+
+        if short_version == 'latest':
+            short_version = '3.9'
+
+        if short_version == '3.1':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1}
+            ])
+
+        if short_version == '3.2':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'NodeAffinityPriority', 'weight': 1}
+            ])
+
+        if short_version == '3.3':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'NodeAffinityPriority', 'weight': 1},
+                {'name': 'TaintTolerationPriority', 'weight': 1}
+            ])
+
+        if short_version == '3.4':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+                {'name': 'NodeAffinityPriority', 'weight': 1},
+                {'name': 'TaintTolerationPriority', 'weight': 1},
+                {'name': 'InterPodAffinityPriority', 'weight': 1}
+            ])
+
+        if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']:
+            priorities.extend([
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'InterPodAffinityPriority', 'weight': 1},
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+                {'name': 'NodeAffinityPriority', 'weight': 1},
+                {'name': 'TaintTolerationPriority', 'weight': 1}
+            ])
+
+        if zones_enabled:
+            zone_priority = {
+                'name': 'Zone',
+                'argument': {
+                    'serviceAntiAffinity': {
+                        'label': 'zone'
+                    }
+                },
+                'weight': 2
+            }
+            priorities.append(zone_priority)
+
+        return priorities
diff --git a/roles/lib_utils/test/conftest.py b/roles/lib_utils/test/conftest.py
new file mode 100644
index 000000000..aabdd4fa1
--- /dev/null
+++ b/roles/lib_utils/test/conftest.py
@@ -0,0 +1,172 @@
+# pylint: disable=missing-docstring,invalid-name,redefined-outer-name
+import os
+import pytest
+import sys
+
+from OpenSSL import crypto
+
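+# The lookup plugins under test are plain modules inside the role rather
+# than an installed package, so extend sys.path before importing them
+# (inserting at index 1 keeps the interpreter's default first entry in place).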
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402 +from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402 + +# Parameter list for valid_cert fixture +VALID_CERTIFICATE_PARAMS = [ + { + 'short_name': 'client', + 'cn': 'client.example.com', + 'serial': 4, + 'uses': b'clientAuth', + 'dns': [], + 'ip': [], + }, + { + 'short_name': 'server', + 'cn': 'server.example.com', + 'serial': 5, + 'uses': b'serverAuth', + 'dns': ['kubernetes', 'openshift'], + 'ip': ['10.0.0.1', '192.168.0.1'] + }, + { + 'short_name': 'combined', + 'cn': 'combined.example.com', + # Verify that HUGE serials parse correctly. + # Frobs PARSING_HEX_SERIAL in _parse_cert + # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240 + 'serial': 14449739080294792594019643629255165375, + 'uses': b'clientAuth, serverAuth', + 'dns': ['etcd'], + 'ip': ['10.0.0.2', '192.168.0.2'] + } +] + +# Extract the short_name from VALID_CERTIFICATE_PARAMS to provide +# friendly naming for the valid_cert fixture +VALID_CERTIFICATE_IDS = [param['short_name'] for param in VALID_CERTIFICATE_PARAMS] + + +@pytest.fixture(scope='session') +def ca(tmpdir_factory): + ca_dir = tmpdir_factory.mktemp('ca') + + key = crypto.PKey() + key.generate_key(crypto.TYPE_RSA, 2048) + + cert = crypto.X509() + cert.set_version(3) + cert.set_serial_number(1) + cert.get_subject().commonName = 'test-signer' + cert.gmtime_adj_notBefore(0) + cert.gmtime_adj_notAfter(24 * 60 * 60) + cert.set_issuer(cert.get_subject()) + cert.set_pubkey(key) + cert.add_extensions([ + crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE, pathlen:0'), + crypto.X509Extension(b'keyUsage', True, + b'digitalSignature, keyEncipherment, keyCertSign, cRLSign'), + crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert) + ]) + cert.add_extensions([ + crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=cert) + ]) + cert.sign(key, 'sha256') + + return { + 'dir': ca_dir, + 'key': key, + 'cert': cert, + } + + +@pytest.fixture(scope='session', + ids=VALID_CERTIFICATE_IDS, + params=VALID_CERTIFICATE_PARAMS) +def valid_cert(request, ca): + common_name = request.param['cn'] + + key = crypto.PKey() + key.generate_key(crypto.TYPE_RSA, 2048) + + cert = crypto.X509() + cert.set_serial_number(request.param['serial']) + cert.gmtime_adj_notBefore(0) + cert.gmtime_adj_notAfter(24 * 60 * 60) + cert.set_issuer(ca['cert'].get_subject()) + cert.set_pubkey(key) + cert.set_version(3) + cert.get_subject().commonName = common_name + cert.add_extensions([ + crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'), + crypto.X509Extension(b'keyUsage', True, b'digitalSignature, keyEncipherment'), + crypto.X509Extension(b'extendedKeyUsage', False, request.param['uses']), + ]) + + if request.param['dns'] or request.param['ip']: + san_list = ['DNS:{}'.format(common_name)] + san_list.extend(['DNS:{}'.format(x) for x in request.param['dns']]) + san_list.extend(['IP:{}'.format(x) for x in request.param['ip']]) + + cert.add_extensions([ + crypto.X509Extension(b'subjectAltName', False, ', '.join(san_list).encode('utf8')) + ]) + cert.sign(ca['key'], 'sha256') + + cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) + cert_file = ca['dir'].join('{}.crt'.format(common_name)) + cert_file.write_binary(cert_contents) + + return { + 'common_name': common_name, + 'serial': 
request.param['serial'], + 'dns': request.param['dns'], + 'ip': request.param['ip'], + 'uses': request.param['uses'], + 'cert_file': cert_file, + 'cert': cert + } + + +@pytest.fixture() +def predicates_lookup(): + return PredicatesLookupModule() + + +@pytest.fixture() +def priorities_lookup(): + return PrioritiesLookupModule() + + +@pytest.fixture() +def facts(): + return { + 'openshift': { + 'common': {} + } + } + + +@pytest.fixture(params=[True, False]) +def regions_enabled(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def zones_enabled(request): + return request.param + + +def v_prefix(release): + """Prefix a release number with 'v'.""" + return "v" + release + + +def minor(release): + """Add a suffix to release, making 'X.Y' become 'X.Y.Z'.""" + return release + ".1" + + +@pytest.fixture(params=[str, v_prefix, minor]) +def release_mod(request): + """Modifies a release string to alternative valid values.""" + return request.param diff --git a/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py new file mode 100644 index 000000000..e8da1e04a --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py @@ -0,0 +1,57 @@ +import copy +import os +import sys + +from ansible.errors import AnsibleError +import pytest + +sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from openshift_master_facts_default_predicates import LookupModule # noqa: E402 + + +class TestOpenShiftMasterFactsBadInput(object): + lookup = LookupModule() + default_facts = { + 'openshift': { + 'common': {} + } + } + + def test_missing_openshift_facts(self): + with pytest.raises(AnsibleError): + facts = {} + self.lookup.run(None, variables=facts) + + def test_missing_deployment_type(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '10.10' + self.lookup.run(None, variables=facts) + + def test_missing_short_version_and_missing_openshift_release(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['deployment_type'] = 'origin' + self.lookup.run(None, variables=facts) + + def test_unknown_deployment_types(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '1.1' + facts['openshift']['common']['deployment_type'] = 'bogus' + self.lookup.run(None, variables=facts) + + def test_unknown_origin_version(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '0.1' + facts['openshift']['common']['deployment_type'] = 'origin' + self.lookup.run(None, variables=facts) + + def test_unknown_ocp_version(self): + with pytest.raises(AnsibleError): + facts = copy.deepcopy(self.default_facts) + facts['openshift']['common']['short_version'] = '0.1' + facts['openshift']['common']['deployment_type'] = 'openshift-enterprise' + self.lookup.run(None, variables=facts) diff --git a/roles/lib_utils/test/openshift_master_facts_conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py new file mode 100644 index 000000000..140cced73 --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_conftest.py @@ -0,0 +1,54 @@ +import os +import sys + +import pytest + +sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from 
openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402 +from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402 + + +@pytest.fixture() +def predicates_lookup(): + return PredicatesLookupModule() + + +@pytest.fixture() +def priorities_lookup(): + return PrioritiesLookupModule() + + +@pytest.fixture() +def facts(): + return { + 'openshift': { + 'common': {} + } + } + + +@pytest.fixture(params=[True, False]) +def regions_enabled(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def zones_enabled(request): + return request.param + + +def v_prefix(release): + """Prefix a release number with 'v'.""" + return "v" + release + + +def minor(release): + """Add a suffix to release, making 'X.Y' become 'X.Y.Z'.""" + return release + ".1" + + +@pytest.fixture(params=[str, v_prefix, minor]) +def release_mod(request): + """Modifies a release string to alternative valid values.""" + return request.param diff --git a/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py new file mode 100644 index 000000000..11aad9f03 --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py @@ -0,0 +1,193 @@ +import pytest + + +# Predicates ordered according to OpenShift Origin source: +# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go + +DEFAULT_PREDICATES_1_1 = [ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'MatchNodeSelector'}, +] + +DEFAULT_PREDICATES_1_2 = [ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MatchNodeSelector'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'} +] + +DEFAULT_PREDICATES_1_3 = [ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'} +] + +DEFAULT_PREDICATES_1_4 = [ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'MatchInterPodAffinity'} +] + +DEFAULT_PREDICATES_1_5 = [ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, +] + +DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5 + +DEFAULT_PREDICATES_3_7 = [ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, +] + +DEFAULT_PREDICATES_3_9 = DEFAULT_PREDICATES_3_8 = DEFAULT_PREDICATES_3_7 + +REGION_PREDICATE = { + 'name': 'Region', + 'argument': { + 'serviceAffinity': { + 
'labels': ['region'] + } + } +} + +TEST_VARS = [ + ('1.1', 'origin', DEFAULT_PREDICATES_1_1), + ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1), + ('1.2', 'origin', DEFAULT_PREDICATES_1_2), + ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2), + ('1.3', 'origin', DEFAULT_PREDICATES_1_3), + ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3), + ('1.4', 'origin', DEFAULT_PREDICATES_1_4), + ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4), + ('1.5', 'origin', DEFAULT_PREDICATES_1_5), + ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), + ('3.6', 'origin', DEFAULT_PREDICATES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6), + ('3.7', 'origin', DEFAULT_PREDICATES_3_7), + ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7), + ('3.8', 'origin', DEFAULT_PREDICATES_3_8), + ('3.8', 'openshift-enterprise', DEFAULT_PREDICATES_3_8), + ('3.9', 'origin', DEFAULT_PREDICATES_3_9), + ('3.9', 'openshift-enterprise', DEFAULT_PREDICATES_3_9), +] + + +def assert_ok(predicates_lookup, default_predicates, regions_enabled, **kwargs): + results = predicates_lookup.run(None, regions_enabled=regions_enabled, **kwargs) + if regions_enabled: + assert results == default_predicates + [REGION_PREDICATE] + else: + assert results == default_predicates + + +def test_openshift_version(predicates_lookup, openshift_version_fixture, regions_enabled): + facts, default_predicates = openshift_version_fixture + assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_version_fixture(request, facts): + version, deployment_type, default_predicates = request.param + version += '.1' + facts['openshift_version'] = version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_predicates + + +def test_openshift_release(predicates_lookup, openshift_release_fixture, regions_enabled): + facts, default_predicates = openshift_release_fixture + assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_release_fixture(request, facts, release_mod): + release, deployment_type, default_predicates = request.param + facts['openshift_release'] = release_mod(release) + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_predicates + + +def test_short_version(predicates_lookup, short_version_fixture, regions_enabled): + facts, default_predicates = short_version_fixture + assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled) + + +@pytest.fixture(params=TEST_VARS) +def short_version_fixture(request, facts): + short_version, deployment_type, default_predicates = request.param + facts['openshift']['common']['short_version'] = short_version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_predicates + + +def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled): + facts, short_version, default_predicates = short_version_kwarg_fixture + assert_ok( + predicates_lookup, default_predicates, variables=facts, + regions_enabled=regions_enabled, short_version=short_version) + + +@pytest.fixture(params=TEST_VARS) +def short_version_kwarg_fixture(request, facts): + short_version, deployment_type, default_predicates = request.param + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, short_version, 
default_predicates + + +def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled): + facts, deployment_type, default_predicates = deployment_type_kwarg_fixture + assert_ok( + predicates_lookup, default_predicates, variables=facts, + regions_enabled=regions_enabled, deployment_type=deployment_type) + + +@pytest.fixture(params=TEST_VARS) +def deployment_type_kwarg_fixture(request, facts): + short_version, deployment_type, default_predicates = request.param + facts['openshift']['common']['short_version'] = short_version + return facts, deployment_type, default_predicates + + +def test_short_version_deployment_type_kwargs( + predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled): + short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture + assert_ok( + predicates_lookup, default_predicates, regions_enabled=regions_enabled, + short_version=short_version, deployment_type=deployment_type) + + +@pytest.fixture(params=TEST_VARS) +def short_version_deployment_type_kwargs_fixture(request): + return request.param diff --git a/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py new file mode 100644 index 000000000..527fc9ff4 --- /dev/null +++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py @@ -0,0 +1,167 @@ +import pytest + + +DEFAULT_PRIORITIES_1_1 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_2 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_3 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_4 = [ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_1_5 = [ + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1}, + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} +] + +DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5 + +DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8 = DEFAULT_PRIORITIES_3_7 = DEFAULT_PRIORITIES_3_6 + +ZONE_PRIORITY = { + 'name': 'Zone', + 'argument': { + 'serviceAntiAffinity': { + 'label': 'zone' + } + }, + 'weight': 2 +} + +TEST_VARS = [ + ('1.1', 'origin', DEFAULT_PRIORITIES_1_1), + ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1), + ('1.2', 'origin', DEFAULT_PRIORITIES_1_2), + ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2), + ('1.3', 'origin', DEFAULT_PRIORITIES_1_3), + ('3.3', 
'openshift-enterprise', DEFAULT_PRIORITIES_1_3), + ('1.4', 'origin', DEFAULT_PRIORITIES_1_4), + ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4), + ('1.5', 'origin', DEFAULT_PRIORITIES_1_5), + ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5), + ('3.6', 'origin', DEFAULT_PRIORITIES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6), + ('3.7', 'origin', DEFAULT_PRIORITIES_3_7), + ('3.7', 'openshift-enterprise', DEFAULT_PRIORITIES_3_7), + ('3.8', 'origin', DEFAULT_PRIORITIES_3_8), + ('3.8', 'openshift-enterprise', DEFAULT_PRIORITIES_3_8), + ('3.9', 'origin', DEFAULT_PRIORITIES_3_9), + ('3.9', 'openshift-enterprise', DEFAULT_PRIORITIES_3_9), +] + + +def assert_ok(priorities_lookup, default_priorities, zones_enabled, **kwargs): + results = priorities_lookup.run(None, zones_enabled=zones_enabled, **kwargs) + if zones_enabled: + assert results == default_priorities + [ZONE_PRIORITY] + else: + assert results == default_priorities + + +def test_openshift_version(priorities_lookup, openshift_version_fixture, zones_enabled): + facts, default_priorities = openshift_version_fixture + assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_version_fixture(request, facts): + version, deployment_type, default_priorities = request.param + version += '.1' + facts['openshift_version'] = version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_priorities + + +def test_openshift_release(priorities_lookup, openshift_release_fixture, zones_enabled): + facts, default_priorities = openshift_release_fixture + assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled) + + +@pytest.fixture(params=TEST_VARS) +def openshift_release_fixture(request, facts, release_mod): + release, deployment_type, default_priorities = request.param + facts['openshift_release'] = release_mod(release) + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_priorities + + +def test_short_version(priorities_lookup, short_version_fixture, zones_enabled): + facts, default_priorities = short_version_fixture + assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled) + + +@pytest.fixture(params=TEST_VARS) +def short_version_fixture(request, facts): + short_version, deployment_type, default_priorities = request.param + facts['openshift']['common']['short_version'] = short_version + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, default_priorities + + +def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled): + facts, short_version, default_priorities = short_version_kwarg_fixture + assert_ok( + priorities_lookup, default_priorities, variables=facts, + zones_enabled=zones_enabled, short_version=short_version) + + +@pytest.fixture(params=TEST_VARS) +def short_version_kwarg_fixture(request, facts): + short_version, deployment_type, default_priorities = request.param + facts['openshift']['common']['deployment_type'] = deployment_type + return facts, short_version, default_priorities + + +def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled): + facts, deployment_type, default_priorities = deployment_type_kwarg_fixture + assert_ok( + priorities_lookup, default_priorities, variables=facts, + zones_enabled=zones_enabled, deployment_type=deployment_type) + + 
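+# Each kwarg fixture below omits from `facts` the value that its paired
+# test passes explicitly, so the lookup's keyword-argument path is
+# exercised rather than the fact-derived one.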
+@pytest.fixture(params=TEST_VARS)
+def deployment_type_kwarg_fixture(request, facts):
+    short_version, deployment_type, default_priorities = request.param
+    facts['openshift']['common']['short_version'] = short_version
+    return facts, deployment_type, default_priorities
+
+
+def test_short_version_deployment_type_kwargs(
+        priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
+    short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
+    assert_ok(
+        priorities_lookup, default_priorities, zones_enabled=zones_enabled,
+        short_version=short_version, deployment_type=deployment_type)
+
+
+@pytest.fixture(params=TEST_VARS)
+def short_version_deployment_type_kwargs_fixture(request):
+    return request.param
diff --git a/roles/lib_utils/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py
new file mode 100644
index 000000000..8a521a765
--- /dev/null
+++ b/roles/lib_utils/test/test_fakeopensslclasses.py
@@ -0,0 +1,90 @@
+'''
+ Unit tests for the FakeOpenSSL classes
+'''
+import os
+import subprocess
+import sys
+
+import pytest
+
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
+sys.path.insert(1, MODULE_PATH)
+
+# pylint: disable=import-error,wrong-import-position,missing-docstring
+# pylint: disable=invalid-name,redefined-outer-name
+from openshift_cert_expiry import FakeOpenSSLCertificate  # noqa: E402
+
+
+@pytest.fixture(scope='module')
+def fake_valid_cert(valid_cert):
+    cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+           '-nameopt', 'oneline']
+    cert = subprocess.check_output(cmd)
+    return FakeOpenSSLCertificate(cert.decode('utf8'))
+
+
+def test_not_after(valid_cert, fake_valid_cert):
+    ''' Validate value returned back from get_notAfter() '''
+    real_cert = valid_cert['cert']
+
+    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
+    # is text, so decode the result from pyOpenSSL prior to comparing
+    assert real_cert.get_notAfter().decode('utf8') == fake_valid_cert.get_notAfter()
+
+
+def test_serial(valid_cert, fake_valid_cert):
+    ''' Validate value returned back from get_serial_number() '''
+    real_cert = valid_cert['cert']
+    assert real_cert.get_serial_number() == fake_valid_cert.get_serial_number()
+
+
+def test_get_subject(valid_cert, fake_valid_cert):
+    ''' Validate the certificate subject '''
+
+    # Gather the subject components and create a list of colon separated strings.
+    # Since the internal representation of pyOpenSSL uses bytes, we need to decode
+    # the results before comparing.
+    c_subjects = valid_cert['cert'].get_subject().get_components()
+    c_subj = ', '.join(['{}:{}'.format(x.decode('utf8'), y.decode('utf8')) for x, y in c_subjects])
+    f_subjects = fake_valid_cert.get_subject().get_components()
+    f_subj = ', '.join(['{}:{}'.format(x, y) for x, y in f_subjects])
+    assert c_subj == f_subj
+
+
+def get_san_extension(cert):
+    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
+    # is text, so we need to set the value to search for accordingly.
+ if isinstance(cert, FakeOpenSSLCertificate): + san_short_name = 'subjectAltName' + else: + san_short_name = b'subjectAltName' + + for i in range(cert.get_extension_count()): + ext = cert.get_extension(i) + if ext.get_short_name() == san_short_name: + # return the string representation to compare the actual SAN + # values instead of the data types + return str(ext) + + return None + + +def test_subject_alt_names(valid_cert, fake_valid_cert): + real_cert = valid_cert['cert'] + + san = get_san_extension(real_cert) + f_san = get_san_extension(fake_valid_cert) + + assert san == f_san + + # If there are either dns or ip sans defined, verify common_name present + if valid_cert['ip'] or valid_cert['dns']: + assert 'DNS:' + valid_cert['common_name'] in f_san + + # Verify all ip sans are present + for ip in valid_cert['ip']: + assert 'IP Address:' + ip in f_san + + # Verify all dns sans are present + for name in valid_cert['dns']: + assert 'DNS:' + name in f_san diff --git a/roles/lib_utils/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py new file mode 100644 index 000000000..98792e2ee --- /dev/null +++ b/roles/lib_utils/test/test_load_and_handle_cert.py @@ -0,0 +1,67 @@ +''' + Unit tests for the load_and_handle_cert method +''' +import datetime +import os +import sys + +import pytest + +MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library')) +sys.path.insert(1, MODULE_PATH) + +# pylint: disable=import-error,wrong-import-position,missing-docstring +# pylint: disable=invalid-name,redefined-outer-name +import openshift_cert_expiry # noqa: E402 + +# TODO: More testing on the results of the load_and_handle_cert function +# could be implemented here as well, such as verifying subjects +# match up. + + +@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate']) +def loaded_cert(request, valid_cert): + """ parameterized fixture to provide load_and_handle_cert results + for both OpenSSL and FakeOpenSSL parsed certificates + """ + now = datetime.datetime.now() + + openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate' + + # valid_cert['cert_file'] is a `py.path.LocalPath` object and + # provides a read_text() method for reading the file contents. 
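+    # With HAS_OPENSSL patched to False, load_and_handle_cert falls back to
+    # shelling out to `openssl x509 -text` and parsing the output with
+    # FakeOpenSSLCertificate, so both parser paths see the same PEM input.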
+ cert_string = valid_cert['cert_file'].read_text('utf8') + + (subject, + expiry_date, + time_remaining, + serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now) + + return { + 'now': now, + 'subject': subject, + 'expiry_date': expiry_date, + 'time_remaining': time_remaining, + 'serial': serial, + } + + +def test_serial(loaded_cert, valid_cert): + """Params: + + * `loaded_cert` comes from the `loaded_cert` fixture in this file + * `valid_cert` comes from the 'valid_cert' fixture in conftest.py + """ + valid_cert_serial = valid_cert['cert'].get_serial_number() + assert loaded_cert['serial'] == valid_cert_serial + + +def test_expiry(loaded_cert): + """Params: + + * `loaded_cert` comes from the `loaded_cert` fixture in this file + """ + expiry_date = loaded_cert['expiry_date'] + time_remaining = loaded_cert['time_remaining'] + now = loaded_cert['now'] + assert expiry_date == now + time_remaining diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 71de24339..8c8227b5e 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -109,6 +109,7 @@ openshift_aws_node_group_config_node_volumes: device_type: gp2 delete_on_termination: True +# build_instance_tags is a custom filter in role lib_utils openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_group_termination_policy: Default openshift_aws_node_group_replace_instances: [] @@ -201,6 +202,7 @@ openshift_aws_node_group_config: openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" openshift_aws_elb_az_load_balancing: False +# build_instance_tags is a custom filter in role lib_utils openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}" diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py deleted file mode 100644 index dfcb11da3..000000000 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift_aws -''' - -from ansible import errors - - -class FilterModule(object): - ''' Custom ansible filters for use by openshift_aws role''' - - @staticmethod - def scale_groups_serial(scale_group_info, upgrade=False): - ''' This function will determine what the deployment serial should be and return it - - Search through the tags and find the deployment_serial tag. Once found, - determine if an increment is needed during an upgrade. 
- if upgrade is true then increment the serial and return it - else return the serial - ''' - if scale_group_info == []: - return 1 - - scale_group_info = scale_group_info[0] - - if not isinstance(scale_group_info, dict): - raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict") - - serial = None - - for tag in scale_group_info['tags']: - if tag['key'] == 'deployment_serial': - serial = int(tag['value']) - if upgrade: - serial += 1 - break - else: - raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found") - - return serial - - @staticmethod - def scale_groups_match_capacity(scale_group_info): - ''' This function will verify that the scale group instance count matches - the scale group desired capacity - - ''' - for scale_group in scale_group_info: - if scale_group['desired_capacity'] != len(scale_group['instances']): - return False - - return True - - @staticmethod - def build_instance_tags(clusterid): - ''' This function will return a dictionary of the instance tags. - - The main desire to have this inside of a filter_plugin is that we - need to build the following key. - - {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} - - ''' - tags = {'clusterid': clusterid, - 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} - - return tags - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {'build_instance_tags': self.build_instance_tags, - 'scale_groups_match_capacity': self.scale_groups_match_capacity, - 'scale_groups_serial': self.scale_groups_serial} diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 9485cc3ac..a9f9cc3c4 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -43,6 +43,7 @@ - name: set the value for the deployment_serial and the current asgs set_fact: + # scale_groups_serial is a custom filter in role lib_utils l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}" openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}" diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml index 1f4ef3e1c..3ad876e37 100644 --- a/roles/openshift_aws/tasks/wait_for_groups.yml +++ b/roles/openshift_aws/tasks/wait_for_groups.yml @@ -8,6 +8,7 @@ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}" register: qasg + # scale_groups_match_capacity is a custom filter in role lib_utils until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool delay: 10 retries: 60 diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py deleted file mode 100644 index 58b228fee..000000000 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -""" -Custom filters for use in openshift-ansible -""" - - -# Disabling too-many-public-methods, since filter methods are necessarily -# public -# pylint: disable=too-many-public-methods -class FilterModule(object): - """ Custom ansible filters """ - - @staticmethod - def 
oo_cert_expiry_results_to_json(hostvars, play_hosts): - """Takes results (`hostvars`) from the openshift_cert_expiry role -check and serializes them into proper machine-readable JSON -output. This filter parameter **MUST** be the playbook `hostvars` -variable. The `play_hosts` parameter is so we know what to loop over -when we're extrating the values. - -Returns: - -Results are collected into two top-level keys under the `json_results` -dict: - -* `json_results.data` [dict] - Each individual host check result, keys are hostnames -* `json_results.summary` [dict] - Summary of number of `warning` and `expired` -certificates - -Example playbook usage: - - - name: Generate expiration results JSON - run_once: yes - delegate_to: localhost - when: openshift_certificate_expiry_save_json_results|bool - copy: - content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" - dest: "{{ openshift_certificate_expiry_json_results_path }}" - - """ - json_result = { - 'data': {}, - 'summary': {}, - } - - for host in play_hosts: - json_result['data'][host] = hostvars[host]['check_results']['check_results'] - - total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts]) - total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts]) - total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts]) - total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts]) - - json_result['summary']['warning'] = total_warnings - json_result['summary']['expired'] = total_expired - json_result['summary']['ok'] = total_ok - json_result['summary']['total'] = total_total - - return json_result - - def filters(self): - """ returns a mapping of filters to methods """ - return { - "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json, - } diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py deleted file mode 100644 index e355266b0..000000000 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ /dev/null @@ -1,839 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# pylint: disable=line-too-long,invalid-name - -"""For details on this module see DOCUMENTATION (below)""" - -import base64 -import datetime -import io -import os -import subprocess -import yaml - -# pylint import-error disabled because pylint cannot find the package -# when installed in a virtualenv -from ansible.module_utils.six.moves import configparser # pylint: disable=import-error -from ansible.module_utils.basic import AnsibleModule - -try: - # You can comment this import out and include a 'pass' in this - # block if you're manually testing this module on a NON-ATOMIC - # HOST (or any host that just doesn't have PyOpenSSL - # available). That will force the `load_and_handle_cert` function - # to use the Fake OpenSSL classes. - import OpenSSL.crypto - HAS_OPENSSL = True -except ImportError: - # Some platforms (such as RHEL Atomic) may not have the Python - # OpenSSL library installed. In this case we will use a manual - # work-around to parse each certificate. - # - # Check for 'OpenSSL.crypto' in `sys.modules` later. 
- HAS_OPENSSL = False - -DOCUMENTATION = ''' ---- -module: openshift_cert_expiry -short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster -description: - - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired. - - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following: - - C(ok) - not expired, and outside of the expiration C(warning_days) window. - - C(warning) - not expired, but will expire between now and the C(warning_days) window. - - C(expired) - an expired certificate. - - Certificate flagging follow this logic: - - If the expiration date is before now then the certificate is classified as C(expired). - - The certificates time to live (expiration date - now) is calculated, if that time window is less than C(warning_days) the certificate is classified as C(warning). - - All other conditions are classified as C(ok). - - The following keys are ALSO present in the certificate summary: - - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted) - - C(days_remaining) - The number of days until the certificate expires. - - C(expiry) - The date the certificate expires on. - - C(path) - The full path to the certificate on the examined host. -version_added: "1.0" -options: - config_base: - description: - - Base path to OCP system settings. - required: false - default: /etc/origin - warning_days: - description: - - Flag certificates which will expire in C(warning_days) days from now. - required: false - default: 30 - show_all: - description: - - Enable this option to show analysis of ALL certificates examined by this module. - - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported. - required: false - default: false - -author: "Tim Bielawa (@tbielawa) " -''' - -EXAMPLES = ''' -# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now -- openshift_cert_expiry: - -# Expand the warning window to show certificates expiring within a year from now -- openshift_cert_expiry: warning_days=365 - -# Show expired, soon to expire (now + 30 days), and all other certificates examined -- openshift_cert_expiry: show_all=true -''' - - -class FakeOpenSSLCertificate(object): - """This provides a rough mock of what you get from -`OpenSSL.crypto.load_certificate()`. This is a work-around for -platforms missing the Python OpenSSL library. 
- """ - def __init__(self, cert_string): - """`cert_string` is a certificate in the form you get from running a -.crt through 'openssl x509 -in CERT.cert -text'""" - self.cert_string = cert_string - self.serial = None - self.subject = None - self.extensions = [] - self.not_after = None - self._parse_cert() - - def _parse_cert(self): - """Manually parse the certificate line by line""" - self.extensions = [] - - PARSING_ALT_NAMES = False - PARSING_HEX_SERIAL = False - for line in self.cert_string.split('\n'): - l = line.strip() - if PARSING_ALT_NAMES: - # We're parsing a 'Subject Alternative Name' line - self.extensions.append( - FakeOpenSSLCertificateSANExtension(l)) - - PARSING_ALT_NAMES = False - continue - - if PARSING_HEX_SERIAL: - # Hex serials arrive colon-delimited - serial_raw = l.replace(':', '') - # Convert to decimal - self.serial = int('0x' + serial_raw, base=16) - PARSING_HEX_SERIAL = False - continue - - # parse out the bits that we can - if l.startswith('Serial Number:'): - # Decimal format: - # Serial Number: 11 (0xb) - # => 11 - # Hex Format (large serials): - # Serial Number: - # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf - # => 14449739080294792594019643629255165375 - if l.endswith(':'): - PARSING_HEX_SERIAL = True - continue - self.serial = int(l.split()[-2]) - - elif l.startswith('Not After :'): - # Not After : Feb 7 18:19:35 2019 GMT - # => strptime(str, '%b %d %H:%M:%S %Y %Z') - # => strftime('%Y%m%d%H%M%SZ') - # => 20190207181935Z - not_after_raw = l.partition(' : ')[-1] - # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT') - not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z') - self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ') - - elif l.startswith('X509v3 Subject Alternative Name:'): - PARSING_ALT_NAMES = True - continue - - elif l.startswith('Subject:'): - # O = system:nodes, CN = system:node:m01.example.com - self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1]) - - def get_serial_number(self): - """Return the serial number of the cert""" - return self.serial - - def get_subject(self): - """Subjects must implement get_components() and return dicts or -tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject': - - Subject: Subject: O=system:nodes, CN=system:node:m01.example.com - -might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')] - """ - return self.subject - - def get_extension(self, i): - """Extensions must implement get_short_name() and return the string -'subjectAltName'""" - return self.extensions[i] - - def get_extension_count(self): - """ get_extension_count """ - return len(self.extensions) - - def get_notAfter(self): - """Returns a date stamp as a string in the form -'20180922170439Z'. strptime the result with format param: -'%Y%m%d%H%M%SZ'.""" - return self.not_after - - -class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods - """Mocks what happens when `get_extension` is called on a certificate -object""" - - def __init__(self, san_string): - """With `san_string` as you get from: - - $ openssl x509 -in certificate.crt -text - """ - self.san_string = san_string - self.short_name = 'subjectAltName' - - def get_short_name(self): - """Return the 'type' of this extension. 
It's always the same though -because we only care about subjectAltName's""" - return self.short_name - - def __str__(self): - """Return this extension and the value as a simple string""" - return self.san_string - - -# pylint: disable=too-few-public-methods -class FakeOpenSSLCertificateSubjects(object): - """Mocks what happens when `get_subject` is called on a certificate -object""" - - def __init__(self, subject_string): - """With `subject_string` as you get from: - - $ openssl x509 -in certificate.crt -text - """ - self.subjects = [] - for s in subject_string.split(', '): - name, _, value = s.partition(' = ') - self.subjects.append((name, value)) - - def get_components(self): - """Returns a list of tuples""" - return self.subjects - - -###################################################################### -def filter_paths(path_list): - """`path_list` - A list of file paths to check. Only files which exist -will be returned - """ - return [p for p in path_list if os.path.exists(os.path.realpath(p))] - - -# pylint: disable=too-many-locals,too-many-branches -# -# TODO: Break this function down into smaller chunks -def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None): - """Load a certificate, split off the good parts, and return some -useful data - -Params: - -- `cert_string` (string) - a certificate loaded into a string object -- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against -- `base64decode` (bool) - run base64.b64decode() on the input -- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors) - -Returns: -A tuple of the form: - (cert_subject, cert_expiry_date, time_remaining, cert_serial_number) - """ - if base64decode: - _cert_string = base64.b64decode(cert_string).decode('utf-8') - else: - _cert_string = cert_string - - # Disable this. We 'redefine' the type because we are working - # around a missing library on the target host. - # - # pylint: disable=redefined-variable-type - if HAS_OPENSSL: - # No work-around required - cert_loaded = OpenSSL.crypto.load_certificate( - OpenSSL.crypto.FILETYPE_PEM, _cert_string) - else: - # Missing library, work-around required. Run the 'openssl' - # command on it to decode it - cmd = 'openssl x509 -text' - try: - openssl_proc = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, - stdin=subprocess.PIPE) - except OSError: - ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.") - else: - openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8') - cert_loaded = FakeOpenSSLCertificate(openssl_decoded) - - ###################################################################### - # Read all possible names from the cert - cert_subjects = [] - for name, value in cert_loaded.get_subject().get_components(): - if isinstance(name, bytes) or isinstance(value, bytes): - name = name.decode('utf-8') - value = value.decode('utf-8') - cert_subjects.append('{}:{}'.format(name, value)) - - # To read SANs from a cert we must read the subjectAltName - # extension from the X509 Object. 
What makes this more difficult - # is that pyOpenSSL does not give extensions as an iterable - san = None - for i in range(cert_loaded.get_extension_count()): - ext = cert_loaded.get_extension(i) - if ext.get_short_name() == 'subjectAltName': - san = ext - - if san is not None: - # The X509Extension object for subjectAltName prints as a - # string with the alt names separated by a comma and a - # space. Split the string by ', ' and then add our new names - # to the list of existing names - cert_subjects.extend(str(san).split(', ')) - - cert_subject = ', '.join(cert_subjects) - ###################################################################### - - # Grab the expiration date - not_after = cert_loaded.get_notAfter() - # example get_notAfter() => 20180922170439Z - if isinstance(not_after, bytes): - not_after = not_after.decode('utf-8') - - cert_expiry_date = datetime.datetime.strptime( - not_after, - '%Y%m%d%H%M%SZ') - - time_remaining = cert_expiry_date - now - - return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number()) - - -def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list): - """Given metadata about a certificate under examination, classify it - into one of three categories, 'ok', 'warning', and 'expired'. - -Params: - -- `cert_meta` dict - A dict with certificate metadata. Required fields - include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'. -- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against -- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires -- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is -- `cert_list` list - A list to shove the classified cert into - -Return: -- `cert_list` - The updated list of classified certificates - """ - expiry_str = str(cert_meta['expiry']) - # Categorization - if cert_meta['expiry'] < now: - # This already expired, must NOTIFY - cert_meta['health'] = 'expired' - elif time_remaining < expire_window: - # WARN about this upcoming expirations - cert_meta['health'] = 'warning' - else: - # Not expired or about to expire - cert_meta['health'] = 'ok' - - cert_meta['expiry'] = expiry_str - cert_meta['serial_hex'] = hex(int(cert_meta['serial'])) - cert_list.append(cert_meta) - return cert_list - - -def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs): - """Calculate the summary text for when the module finishes -running. This includes counts of each classification and what have -you. - -Params: - -- `certificates` (list of dicts) - Processed `expire_check_result` - dicts with filled in `health` keys for system certificates. -- `kubeconfigs` - as above for kubeconfigs -- `etcd_certs` - as above for etcd certs - -Return: - -- `summary_results` (dict) - Counts of each cert type classification - and total items examined. 
- """ - items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs - - summary_results = { - 'system_certificates': len(certificates), - 'kubeconfig_certificates': len(kubeconfigs), - 'etcd_certificates': len(etcd_certs), - 'router_certs': len(router_certs), - 'registry_certs': len(registry_certs), - 'total': len(items), - 'ok': 0, - 'warning': 0, - 'expired': 0 - } - - summary_results['expired'] = len([c for c in items if c['health'] == 'expired']) - summary_results['warning'] = len([c for c in items if c['health'] == 'warning']) - summary_results['ok'] = len([c for c in items if c['health'] == 'ok']) - - return summary_results - - -###################################################################### -# This is our module MAIN function after all, so there's bound to be a -# lot of code bundled up into one block -# -# Reason: These checks are disabled because the issue was introduced -# during a period where the pylint checks weren't enabled for this file -# Status: temporarily disabled pending future refactoring -# pylint: disable=too-many-locals,too-many-statements,too-many-branches -def main(): - """This module examines certificates (in various forms) which compose -an OpenShift Container Platform cluster - """ - - module = AnsibleModule( - argument_spec=dict( - config_base=dict( - required=False, - default="/etc/origin", - type='str'), - warning_days=dict( - required=False, - default=30, - type='int'), - show_all=dict( - required=False, - default=False, - type='bool') - ), - supports_check_mode=True, - ) - - # Basic scaffolding for OpenShift specific certs - openshift_base_config_path = os.path.realpath(module.params['config_base']) - openshift_master_config_path = os.path.join(openshift_base_config_path, - "master", "master-config.yaml") - openshift_node_config_path = os.path.join(openshift_base_config_path, - "node", "node-config.yaml") - openshift_cert_check_paths = [ - openshift_master_config_path, - openshift_node_config_path, - ] - - # Paths for Kubeconfigs. Additional kubeconfigs are conditionally - # checked later in the code - master_kube_configs = ['admin', 'openshift-master', - 'openshift-node', 'openshift-router', - 'openshift-registry'] - - kubeconfig_paths = [] - for m_kube_config in master_kube_configs: - kubeconfig_paths.append( - os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig") - ) - - # Validate some paths we have the ability to do ahead of time - openshift_cert_check_paths = filter_paths(openshift_cert_check_paths) - kubeconfig_paths = filter_paths(kubeconfig_paths) - - # etcd, where do you hide your certs? Used when parsing etcd.conf - etcd_cert_params = [ - "ETCD_CA_FILE", - "ETCD_CERT_FILE", - "ETCD_PEER_CA_FILE", - "ETCD_PEER_CERT_FILE", - ] - - # Expiry checking stuff - now = datetime.datetime.now() - # todo, catch exception for invalid input and return a fail_json - warning_days = int(module.params['warning_days']) - expire_window = datetime.timedelta(days=warning_days) - - # Module stuff - # - # The results of our cert checking to return from the task call - check_results = {} - check_results['meta'] = {} - check_results['meta']['warning_days'] = warning_days - check_results['meta']['checked_at_time'] = str(now) - check_results['meta']['warn_before_date'] = str(now + expire_window) - check_results['meta']['show_all'] = str(module.params['show_all']) - # All the analyzed certs accumulate here - ocp_certs = [] - - ###################################################################### - # Sure, why not? 
Let's enable check mode. - if module.check_mode: - check_results['ocp_certs'] = [] - module.exit_json( - check_results=check_results, - msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'], - rc=0, - changed=False - ) - - ###################################################################### - # Check for OpenShift Container Platform specific certs - ###################################################################### - for os_cert in filter_paths(openshift_cert_check_paths): - # Open up that config file and locate the cert and CA - with io.open(os_cert, 'r', encoding='utf-8') as fp: - cert_meta = {} - cfg = yaml.load(fp) - # cert files are specified in parsed `fp` as relative to the path - # of the original config file. 'master-config.yaml' with certFile - # = 'foo.crt' implies that 'foo.crt' is in the same - # directory. certFile = '../foo.crt' is in the parent directory. - cfg_path = os.path.dirname(fp.name) - cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile']) - cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA']) - - ###################################################################### - # Load the certificate and the CA, parse their expiration dates into - # datetime objects so we can manipulate them later - for v in cert_meta.values(): - with io.open(v, 'r', encoding='utf-8') as fp: - cert = fp.read() - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(cert, now, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs) - - ###################################################################### - # /Check for OpenShift Container Platform specific certs - ###################################################################### - - ###################################################################### - # Check service Kubeconfigs - ###################################################################### - kubeconfigs = [] - - # There may be additional kubeconfigs to check, but their naming - # is less predictable than the ones we've already assembled. - - try: - # Try to read the standard 'node-config.yaml' file to check if - # this host is a node. - with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp: - cfg = yaml.load(fp) - - # OK, the config file exists, therefore this is a - # node. Nodes have their own kubeconfig files to - # communicate with the master API. Let's read the relative - # path to that file from the node config. 
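The relative-path convention used throughout this module (certFile, clientCA, and masterKubeConfig are all resolved against the directory of the config file that names them) can be sketched in isolation. A minimal, hypothetical illustration, not part of the module itself; the helper name and the sample path in the comments are invented for the example:

    import os
    import yaml

    def resolve_relative_to_config(config_file, key):
        """Resolve cfg[key] against the directory holding config_file."""
        with open(config_file) as fp:
            cfg = yaml.safe_load(fp)
        base = os.path.dirname(os.path.abspath(config_file))
        return os.path.join(base, cfg[key])

    # e.g. 'masterKubeConfig: node.kubeconfig' inside
    # /etc/origin/node/node-config.yaml resolves to
    # /etc/origin/node/node.kubeconfig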
- node_masterKubeConfig = cfg['masterKubeConfig'] - # As before, the path to the 'masterKubeConfig' file is - # relative to `fp` - cfg_path = os.path.dirname(fp.name) - node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig) - - with io.open(node_kubeconfig, 'r', encoding='utf8') as fp: - # Read in the nodes kubeconfig file and grab the good stuff - cfg = yaml.load(fp) - - c = cfg['users'][0]['user']['client-certificate-data'] - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) - except IOError: - # This is not a node - pass - - for kube in filter_paths(kubeconfig_paths): - with io.open(kube, 'r', encoding='utf-8') as fp: - # TODO: Maybe consider catching exceptions here? - cfg = yaml.load(fp) - - # Per conversation, "the kubeconfigs you care about: - # admin, router, registry should all be single - # value". Following that advice we only grab the data for - # the user at index 0 in the 'users' list. There should - # not be more than one user. - c = cfg['users'][0]['user']['client-certificate-data'] - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) - - ###################################################################### - # /Check service Kubeconfigs - ###################################################################### - - ###################################################################### - # Check etcd certs - # - # Two things to check: 'external' etcd, and embedded etcd. - ###################################################################### - # FIRST: The 'external' etcd - # - # Some values may be duplicated, make this a set for now so we - # unique them all - etcd_certs_to_check = set([]) - etcd_certs = [] - etcd_cert_params.append('dne') - try: - with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp: - # Add dummy header section. - config = io.StringIO() - config.write(u'[ETCD]\n') - config.write(fp.read().replace('%', '%%')) - config.seek(0, os.SEEK_SET) - - etcd_config = configparser.ConfigParser() - etcd_config.readfp(config) - - for param in etcd_cert_params: - try: - etcd_certs_to_check.add(etcd_config.get('ETCD', param)) - except configparser.NoOptionError: - # That parameter does not exist, oh well... 
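Stepping back for a moment: the dummy [ETCD] header written into the StringIO buffer above exists because etcd.conf is a flat key=value file with no section headers, which configparser cannot parse natively (and the '%%' replacement guards against interpolation of literal percent signs). A self-contained sketch of the same trick, with made-up file content:

    import io
    import configparser

    raw = 'ETCD_CA_FILE=/etc/etcd/ca.crt\n'   # invented etcd.conf content
    buf = io.StringIO(u'[ETCD]\n' + raw.replace('%', '%%'))
    cfg = configparser.ConfigParser()
    cfg.read_file(buf)                        # readfp() is the older spelling
    assert cfg.get('ETCD', 'ETCD_CA_FILE') == '/etc/etcd/ca.crt'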
- pass - except IOError: - # No etcd to see here, move along - pass - - for etcd_cert in filter_paths(etcd_certs_to_check): - with io.open(etcd_cert, 'r', encoding='utf-8') as fp: - c = fp.read() - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(c, now, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) - - ###################################################################### - # Now the embedded etcd - ###################################################################### - try: - with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp: - cfg = yaml.load(fp) - except IOError: - # Not present - pass - else: - if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None: - # This is embedded - etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile'] - else: - # Not embedded - etcd_crt_name = None - - if etcd_crt_name is not None: - # etcd_crt_name is relative to the location of the - # master-config.yaml file - cfg_path = os.path.dirname(fp.name) - etcd_cert = os.path.join(cfg_path, etcd_crt_name) - with open(etcd_cert, 'r') as etcd_fp: - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': etcd_fp.name, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) - - ###################################################################### - # /Check etcd certs - ###################################################################### - - ###################################################################### - # Check router/registry certs - # - # These are saved as secrets in etcd. That means that we can not - # simply read a file to grab the data. Instead we're going to - # subprocess out to the 'oc get' command. On non-masters this - # command will fail, that is expected so we catch that exception. - ###################################################################### - router_certs = [] - registry_certs = [] - - ###################################################################### - # First the router certs - try: - router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(), - stdout=subprocess.PIPE) - router_ds = yaml.load(router_secrets_raw.communicate()[0]) - router_c = router_ds['data']['tls.crt'] - router_path = router_ds['metadata']['selfLink'] - except TypeError: - # YAML couldn't load the result, this is not a master - pass - except OSError: - # The OC command doesn't exist here. Move along. 
- pass - else: - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': router_path, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs) - - ###################################################################### - # Now for registry - try: - registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(), - stdout=subprocess.PIPE) - registry_ds = yaml.load(registry_secrets_raw.communicate()[0]) - registry_c = registry_ds['data']['registry.crt'] - registry_path = registry_ds['metadata']['selfLink'] - except TypeError: - # YAML couldn't load the result, this is not a master - pass - except OSError: - # The OC command doesn't exist here. Move along. - pass - else: - (cert_subject, - cert_expiry_date, - time_remaining, - cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module) - - expire_check_result = { - 'cert_cn': cert_subject, - 'path': registry_path, - 'expiry': cert_expiry_date, - 'days_remaining': time_remaining.days, - 'health': None, - 'serial': cert_serial - } - - classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs) - - ###################################################################### - # /Check router/registry certs - ###################################################################### - - res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs) - - msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format( - count=res['total'], - exp=res['expired'], - warn=res['warning'], - ok=res['ok'], - window=int(module.params['warning_days']), - ) - - # By default we only return detailed information about expired or - # warning certificates. If show_all is true then we will print all - # the certificates examined. - if not module.params['show_all']: - check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']] - check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']] - check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']] - check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']] - check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']] - else: - check_results['ocp_certs'] = ocp_certs - check_results['kubeconfigs'] = kubeconfigs - check_results['etcd'] = etcd_certs - check_results['registry'] = registry_certs - check_results['router'] = router_certs - - # Sort the final results to report in order of ascending safety - # time. That is to say, the certificates which will expire sooner - # will be at the front of the list and certificates which will - # expire later are at the end. Router and registry certs should be - # limited to just 1 result, so don't bother sorting those. 
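To make that ordering concrete, here is a toy illustration of the ascending days_remaining sort; the entries are invented for the example:

    certs = [
        {'cert_cn': 'CN:b', 'days_remaining': 300},
        {'cert_cn': 'CN:a', 'days_remaining': 12},
        {'cert_cn': 'CN:c', 'days_remaining': 90},
    ]
    # Soonest-to-expire first, matching the report order produced below.
    certs = sorted(certs, key=lambda c: c['days_remaining'])
    assert [c['cert_cn'] for c in certs] == ['CN:a', 'CN:c', 'CN:b']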
- def cert_key(item): - ''' return the days_remaining key ''' - return item['days_remaining'] - - check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key) - check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key) - check_results['etcd'] = sorted(check_results['etcd'], key=cert_key) - - # This module will never change anything, but we might want to - # change the return code parameter if there is some catastrophic - # error we noticed earlier - module.exit_json( - check_results=check_results, - summary=res, - msg=msg, - rc=0, - changed=False - ) - - -if __name__ == '__main__': - main() diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index 8dea2c07f..7062b5060 100644 --- a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -16,7 +16,9 @@ - name: Generate the result JSON string run_once: yes - set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" + set_fact: + # oo_cert_expiry_results_to_json is a custom filter in role lib_utils + json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py deleted file mode 100644 index df948fff0..000000000 --- a/roles/openshift_certificate_expiry/test/conftest.py +++ /dev/null @@ -1,119 +0,0 @@ -# pylint: disable=missing-docstring,invalid-name,redefined-outer-name -import pytest -from OpenSSL import crypto - -# Parameter list for valid_cert fixture -VALID_CERTIFICATE_PARAMS = [ - { - 'short_name': 'client', - 'cn': 'client.example.com', - 'serial': 4, - 'uses': b'clientAuth', - 'dns': [], - 'ip': [], - }, - { - 'short_name': 'server', - 'cn': 'server.example.com', - 'serial': 5, - 'uses': b'serverAuth', - 'dns': ['kubernetes', 'openshift'], - 'ip': ['10.0.0.1', '192.168.0.1'] - }, - { - 'short_name': 'combined', - 'cn': 'combined.example.com', - # Verify that HUGE serials parse correctly. 
- # Frobs PARSING_HEX_SERIAL in _parse_cert - # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240 - 'serial': 14449739080294792594019643629255165375, - 'uses': b'clientAuth, serverAuth', - 'dns': ['etcd'], - 'ip': ['10.0.0.2', '192.168.0.2'] - } -] - -# Extract the short_name from VALID_CERTIFICATE_PARAMS to provide -# friendly naming for the valid_cert fixture -VALID_CERTIFICATE_IDS = [param['short_name'] for param in VALID_CERTIFICATE_PARAMS] - - -@pytest.fixture(scope='session') -def ca(tmpdir_factory): - ca_dir = tmpdir_factory.mktemp('ca') - - key = crypto.PKey() - key.generate_key(crypto.TYPE_RSA, 2048) - - cert = crypto.X509() - cert.set_version(3) - cert.set_serial_number(1) - cert.get_subject().commonName = 'test-signer' - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(24 * 60 * 60) - cert.set_issuer(cert.get_subject()) - cert.set_pubkey(key) - cert.add_extensions([ - crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE, pathlen:0'), - crypto.X509Extension(b'keyUsage', True, - b'digitalSignature, keyEncipherment, keyCertSign, cRLSign'), - crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert) - ]) - cert.add_extensions([ - crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=cert) - ]) - cert.sign(key, 'sha256') - - return { - 'dir': ca_dir, - 'key': key, - 'cert': cert, - } - - -@pytest.fixture(scope='session', - ids=VALID_CERTIFICATE_IDS, - params=VALID_CERTIFICATE_PARAMS) -def valid_cert(request, ca): - common_name = request.param['cn'] - - key = crypto.PKey() - key.generate_key(crypto.TYPE_RSA, 2048) - - cert = crypto.X509() - cert.set_serial_number(request.param['serial']) - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(24 * 60 * 60) - cert.set_issuer(ca['cert'].get_subject()) - cert.set_pubkey(key) - cert.set_version(3) - cert.get_subject().commonName = common_name - cert.add_extensions([ - crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'), - crypto.X509Extension(b'keyUsage', True, b'digitalSignature, keyEncipherment'), - crypto.X509Extension(b'extendedKeyUsage', False, request.param['uses']), - ]) - - if request.param['dns'] or request.param['ip']: - san_list = ['DNS:{}'.format(common_name)] - san_list.extend(['DNS:{}'.format(x) for x in request.param['dns']]) - san_list.extend(['IP:{}'.format(x) for x in request.param['ip']]) - - cert.add_extensions([ - crypto.X509Extension(b'subjectAltName', False, ', '.join(san_list).encode('utf8')) - ]) - cert.sign(ca['key'], 'sha256') - - cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) - cert_file = ca['dir'].join('{}.crt'.format(common_name)) - cert_file.write_binary(cert_contents) - - return { - 'common_name': common_name, - 'serial': request.param['serial'], - 'dns': request.param['dns'], - 'ip': request.param['ip'], - 'uses': request.param['uses'], - 'cert_file': cert_file, - 'cert': cert - } diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py deleted file mode 100644 index 8a521a765..000000000 --- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py +++ /dev/null @@ -1,90 +0,0 @@ -''' - Unit tests for the FakeOpenSSL classes -''' -import os -import subprocess -import sys - -import pytest - -MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library')) -sys.path.insert(1, MODULE_PATH) - -# pylint: disable=import-error,wrong-import-position,missing-docstring -# pylint: 
disable=invalid-name,redefined-outer-name
-from openshift_cert_expiry import FakeOpenSSLCertificate  # noqa: E402
-
-
-@pytest.fixture(scope='module')
-def fake_valid_cert(valid_cert):
-    cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
-           '-nameopt', 'oneline']
-    cert = subprocess.check_output(cmd)
-    return FakeOpenSSLCertificate(cert.decode('utf8'))
-
-
-def test_not_after(valid_cert, fake_valid_cert):
-    ''' Validate the value returned from get_notAfter() '''
-    real_cert = valid_cert['cert']
-
-    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
-    # is text, so decode the result from pyOpenSSL prior to comparing
-    assert real_cert.get_notAfter().decode('utf8') == fake_valid_cert.get_notAfter()
-
-
-def test_serial(valid_cert, fake_valid_cert):
-    ''' Validate the value returned from get_serial_number() '''
-    real_cert = valid_cert['cert']
-    assert real_cert.get_serial_number() == fake_valid_cert.get_serial_number()
-
-
-def test_get_subject(valid_cert, fake_valid_cert):
-    ''' Validate the certificate subject '''
-
-    # Gather the subject components and create a list of colon separated strings.
-    # Since the internal representation of pyOpenSSL uses bytes, we need to decode
-    # the results before comparing.
-    c_subjects = valid_cert['cert'].get_subject().get_components()
-    c_subj = ', '.join(['{}:{}'.format(x.decode('utf8'), y.decode('utf8')) for x, y in c_subjects])
-    f_subjects = fake_valid_cert.get_subject().get_components()
-    f_subj = ', '.join(['{}:{}'.format(x, y) for x, y in f_subjects])
-    assert c_subj == f_subj
-
-
-def get_san_extension(cert):
-    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
-    # is text, so we need to set the value to search for accordingly.
-    if isinstance(cert, FakeOpenSSLCertificate):
-        san_short_name = 'subjectAltName'
-    else:
-        san_short_name = b'subjectAltName'
-
-    for i in range(cert.get_extension_count()):
-        ext = cert.get_extension(i)
-        if ext.get_short_name() == san_short_name:
-            # return the string representation to compare the actual SAN
-            # values instead of the data types
-            return str(ext)
-
-    return None
-
-
-def test_subject_alt_names(valid_cert, fake_valid_cert):
-    real_cert = valid_cert['cert']
-
-    san = get_san_extension(real_cert)
-    f_san = get_san_extension(fake_valid_cert)
-
-    assert san == f_san
-
-    # If there are either dns or ip sans defined, verify common_name present
-    if valid_cert['ip'] or valid_cert['dns']:
-        assert 'DNS:' + valid_cert['common_name'] in f_san
-
-    # Verify all ip sans are present
-    for ip in valid_cert['ip']:
-        assert 'IP Address:' + ip in f_san
-
-    # Verify all dns sans are present
-    for name in valid_cert['dns']:
-        assert 'DNS:' + name in f_san
diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
deleted file mode 100644
index 98792e2ee..000000000
--- a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
+++ /dev/null
@@ -1,67 +0,0 @@
-'''
- Unit tests for the load_and_handle_cert method
-'''
-import datetime
-import os
-import sys
-
-import pytest
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
-sys.path.insert(1, MODULE_PATH)
-
-# pylint: disable=import-error,wrong-import-position,missing-docstring
-# pylint: disable=invalid-name,redefined-outer-name
-import openshift_cert_expiry  # noqa: E402
-
-# TODO: More testing on the results of the load_and_handle_cert function
-# could be
implemented here as well, such as verifying subjects -# match up. - - -@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate']) -def loaded_cert(request, valid_cert): - """ parameterized fixture to provide load_and_handle_cert results - for both OpenSSL and FakeOpenSSL parsed certificates - """ - now = datetime.datetime.now() - - openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate' - - # valid_cert['cert_file'] is a `py.path.LocalPath` object and - # provides a read_text() method for reading the file contents. - cert_string = valid_cert['cert_file'].read_text('utf8') - - (subject, - expiry_date, - time_remaining, - serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now) - - return { - 'now': now, - 'subject': subject, - 'expiry_date': expiry_date, - 'time_remaining': time_remaining, - 'serial': serial, - } - - -def test_serial(loaded_cert, valid_cert): - """Params: - - * `loaded_cert` comes from the `loaded_cert` fixture in this file - * `valid_cert` comes from the 'valid_cert' fixture in conftest.py - """ - valid_cert_serial = valid_cert['cert'].get_serial_number() - assert loaded_cert['serial'] == valid_cert_serial - - -def test_expiry(loaded_cert): - """Params: - - * `loaded_cert` comes from the `loaded_cert` fixture in this file - """ - expiry_date = loaded_cert['expiry_date'] - time_remaining = loaded_cert['time_remaining'] - now = loaded_cert['now'] - assert expiry_date == now + time_remaining diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py deleted file mode 100644 index 440b8ec28..000000000 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# pylint: disable=missing-docstring,invalid-name - -import random -import tempfile -import shutil -import os.path - -# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import -from ansible.module_utils.basic import * # noqa: F403 - - -DOCUMENTATION = ''' ---- -module: openshift_container_binary_sync -short_description: Copies OpenShift binaries out of the given image tag to host system. -''' - - -class BinarySyncError(Exception): - def __init__(self, msg): - super(BinarySyncError, self).__init__(msg) - self.msg = msg - - -# pylint: disable=too-few-public-methods,too-many-instance-attributes -class BinarySyncer(object): - """ - Syncs the openshift, oc, and kubectl binaries/symlinks out of - a container onto the host system. - """ - - def __init__(self, module, image, tag, backend): - self.module = module - self.changed = False - self.output = [] - self.bin_dir = '/usr/local/bin' - self._image = image - self.tag = tag - self.backend = backend - self.temp_dir = None # TBD - - def sync(self): - if self.backend == 'atomic': - return self._sync_atomic() - - return self._sync_docker() - - def _sync_atomic(self): - self.temp_dir = tempfile.mkdtemp() - temp_dir_mount = tempfile.mkdtemp() - try: - image_spec = '%s:%s' % (self.image, self.tag) - rc, stdout, stderr = self.module.run_command(['atomic', 'mount', - '--storage', "ostree", - image_spec, temp_dir_mount]) - if rc: - raise BinarySyncError("Error mounting image. 
stdout=%s, stderr=%s" % - (stdout, stderr)) - for i in ["openshift", "oc"]: - src_file = os.path.join(temp_dir_mount, "usr/bin", i) - shutil.copy(src_file, self.temp_dir) - - self._sync_binaries() - finally: - self.module.run_command(['atomic', 'umount', temp_dir_mount]) - shutil.rmtree(temp_dir_mount) - shutil.rmtree(self.temp_dir) - - def _sync_docker(self): - container_name = "openshift-cli-%s" % random.randint(1, 100000) - rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name', - container_name, '%s:%s' % (self.image, self.tag)]) - if rc: - raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" % - (stdout, stderr)) - self.output.append(stdout) - try: - self.temp_dir = tempfile.mkdtemp() - self.output.append("Using temp dir: %s" % self.temp_dir) - - rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name, - self.temp_dir]) - if rc: - raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % - (stdout, stderr)) - - rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name, - self.temp_dir]) - if rc: - raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" % - (stdout, stderr)) - - self._sync_binaries() - finally: - shutil.rmtree(self.temp_dir) - self.module.run_command(['docker', 'rm', container_name]) - - def _sync_binaries(self): - self._sync_binary('openshift') - - # In older versions, oc was a symlink to openshift: - if os.path.islink(os.path.join(self.temp_dir, 'oc')): - self._sync_symlink('oc', 'openshift') - else: - self._sync_binary('oc') - - # Ensure correct symlinks created: - self._sync_symlink('kubectl', 'openshift') - - # Remove old oadm binary - if os.path.exists(os.path.join(self.bin_dir, 'oadm')): - os.remove(os.path.join(self.bin_dir, 'oadm')) - - def _sync_symlink(self, binary_name, link_to): - """ Ensure the given binary name exists and links to the expected binary. """ - - # The symlink we are creating: - link_path = os.path.join(self.bin_dir, binary_name) - - # The expected file we should be linking to: - link_dest = os.path.join(self.bin_dir, link_to) - - if not os.path.exists(link_path) or \ - not os.path.islink(link_path) or \ - os.path.realpath(link_path) != os.path.realpath(link_dest): - if os.path.exists(link_path): - os.remove(link_path) - os.symlink(link_to, os.path.join(self.bin_dir, binary_name)) - self.output.append("Symlinked %s to %s." % (link_path, link_dest)) - self.changed = True - - def _sync_binary(self, binary_name): - src_path = os.path.join(self.temp_dir, binary_name) - dest_path = os.path.join(self.bin_dir, binary_name) - incoming_checksum = self.module.run_command(['sha256sum', src_path])[1] - if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum: - - # See: https://github.com/openshift/openshift-ansible/issues/4965 - if os.path.islink(dest_path): - os.unlink(dest_path) - self.output.append('Removed old symlink {} before copying binary.'.format(dest_path)) - shutil.move(src_path, dest_path) - self.output.append("Moved %s to %s." % (src_path, dest_path)) - self.changed = True - - @property - def raw_image(self): - """ - Returns the image as it was originally passed in to the instance. - - .. note:: - This image string will only work directly with the atomic command. - - :returns: The original image passed in. 
- :rtype: str - """ - return self._image - - @property - def image(self): - """ - Returns the image without atomic prefixes used to map to skopeo args. - - :returns: The image string without prefixes - :rtype: str - """ - image = self._image - for remove in ('oci:', 'http:', 'https:'): - if image.startswith(remove): - image = image.replace(remove, '') - return image - - -def main(): - module = AnsibleModule( # noqa: F405 - argument_spec=dict( - image=dict(required=True), - tag=dict(required=True), - backend=dict(required=True), - ), - supports_check_mode=True - ) - - image = module.params['image'] - tag = module.params['tag'] - backend = module.params['backend'] - - if backend not in ["docker", "atomic"]: - module.fail_json(msg="unknown backend") - - binary_syncer = BinarySyncer(module, image, tag, backend) - - try: - binary_syncer.sync() - except BinarySyncError as ex: - module.fail_json(msg=ex.msg) - - return module.exit_json(changed=binary_syncer.changed, - output=binary_syncer.output) - - -if __name__ == '__main__': - main() diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 37bed9dbe..ae8d1ace0 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -12,6 +12,7 @@ register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ openshift_cli_image }}" @@ -28,6 +29,7 @@ register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}" diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 87e6146d4..6e30a8610 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck): 'oo_etcd_to_config': 20 * 10**9, }, # Used to copy client binaries into, - # see roles/openshift_cli/library/openshift_container_binary_sync.py. + # see roles/lib_utils/library/openshift_container_binary_sync.py. 
'/usr/local/bin': { 'oo_masters_to_config': 1 * 10**9, 'oo_nodes_to_config': 1 * 10**9, diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py deleted file mode 100644 index 003ce5f9e..000000000 --- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift_hosted -''' - - -class FilterModule(object): - ''' Custom ansible filters for use by openshift_hosted role''' - - @staticmethod - def get_router_replicas(replicas=None, router_nodes=None): - ''' This function will return the number of replicas - based on the results from the defined - openshift_hosted_router_replicas OR - the query from oc_obj on openshift nodes with a selector OR - default to 1 - - ''' - # We always use what they've specified if they've specified a value - if replicas is not None: - return replicas - - replicas = 1 - - # Ignore boolean expression limit of 5. - # pylint: disable=too-many-boolean-expressions - if (isinstance(router_nodes, dict) and - 'results' in router_nodes and - 'results' in router_nodes['results'] and - isinstance(router_nodes['results']['results'], list) and - len(router_nodes['results']['results']) > 0 and - 'items' in router_nodes['results']['results'][0]): - - if len(router_nodes['results']['results'][0]['items']) > 0: - replicas = len(router_nodes['results']['results'][0]['items']) - - return replicas - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {'get_router_replicas': self.get_router_replicas} diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 2dc9c98f6..c2be00d19 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -18,6 +18,7 @@ - name: set_fact replicas set_fact: + # get_router_replicas is a custom filter in role lib_utils replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index ba412b5a6..247c7e4df 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key): raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key)) -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - def serviceaccount_name(qualified_sa): ''' Returns the simple name from a fully qualified name ''' return qualified_sa.split(":")[-1] @@ -134,7 +126,6 @@ class FilterModule(object): return { 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, - 'map_from_pairs': map_from_pairs, 'min_cpu': min_cpu, 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 9b58e4456..87b4204b5 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: 
"https://kubernetes.default.svc.{{ openshi openshift_logging_fluentd_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" openshift_logging_fluentd_cpu_limit: null openshift_logging_fluentd_cpu_request: 100m diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index db6f23126..369ba86b3 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub openshift_logging_mux_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" openshift_logging_mux_cpu_limit: null openshift_logging_mux_cpu_request: 100m diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index eea1401b8..b12a6b346 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -181,6 +181,7 @@ - restart master api - set_fact: + # translate_idps is a custom filter in role lib_utils translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}" # TODO: add the validate parameter when there is a validation command to run diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml index 8558bf3e9..995a5ab70 100644 --- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml +++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml @@ -1,6 +1,8 @@ --- # Upgrade predicates - vars: + # openshift_master_facts_default_predicates is a custom lookup plugin in + # role lib_utils prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 649a4bc5d..ce27e238f 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -101,6 +101,7 @@ state: hard force: true with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}" when: master_certs_missing | bool and inventory_hostname != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py deleted file mode 100644 index ff15f693b..000000000 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ /dev/null @@ -1,532 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift-master -''' -import copy -import sys - -from ansible import errors -from ansible.parsing.yaml.dumper import AnsibleDumper 
-from ansible.plugins.filter.core import to_bool as ansible_bool - -# ansible.compat.six goes away with Ansible 2.4 -try: - from ansible.compat.six import string_types, u -except ImportError: - from ansible.module_utils.six import string_types, u - -import yaml - - -class IdentityProviderBase(object): - """ IdentityProviderBase - - Attributes: - name (str): Identity provider Name - login (bool): Is this identity provider a login provider? - challenge (bool): Is this identity provider a challenge provider? - provider (dict): Provider specific config - _idp (dict): internal copy of the IDP dict passed in - _required (list): List of lists of strings for required attributes - _optional (list): List of lists of strings for optional attributes - _allow_additional (bool): Does this provider support attributes - not in _required and _optional - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - # disabling this check since the number of instance attributes are - # necessary for this class - # pylint: disable=too-many-instance-attributes - def __init__(self, api_version, idp): - if api_version not in ['v1']: - raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version)) - - self._idp = copy.deepcopy(idp) - - if 'name' not in self._idp: - raise errors.AnsibleFilterError("|failed identity provider missing a name") - - if 'kind' not in self._idp: - raise errors.AnsibleFilterError("|failed identity provider missing a kind") - - self.name = self._idp.pop('name') - self.login = ansible_bool(self._idp.pop('login', False)) - self.challenge = ansible_bool(self._idp.pop('challenge', False)) - self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind')) - - mm_keys = ('mappingMethod', 'mapping_method') - mapping_method = None - for key in mm_keys: - if key in self._idp: - mapping_method = self._idp.pop(key) - if mapping_method is None: - mapping_method = self.get_default('mappingMethod') - self.mapping_method = mapping_method - - valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] - if self.mapping_method not in valid_mapping_methods: - raise errors.AnsibleFilterError("|failed unknown mapping method " - "for provider {0}".format(self.__class__.__name__)) - self._required = [] - self._optional = [] - self._allow_additional = True - - @staticmethod - def validate_idp_list(idp_list): - ''' validates a list of idps ''' - names = [x.name for x in idp_list] - if len(set(names)) != len(names): - raise errors.AnsibleFilterError("|failed more than one provider configured with the same name") - - for idp in idp_list: - idp.validate() - - def validate(self): - ''' validate an instance of this idp class ''' - pass - - @staticmethod - def get_default(key): - ''' get a default value for a given key ''' - if key == 'mappingMethod': - return 'claim' - else: - return None - - def set_provider_item(self, items, required=False): - ''' set a provider item based on the list of item names provided. 
''' - for item in items: - provider_key = items[0] - if item in self._idp: - self.provider[provider_key] = self._idp.pop(item) - break - else: - default = self.get_default(provider_key) - if default is not None: - self.provider[provider_key] = default - elif required: - raise errors.AnsibleFilterError("|failed provider {0} missing " - "required key {1}".format(self.__class__.__name__, provider_key)) - - def set_provider_items(self): - ''' set the provider items for this idp ''' - for items in self._required: - self.set_provider_item(items, True) - for items in self._optional: - self.set_provider_item(items) - if self._allow_additional: - for key in self._idp.keys(): - self.set_provider_item([key]) - else: - if len(self._idp) > 0: - raise errors.AnsibleFilterError("|failed provider {0} " - "contains unknown keys " - "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys()))) - - def to_dict(self): - ''' translate this idp to a dictionary ''' - return dict(name=self.name, challenge=self.challenge, - login=self.login, mappingMethod=self.mapping_method, - provider=self.provider) - - -class LDAPPasswordIdentityProvider(IdentityProviderBase): - """ LDAPPasswordIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['attributes'], ['url'], ['insecure']] - self._optional += [['ca'], - ['bindDN', 'bind_dn'], - ['bindPassword', 'bind_password']] - - self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False)) - - if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']: - pref_user = self._idp['attributes'].pop('preferred_username') - self._idp['attributes']['preferredUsername'] = pref_user - - def validate(self): - ''' validate this idp instance ''' - if not isinstance(self.provider['attributes'], dict): - raise errors.AnsibleFilterError("|failed attributes for provider " - "{0} must be a dictionary".format(self.__class__.__name__)) - - attrs = ['id', 'email', 'name', 'preferredUsername'] - for attr in attrs: - if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list): - raise errors.AnsibleFilterError("|failed {0} attribute for " - "provider {1} must be a list".format(attr, self.__class__.__name__)) - - unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs) - if len(unknown_attrs) > 0: - raise errors.AnsibleFilterError("|failed provider {0} has unknown " - "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs))) - - -class KeystonePasswordIdentityProvider(IdentityProviderBase): - """ KeystoneIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['url'], ['domainName', 'domain_name']] - self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] - - -class RequestHeaderIdentityProvider(IdentityProviderBase): - """ RequestHeaderIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - 
super(RequestHeaderIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['headers']] - self._optional += [['challengeURL', 'challenge_url'], - ['loginURL', 'login_url'], - ['clientCA', 'client_ca'], - ['clientCommonNames', 'client_common_names'], - ['emailHeaders', 'email_headers'], - ['nameHeaders', 'name_headers'], - ['preferredUsernameHeaders', 'preferred_username_headers']] - - def validate(self): - ''' validate this idp instance ''' - if not isinstance(self.provider['headers'], list): - raise errors.AnsibleFilterError("|failed headers for provider {0} " - "must be a list".format(self.__class__.__name__)) - - -class AllowAllPasswordIdentityProvider(IdentityProviderBase): - """ AllowAllPasswordIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - - -class DenyAllPasswordIdentityProvider(IdentityProviderBase): - """ DenyAllPasswordIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - - -class HTPasswdPasswordIdentityProvider(IdentityProviderBase): - """ HTPasswdPasswordIdentity - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['file', 'filename', 'fileName', 'file_name']] - - @staticmethod - def get_default(key): - if key == 'file': - return '/etc/origin/htpasswd' - else: - return IdentityProviderBase.get_default(key) - - -class BasicAuthPasswordIdentityProvider(IdentityProviderBase): - """ BasicAuthPasswordIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['url']] - self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']] - - -class IdentityProviderOauthBase(IdentityProviderBase): - """ IdentityProviderOauthBase - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - super(IdentityProviderOauthBase, self).__init__(api_version, idp) - self._allow_additional = False - self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']] - - def validate(self): - ''' validate an instance of this idp class ''' - pass - - -class OpenIDIdentityProvider(IdentityProviderOauthBase): - """ OpenIDIdentityProvider - - Attributes: - - Args: - api_version(str): OpenShift config version - idp (dict): idp config dict - - Raises: - AnsibleFilterError: - """ - def __init__(self, api_version, idp): - IdentityProviderOauthBase.__init__(self, api_version, idp) - self._required += [['claims'], ['urls']] - self._optional += [['ca'], - ['extraScopes'], - 
['extraAuthorizeParameters']]
-        if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
-            pref_user = self._idp['claims'].pop('preferred_username')
-            self._idp['claims']['preferredUsername'] = pref_user
-        if 'urls' in self._idp and 'user_info' in self._idp['urls']:
-            user_info = self._idp['urls'].pop('user_info')
-            self._idp['urls']['userInfo'] = user_info
-        if 'extra_scopes' in self._idp:
-            self._idp['extraScopes'] = self._idp.pop('extra_scopes')
-        if 'extra_authorize_parameters' in self._idp:
-            self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if not isinstance(self.provider['claims'], dict):
-            raise errors.AnsibleFilterError("|failed claims for provider {0} "
-                                            "must be a dictionary".format(self.__class__.__name__))
-
-        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
-            if var in self.provider and not isinstance(self.provider[var], var_type):
-                raise errors.AnsibleFilterError("|failed {1} for provider "
-                                                "{0} must be a {2}".format(self.__class__.__name__,
-                                                                           var,
-                                                                           var_type.__class__.__name__))
-
-        required_claims = ['id']
-        optional_claims = ['email', 'name', 'preferredUsername']
-        all_claims = required_claims + optional_claims
-
-        for claim in required_claims:
-            if claim in required_claims and claim not in self.provider['claims']:
-                raise errors.AnsibleFilterError("|failed {0} claim missing "
-                                                "for provider {1}".format(claim, self.__class__.__name__))
-
-        for claim in all_claims:
-            if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
-                raise errors.AnsibleFilterError("|failed {0} claims for "
-                                                "provider {1} must be a list".format(claim, self.__class__.__name__))
-
-        unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
-        if len(unknown_claims) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
-
-        if not isinstance(self.provider['urls'], dict):
-            raise errors.AnsibleFilterError("|failed urls for provider {0} "
-                                            "must be a dictionary".format(self.__class__.__name__))
-
-        required_urls = ['authorize', 'token']
-        optional_urls = ['userInfo']
-        all_urls = required_urls + optional_urls
-
-        for url in required_urls:
-            if url not in self.provider['urls']:
-                raise errors.AnsibleFilterError("|failed {0} url missing for "
-                                                "provider {1}".format(url, self.__class__.__name__))
-
-        unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
-        if len(unknown_urls) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
-
-
-class GoogleIdentityProvider(IdentityProviderOauthBase):
-    """ GoogleIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._optional += [['hostedDomain', 'hosted_domain']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
-
-
-class GitHubIdentityProvider(IdentityProviderOauthBase):
-    """ GitHubIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._optional += [['organizations'],
-                           ['teams']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use by the openshift_master role'''
-
-    @staticmethod
-    def translate_idps(idps, api_version):
-        ''' Translates a list of dictionaries into a valid identityProviders config '''
-        idp_list = []
-
-        if not isinstance(idps, list):
-            raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
-        for idp in idps:
-            if not isinstance(idp, dict):
-                raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
-
-            cur_module = sys.modules[__name__]
-            idp_class = getattr(cur_module, idp['kind'], None)
-            idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
-            idp_inst.set_provider_items()
-            idp_list.append(idp_inst)
-
-        IdentityProviderBase.validate_idp_list(idp_list)
-        return u(yaml.dump([idp.to_dict() for idp in idp_list],
-                           allow_unicode=True,
-                           default_flow_style=False,
-                           width=float("inf"),
-                           Dumper=AnsibleDumper))
-
-    @staticmethod
-    def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
-        ''' Return certificates to synchronize based on facts. '''
-        if not issubclass(type(hostvars), dict):
-            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-        certs = ['admin.crt',
-                 'admin.key',
-                 'admin.kubeconfig',
-                 'master.kubelet-client.crt',
-                 'master.kubelet-client.key',
-                 'master.proxy-client.crt',
-                 'master.proxy-client.key',
-                 'service-signer.crt',
-                 'service-signer.key']
-        if bool(include_ca):
-            certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
-        if bool(include_keys):
-            certs += ['serviceaccounts.private.key',
-                      'serviceaccounts.public.key']
-        return certs
-
-    @staticmethod
-    def oo_htpasswd_users_from_file(file_contents):
-        ''' return a dictionary of htpasswd users from htpasswd file contents '''
-        htpasswd_entries = {}
-        if not isinstance(file_contents, string_types):
-            raise errors.AnsibleFilterError("failed, expects to filter on a string")
-        for line in file_contents.splitlines():
-            user = None
-            passwd = None
-            if len(line) == 0:
-                continue
-            if ':' in line:
-                user, passwd = line.split(':', 1)
-
-            if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
-                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
-                raise errors.AnsibleFilterError(error_msg)
-            htpasswd_entries[user] = passwd
-        return htpasswd_entries
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {"translate_idps": self.translate_idps,
-                "certificates_to_synchronize": self.certificates_to_synchronize,
-                "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 85d0ac25c..f450c916a 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -57,6 +57,7 @@
     access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
     auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
     identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+    # oo_htpasswd_users_from_file is a custom filter in role lib_utils
     htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
     manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}"
     ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
@@ -90,6 +91,8 @@
 
 - name: Set Default scheduler predicates and priorities
   set_fact:
+    # openshift_master_facts_default_predicates is a custom lookup plugin in
+    # role lib_utils
     openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
     openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/openshift_master_facts/test/conftest.py
deleted file mode 100644
index 140cced73..000000000
--- a/roles/openshift_master_facts/test/conftest.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import os
-import sys
-
-import pytest
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule  # noqa: E402
-from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule  # noqa: E402
-
-
-@pytest.fixture()
-def predicates_lookup():
-    return PredicatesLookupModule()
-
-
-@pytest.fixture()
-def priorities_lookup():
-    return PrioritiesLookupModule()
-
-
-@pytest.fixture()
-def facts():
-    return {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-
-@pytest.fixture(params=[True, False])
-def regions_enabled(request):
-    return request.param
-
-
-@pytest.fixture(params=[True, False])
-def zones_enabled(request):
-    return request.param
-
-
-def v_prefix(release):
-    """Prefix a release number with 'v'."""
-    return "v" + release
-
-
-def minor(release):
-    """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
-    return release + ".1"
-
-
-@pytest.fixture(params=[str, v_prefix, minor])
-def release_mod(request):
-    """Modifies a release string to alternative valid values."""
-    return request.param
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
deleted file mode 100644
index e8da1e04a..000000000
--- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import copy
-import os
-import sys
-
-from ansible.errors import AnsibleError
-import pytest
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule  # noqa: E402
-
-
-class TestOpenShiftMasterFactsBadInput(object):
-    lookup = LookupModule()
-    default_facts = {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-    def test_missing_openshift_facts(self):
-        with pytest.raises(AnsibleError):
-            facts = {}
-            self.lookup.run(None, variables=facts)
-
-    def test_missing_deployment_type(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '10.10'
-            self.lookup.run(None, variables=facts)
-
-    def test_missing_short_version_and_missing_openshift_release(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['deployment_type'] = 'origin'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_deployment_types(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '1.1'
-            facts['openshift']['common']['deployment_type'] = 'bogus'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_origin_version(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '0.1'
-            facts['openshift']['common']['deployment_type'] = 'origin'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_ocp_version(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '0.1'
-            facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
-            self.lookup.run(None, variables=facts)
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
deleted file mode 100644
index 11aad9f03..000000000
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import pytest
-
-
-# Predicates ordered according to OpenShift Origin source:
-# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
-
-DEFAULT_PREDICATES_1_1 = [
-    {'name': 'PodFitsHostPorts'},
-    {'name': 'PodFitsResources'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'MatchNodeSelector'},
-]
-
-DEFAULT_PREDICATES_1_2 = [
-    {'name': 'PodFitsHostPorts'},
-    {'name': 'PodFitsResources'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MatchNodeSelector'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'}
-]
-
-DEFAULT_PREDICATES_1_3 = [
-    {'name': 'NoDiskConflict'},
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'}
-]
-
-DEFAULT_PREDICATES_1_4 = [
-    {'name': 'NoDiskConflict'},
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-    {'name': 'MatchInterPodAffinity'}
-]
-
-DEFAULT_PREDICATES_1_5 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-]
-
-DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
-
-DEFAULT_PREDICATES_3_7 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MaxAzureDiskVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-    {'name': 'NoVolumeNodeConflict'},
-]
-
-DEFAULT_PREDICATES_3_9 = DEFAULT_PREDICATES_3_8 = DEFAULT_PREDICATES_3_7
-
-REGION_PREDICATE = {
-    'name': 'Region',
-    'argument': {
-        'serviceAffinity': {
-            'labels': ['region']
-        }
-    }
-}
-
-TEST_VARS = [
-    ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
-    ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
-    ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
-    ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
-    ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
-    ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
-    ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
-    ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
-    ('1.5', 'origin', DEFAULT_PREDICATES_1_5),
-    ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
-    ('3.6', 'origin', DEFAULT_PREDICATES_3_6),
-    ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
-    ('3.7', 'origin', DEFAULT_PREDICATES_3_7),
-    ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7),
-    ('3.8', 'origin', DEFAULT_PREDICATES_3_8),
-    ('3.8', 'openshift-enterprise', DEFAULT_PREDICATES_3_8),
-    ('3.9', 'origin', DEFAULT_PREDICATES_3_9),
-    ('3.9', 'openshift-enterprise', DEFAULT_PREDICATES_3_9),
-]
-
-
-def assert_ok(predicates_lookup, default_predicates, regions_enabled, **kwargs):
-    results = predicates_lookup.run(None, regions_enabled=regions_enabled, **kwargs)
-    if regions_enabled:
-        assert results == default_predicates + [REGION_PREDICATE]
-    else:
-        assert results == default_predicates
-
-
-def test_openshift_version(predicates_lookup, openshift_version_fixture, regions_enabled):
-    facts, default_predicates = openshift_version_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_version_fixture(request, facts):
-    version, deployment_type, default_predicates = request.param
-    version += '.1'
-    facts['openshift_version'] = version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_predicates
-
-
-def test_openshift_release(predicates_lookup, openshift_release_fixture, regions_enabled):
-    facts, default_predicates = openshift_release_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_release_fixture(request, facts, release_mod):
-    release, deployment_type, default_predicates = request.param
-    facts['openshift_release'] = release_mod(release)
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_predicates
-
-
-def test_short_version(predicates_lookup, short_version_fixture, regions_enabled):
-    facts, default_predicates = short_version_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_fixture(request, facts):
-    short_version, deployment_type, default_predicates = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_predicates
-
-
-def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled):
-    facts, short_version, default_predicates = short_version_kwarg_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, variables=facts,
-        regions_enabled=regions_enabled, short_version=short_version)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_predicates = request.param
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, short_version, default_predicates
-
-
-def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled):
-    facts, deployment_type, default_predicates = deployment_type_kwarg_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, variables=facts,
-        regions_enabled=regions_enabled, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def deployment_type_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_predicates = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    return facts, deployment_type, default_predicates
-
-
-def test_short_version_deployment_type_kwargs(
-        predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
-    short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, regions_enabled=regions_enabled,
-        short_version=short_version, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_deployment_type_kwargs_fixture(request):
-    return request.param
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
deleted file mode 100644
index 527fc9ff4..000000000
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import pytest
-
-
-DEFAULT_PRIORITIES_1_1 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_2 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'NodeAffinityPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_3 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_4 = [
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1},
-    {'name': 'InterPodAffinityPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_1_5 = [
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'InterPodAffinityPriority', 'weight': 1},
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1}
-]
-
-DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5
-
-DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8 = DEFAULT_PRIORITIES_3_7 = DEFAULT_PRIORITIES_3_6
-
-ZONE_PRIORITY = {
-    'name': 'Zone',
-    'argument': {
-        'serviceAntiAffinity': {
-            'label': 'zone'
-        }
-    },
-    'weight': 2
-}
-
-TEST_VARS = [
-    ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
-    ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
-    ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
-    ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
-    ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
-    ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
-    ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
-    ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
-    ('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
-    ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
-    ('3.6', 'origin', DEFAULT_PRIORITIES_3_6),
-    ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6),
-    ('3.7', 'origin', DEFAULT_PRIORITIES_3_7),
-    ('3.7', 'openshift-enterprise', DEFAULT_PRIORITIES_3_7),
-    ('3.8', 'origin', DEFAULT_PRIORITIES_3_8),
-    ('3.8', 'openshift-enterprise', DEFAULT_PRIORITIES_3_8),
-    ('3.9', 'origin', DEFAULT_PRIORITIES_3_9),
-    ('3.9', 'openshift-enterprise', DEFAULT_PRIORITIES_3_9),
-]
-
-
-def assert_ok(priorities_lookup, default_priorities, zones_enabled, **kwargs):
-    results = priorities_lookup.run(None, zones_enabled=zones_enabled, **kwargs)
-    if zones_enabled:
-        assert results == default_priorities + [ZONE_PRIORITY]
-    else:
-        assert results == default_priorities
-
-
-def test_openshift_version(priorities_lookup, openshift_version_fixture, zones_enabled):
-    facts, default_priorities = openshift_version_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_version_fixture(request, facts):
-    version, deployment_type, default_priorities = request.param
-    version += '.1'
-    facts['openshift_version'] = version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_priorities
-
-
-def test_openshift_release(priorities_lookup, openshift_release_fixture, zones_enabled):
-    facts, default_priorities = openshift_release_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_release_fixture(request, facts, release_mod):
-    release, deployment_type, default_priorities = request.param
-    facts['openshift_release'] = release_mod(release)
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_priorities
-
-
-def test_short_version(priorities_lookup, short_version_fixture, zones_enabled):
-    facts, default_priorities = short_version_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_fixture(request, facts):
-    short_version, deployment_type, default_priorities = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, default_priorities
-
-
-def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled):
-    facts, short_version, default_priorities = short_version_kwarg_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, variables=facts,
-        zones_enabled=zones_enabled, short_version=short_version)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_priorities = request.param
-    facts['openshift']['common']['deployment_type'] = deployment_type
-    return facts, short_version, default_priorities
-
-
-def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled):
-    facts, deployment_type, default_priorities = deployment_type_kwarg_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, variables=facts,
-        zones_enabled=zones_enabled, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def deployment_type_kwarg_fixture(request, facts):
-    short_version, deployment_type, default_priorities = request.param
-    facts['openshift']['common']['short_version'] = short_version
-    return facts, deployment_type, default_priorities
-
-
-def test_short_version_deployment_type_kwargs(
-        priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
-    short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, zones_enabled=zones_enabled,
-        short_version=short_version, deployment_type=deployment_type)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_deployment_type_kwargs_fixture(request):
-    return request.param
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
deleted file mode 100644
index 6ed6d404c..000000000
--- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use with openshift named certificates
-'''
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use with openshift named certificates'''
-
-    @staticmethod
-    def oo_named_certificates_list(named_certificates):
-        ''' Returns named certificates list with correct fields for the master
-            config file.'''
-        return [{'certFile': named_certificate['certfile'],
-                 'keyFile': named_certificate['keyfile'],
-                 'names': named_certificate['names']} for named_certificate in named_certificates]
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {"oo_named_certificates_list": self.oo_named_certificates_list}
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
deleted file mode 100644
index eb13a58ba..000000000
--- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Ansible action plugin to generate pv and pvc dictionaries lists
-"""
-
-from ansible.plugins.action import ActionBase
-from ansible import errors
-
-
-class ActionModule(ActionBase):
-    """Action plugin to execute health checks."""
-
-    def get_templated(self, var_to_template):
-        """Return a properly templated ansible variable"""
-        return self._templar.template(self.task_vars.get(var_to_template))
-
-    def build_common(self, varname=None):
-        """Retrieve common variables for each pv and pvc type"""
-        volume = self.get_templated(str(varname) + '_volume_name')
-        size = self.get_templated(str(varname) + '_volume_size')
-        labels = self.task_vars.get(str(varname) + '_labels')
-        if labels:
-            labels = self._templar.template(labels)
-        else:
-            labels = dict()
-        access_modes = self.get_templated(str(varname) + '_access_modes')
-        return (volume, size, labels, access_modes)
-
-    def build_pv_nfs(self, varname=None):
-        """Build pv dictionary for nfs storage type"""
-        host = self.task_vars.get(str(varname) + '_host')
-        if host:
-            self._templar.template(host)
-        elif host is None:
-            groups = self.task_vars.get('groups')
-            default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
-            if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
-                host = groups['oo_nfs_to_config'][0]
-            else:
-                raise errors.AnsibleModuleError("|failed no storage host detected")
-        volume, size, labels, access_modes = self.build_common(varname=varname)
-        directory = self.get_templated(str(varname) + '_nfs_directory')
-        path = directory + '/' + volume
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                nfs=dict(
-                    server=host,
-                    path=path)))
-
-    def build_pv_openstack(self, varname=None):
-        """Build pv dictionary for openstack storage type"""
-        volume, size, labels, access_modes = self.build_common(varname=varname)
-        filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
-        volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                cinder=dict(
-                    fsType=filesystem,
-                    volumeID=volume_id)))
-
-    def build_pv_glusterfs(self, varname=None):
-        """Build pv dictionary for glusterfs storage type"""
-        volume, size, labels, access_modes = self.build_common(varname=varname)
-        endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
-        path = self.get_templated(str(varname) + '_glusterfs_path')
-        read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                glusterfs=dict(
-                    endpoints=endpoints,
-                    path=path,
-                    readOnly=read_only)))
-
-    def build_pv_dict(self, varname=None):
-        """Check for the existence of PV variables"""
-        kind = self.task_vars.get(str(varname) + '_kind')
-        if kind:
-            kind = self._templar.template(kind)
-            create_pv = self.task_vars.get(str(varname) + '_create_pv')
-            if create_pv and self._templar.template(create_pv):
-                if kind == 'nfs':
-                    return self.build_pv_nfs(varname=varname)
-
-                elif kind == 'openstack':
-                    return self.build_pv_openstack(varname=varname)
-
-                elif kind == 'glusterfs':
-                    return self.build_pv_glusterfs(varname=varname)
-
-                elif not (kind == 'object' or kind == 'dynamic'):
-                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                        kind,
-                        varname)
-                    raise errors.AnsibleModuleError(msg)
-        return None
-
-    def build_pvc_dict(self, varname=None):
-        """Check for the existence of PVC variables"""
-        kind = self.task_vars.get(str(varname) + '_kind')
-        if kind:
-            kind = self._templar.template(kind)
-            create_pv = self.task_vars.get(str(varname) + '_create_pv')
-            if create_pv:
-                create_pv = self._templar.template(create_pv)
-                create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
-                if create_pvc:
-                    create_pvc = self._templar.template(create_pvc)
-                    if kind != 'object' and create_pv and create_pvc:
-                        volume, size, _, access_modes = self.build_common(varname=varname)
-                        return dict(
-                            name="{0}-claim".format(volume),
-                            capacity=size,
-                            access_modes=access_modes)
-        return None
-
-    def run(self, tmp=None, task_vars=None):
-        """Run generate_pv_pvcs_list action plugin"""
-        result = super(ActionModule, self).run(tmp, task_vars)
-        # Ignore settting self.task_vars outside of init.
-        # pylint: disable=W0201
-        self.task_vars = task_vars or {}
-
-        result["changed"] = False
-        result["failed"] = False
-        result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
-        vars_to_check = ['openshift_hosted_registry_storage',
-                         'openshift_hosted_router_storage',
-                         'openshift_hosted_etcd_storage',
-                         'openshift_logging_storage',
-                         'openshift_loggingops_storage',
-                         'openshift_metrics_storage',
-                         'openshift_prometheus_storage',
-                         'openshift_prometheus_alertmanager_storage',
-                         'openshift_prometheus_alertbuffer_storage']
-        persistent_volumes = []
-        persistent_volume_claims = []
-        for varname in vars_to_check:
-            pv_dict = self.build_pv_dict(varname)
-            if pv_dict:
-                persistent_volumes.append(pv_dict)
-            pvc_dict = self.build_pvc_dict(varname)
-            if pvc_dict:
-                persistent_volume_claims.append(pvc_dict)
-        result["persistent_volumes"] = persistent_volumes
-        result["persistent_volume_claims"] = persistent_volume_claims
-        return result
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 0b4dd7d1f..b1d9c8cca 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -26,7 +26,8 @@
   when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
 
 - name: create standard pv and pvc lists
-  # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+  # generate_pv_pvcs_list is a custom action module defined in
+  # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
   generate_pv_pvcs_list: {}
   register: l_pv_pvcs_list
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
index 72c47b8ee..14f1f72c2 100644
--- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
@@ -6,15 +6,6 @@
 import re
 
 
-# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict given the source and delim delimited '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
 def vars_with_pattern(source, pattern=""):
     ''' Returns a list of variables whose name matches the given pattern '''
     if source == '':
@@ -39,6 +30,5 @@
     def filters(self):
         ''' Returns the names of the filters provided by this class '''
         return {
-            'map_from_pairs': map_from_pairs,
             'vars_with_pattern': vars_with_pattern
         }
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
deleted file mode 100644
index a86c96df7..000000000
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ /dev/null
@@ -1,23 +0,0 @@
-'''
- Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
-'''
-
-
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict given the source and delim delimited '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
-    ''' OpenShift Storage GlusterFS Filters '''
-
-    # pylint: disable=no-self-use, too-few-public-methods
-    def filters(self):
-        ''' Returns the names of the filters provided by this class '''
-        return {
-            'map_from_pairs': map_from_pairs
-        }
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 2ea7286f3..a374df0ce 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -4,6 +4,7 @@
     glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+    # map_from_pairs is a custom filter plugin in role lib_utils
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
     glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index b7cff6514..544a6f491 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -4,6 +4,7 @@
     glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+    # map_from_pairs is a custom filter plugin in role lib_utils
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
     glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
-- cgit v1.2.3


From edc4a03f14bc2e501a38c4faef229d21fb26a162 Mon Sep 17 00:00:00 2001
From: Vadim Rutkovsky
Date: Wed, 17 Jan 2018 12:43:10 +0100
Subject: Check rc for commands with openshift_client_binary and failed_when

Otherwise some failures might be masked, and the play would continue
instead of failing.

---
 roles/calico_master/tasks/main.yml               |  2 +-
 roles/openshift_metrics/tasks/oc_apply.yaml      |  8 ++++++--
 roles/openshift_persistent_volumes/tasks/pv.yml  |  2 +-
 roles/openshift_persistent_volumes/tasks/pvc.yml |  2 +-
 roles/openshift_provisioners/tasks/oc_apply.yaml | 12 +++++++++---
 5 files changed, 18 insertions(+), 8 deletions(-)

(limited to 'roles/openshift_persistent_volumes')

diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 05415a4d6..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -23,7 +23,7 @@
     -f {{ mktemp.stdout }}/calico-policy-controller.yml
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
   register: calico_create_output
-  failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+  failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
   changed_when: ('created' in calico_create_output.stdout)
 
 - name: Calico Master | Delete temp directory
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 8ccfb7192..057963c1a 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -16,7 +16,9 @@
     apply -f {{ file_name }}
     -n {{namespace}}
   register: generation_apply
-  failed_when: "'error' in generation_apply.stderr"
+  failed_when:
+  - "'error' in generation_apply.stderr"
+  - "generation_apply.rc != 0"
   changed_when: no
 
 - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -28,5 +30,7 @@
   register: version_changed
   vars:
     init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
-  failed_when: "'error' in version_changed.stderr"
+  failed_when:
+  - "'error' in version_changed.stderr"
+  - "version_changed.rc != 0"
   changed_when: version_changed.stdout | int > init_version | int
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index ef9ab7f5f..865269b7a 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -13,5 +13,5 @@
     --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pv_create_output
   when: persistent_volumes | length > 0
-  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+  failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0"
  changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index 2c5519192..6c12d128c 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -13,5 +13,5 @@
     --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pvc_create_output
   when: persistent_volume_claims | length > 0
-  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+  failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0"
   changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index a4ce53eae..239e1f1cc 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -15,7 +15,9 @@
     apply -f {{ file_name }}
    -n {{ namespace }}
   register: generation_apply
-  failed_when: "'error' in generation_apply.stderr"
+  failed_when:
+  - "'error' in generation_apply.stderr"
+  - "generation_apply.rc != 0"
   changed_when: no
 
 - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -36,7 +38,9 @@
     delete -f {{ file_name }}
     -n {{ namespace }}
   register: generation_delete
-  failed_when: "'error' in generation_delete.stderr"
+  failed_when:
+  - "'error' in generation_delete.stderr"
+  - "generation_delete.rc != 0"
   changed_when: generation_delete.rc == 0
   when: generation_apply.rc != 0
@@ -46,6 +50,8 @@
     apply -f {{ file_name }}
    -n {{ namespace }}
   register: generation_apply
-  failed_when: "'error' in generation_apply.stderr"
+  failed_when:
+  - "'error' in generation_apply.stderr"
+  - "generation_apply.rc != 0"
   changed_when: generation_apply.rc == 0
   when: generation_apply.rc != 0
-- cgit v1.2.3
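
A note on the failed_when pattern in the last commit: when failed_when is given a
list, Ansible requires every condition to be true before it marks the task failed,
so these tasks now fail only when the client both prints an error and exits with a
non-zero rc, and benign "already exists" output on a re-run no longer aborts the
play. The minimal standalone task below is a sketch of that pattern; the task name,
manifest path, namespace, and register variable are illustrative, not taken from
the repository:

- name: Apply an example manifest idempotently
  command: >
    {{ openshift_client_binary }} apply
    -f /tmp/example.yml
    -n default
  register: example_apply
  changed_when: no
  # Both conditions must hold for the task to fail: an "error" string in
  # stderr AND a non-zero exit code. Either signal alone does not fail
  # the task.
  failed_when:
  - "'error' in example_apply.stderr"
  - "example_apply.rc != 0"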