26 files changed, 281 insertions, 202 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index ec25643e6..e53f089d5 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.3.11-1 ./
+3.3.13-1 ./
diff --git a/README_openstack.md b/README_openstack.md
index e3cc7cc93..7a6b24145 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -43,6 +43,7 @@ The following options are used only by `heat_stack.yaml`. They are so used only
 * `external_net` (default to `external`): Name of the external network to connect to
 * `floating_ip_pool` (default to `external`): comma separated list of floating IP pools
 * `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
+* `node_port_from` (default to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
 
 Creating a cluster
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 11f18cf72..0413a73d4 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
 }
 
 Name: openshift-ansible
-Version: 3.3.11
+Version: 3.3.13
 Release: 1%{?dist}
 Summary: Openshift and Atomic Enterprise Ansible
 License: ASL 2.0
@@ -221,6 +221,33 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Fri Aug 19 2016 Troy Dawson <tdawson@redhat.com> 3.3.13-1
+- Fix warnings in OpenStack provider with ansible 2.1 (lhuard@amadeus.com)
+- Mount /sys rw (sdodson@redhat.com)
+- Update uninstall.yml (sdodson@redhat.com)
+- Fix padding on registry config (sdodson@redhat.com)
+
+* Wed Aug 17 2016 Troy Dawson <tdawson@redhat.com> 3.3.12-1
+- Fixes to typos, grammar, and product branding in cli_installer
+  (tpoitras@redhat.com)
+- Reconcile roles after master upgrade, but before nodes. (dgoodwin@redhat.com)
+- a-o-i: Fix nosetests after removing 3.2 from installer (smunilla@redhat.com)
+- Bug 1367323 - the "OpenShift Container Platform 3.2" variant is still listed
+  when quick install ose-3.3 (smunilla@redhat.com)
+- Bug 1367199 - iptablesSyncPeriod should default to 30s OOTB
+  (smunilla@redhat.com)
+- Sync remaining content (sdodson@redhat.com)
+- XPaas 1.3.3 (sdodson@redhat.com)
+- a-o-i: Fix broken tests from installed hosts check (smunilla@redhat.com)
+- Add clientCommonNames to RequestHeaderProvider optional items
+  (sdodson@redhat.com)
+- a-o-i: Mapping for 3.2 Upgrades (smunilla@redhat.com)
+- a-o-i: fix bz#1329455 (ghuang@redhat.com)
+- Add nfs group to OSEv3:vars (sdodson@redhat.com)
+- fixing openshift key error in case of node failure during run (ssh issue)
+  (jawed.khelil@amadeus.com)
+- add 3.3 to installer (rmeggins@redhat.com)
+
 * Mon Aug 15 2016 Troy Dawson <tdawson@redhat.com> 3.3.11-1
 - Ensure etcd user exists in etcd_server_certificates by installing etcd.
(abutcher@redhat.com) diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 4edd44fe4..3be3a0e96 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -338,7 +338,10 @@ - /etc/ansible/facts.d/openshift.fact - /etc/etcd - /etc/systemd/system/etcd_container.service - - /var/lib/etcd + + - name: Remove etcd data + shell: rm -rf /var/lib/etcd/* + failed_when: false - hosts: lb become: yes diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 3fb42a7fa..b3e02fb97 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -77,7 +77,7 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_master_hosts | default([]) }}" - when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined + when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool - name: Evaluate oo_first_etcd add_host: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml index 3ec47d6f3..be4e02c4a 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml @@ -110,6 +110,52 @@ when: master_update_failed | length > 0 ############################################################################### +# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints +############################################################################### + +- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints + hosts: oo_masters_to_config + roles: + - { role: openshift_cli } + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role, + # it will be updated when we perform node upgrade. 
+ docker_protect_installed_version: True + tasks: + - name: Verifying the correct commandline tools are available + shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} + when: openshift.common.is_containerized | bool and verify_upgrade_version is defined + + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --additive-only=true --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:authenticated:oauth + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - name: Reconcile Security Context Constraints + command: > + {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################### # Upgrade Nodes ############################################################################### @@ -142,7 +188,7 @@ - include: docker/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - include: "{{ node_config_hook }}" - when: node_config_hook is defined + when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config - include: rpm_upgrade.yml vars: @@ -160,49 +206,6 @@ when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool -############################################################################### -# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints -############################################################################### - -- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints - hosts: oo_masters_to_config - roles: - - { role: openshift_cli } - vars: - origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" - ent_reconcile_bindings: true - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - tasks: - - name: Verifying the correct commandline tools are available - shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} - when: openshift.common.is_containerized | bool and verify_upgrade_version is defined - - - name: Reconcile Cluster Roles - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --additive-only=true --confirm - run_once: true - - - name: Reconcile Cluster Role Bindings - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-role-bindings - --exclude-groups=system:authenticated - --exclude-groups=system:authenticated:oauth - --exclude-groups=system:unauthenticated - --exclude-users=system:anonymous - --additive-only=true --confirm - when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool - run_once: true - - - name: Reconcile Security Context Constraints - 
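The reconciliation play above resolves its binary and config paths from gathered facts; as a rough illustration only, the same commands could be run from a tiny standalone play. In this sketch the oadm binary name and the /etc/origin/master/admin.kubeconfig path are assumptions for a default RPM install, not values taken from this patch:

- hosts: oo_first_master
  tasks:
    # Sketch only: mirrors the reconcile commands above with hard-coded paths.
    - name: Reconcile cluster roles (additive only)
      command: >
        oadm --config=/etc/origin/master/admin.kubeconfig
        policy reconcile-cluster-roles --additive-only=true --confirm
    - name: Reconcile security context constraints (additive only)
      command: oadm policy reconcile-sccs --confirm --additive-only=true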
command: > - {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true - run_once: true - - - set_fact: - reconcile_complete: True - ############################################################################## # Gate on reconcile ############################################################################## diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml index 3117d9edc..b42ca83af 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml @@ -1,27 +1,11 @@ --- -- name: Test if libvirt network for openshift already exists - command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}" - register: net_info_result - changed_when: False - failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr" - -- name: Create a temp directory for the template xml file - command: "mktemp -d /tmp/openshift-ansible-XXXXXXX" - register: mktemp - when: net_info_result.rc == 1 - -- name: Create network xml file - template: - src: templates/network.xml - dest: "{{ mktemp.stdout }}/network.xml" - when: net_info_result.rc == 1 - -- name: Create libvirt network for openshift - command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml" - when: net_info_result.rc == 1 - -- name: Remove the temp directory - file: - path: "{{ mktemp.stdout }}" - state: absent - when: net_info_result.rc == 1 +- name: Create the libvirt network for OpenShift + virt_net: + name: '{{ libvirt_network }}' + state: '{{ item }}' + autostart: 'yes' + xml: "{{ lookup('template', 'network.xml') }}" + uri: '{{ libvirt_uri }}' + with_items: + - present + - active diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml index 397158b9e..8685624ec 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml @@ -6,22 +6,25 @@ # We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set. 
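The libvirt changes above drop the manual virsh net-info/net-create probing in favor of the virt_net and virt_pool modules, which are idempotent: declaring the resource present and then active only creates or starts it when it is missing. A minimal sketch of the same pattern, where the network name, XML file, and connection URI are placeholders rather than values from this repository:

- hosts: localhost
  connection: local
  tasks:
    # Illustrative placeholders only; the real playbook templates network.xml instead.
    - name: Define and start a libvirt network
      virt_net:
        name: example-net
        state: "{{ item }}"
        autostart: yes
        xml: "{{ lookup('file', 'example-network.xml') }}"
        uri: qemu:///system
      with_items:
        - present
        - active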
- acl: - default: "{{ item }}" + default: '{{ item.default }}' entity: kvm etype: group name: "{{ libvirt_storage_pool_path }}" - permissions: rwx + permissions: '{{ item.permissions }}' state: present with_items: - - no - - yes + - default: no + permissions: x + - default: yes + permissions: rwx -- name: Test if libvirt storage pool for openshift already exists - command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}" - register: pool_info_result - changed_when: False - failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr" - -- name: Create the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' - when: pool_info_result.rc == 1 +- name: Create the libvirt storage pool for OpenShift + virt_pool: + name: '{{ libvirt_storage_pool }}' + state: '{{ item }}' + autostart: 'yes' + xml: "{{ lookup('template', 'storage-pool.xml') }}" + uri: '{{ libvirt_uri }}' + with_items: + - present + - active diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index cc34d0ef9..e0afc43ba 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -134,4 +134,4 @@ delay: 1 with_together: - '{{ instances }}' - - '{{ ips }}'
\ No newline at end of file + - '{{ ips }}' diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml index 050bc7ab9..0ce2a8342 100644 --- a/playbooks/libvirt/openshift-cluster/templates/network.xml +++ b/playbooks/libvirt/openshift-cluster/templates/network.xml @@ -1,5 +1,5 @@ <network> - <name>openshift-ansible</name> + <name>{{ libvirt_network }}</name> <forward mode='nat'> <nat> <port start='1024' end='65535'/> diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml new file mode 100644 index 000000000..da139afd0 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml @@ -0,0 +1,6 @@ +<pool type='dir'> + <name>{{ libvirt_storage_pool }}</name> + <target> + <path>{{ libvirt_storage_pool_path }}</path> + </target> +</pool> diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 2d0098784..458cf5ac7 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -42,6 +42,12 @@ parameters: description: Source of legitimate ssh connections default: 0.0.0.0/0 + node_port_incoming: + type: string + label: Source of node port connections + description: Authorized sources targetting node ports + default: 0.0.0.0/0 + num_etcd: type: number label: Number of etcd nodes @@ -393,6 +399,11 @@ resources: port_range_min: 4789 port_range_max: 4789 remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: { get_param: node_port_incoming } infra-secgrp: type: OS::Neutron::SecurityGroup diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index b9aae2f4c..5cf543204 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -33,6 +33,7 @@ -P external_net={{ openstack_network_external_net }} -P ssh_public_key="{{ openstack_ssh_public_key }}" -P ssh_incoming={{ openstack_ssh_access_from }} + -P node_port_incoming={{ openstack_node_port_access_from }} -P num_etcd={{ num_etcd }} -P num_masters={{ num_masters }} -P num_nodes={{ num_nodes }} @@ -48,6 +49,8 @@ -P infra_flavor={{ openstack_flavor["infra"] }} -P dns_flavor={{ openstack_flavor["dns"] }} openshift-ansible-{{ cluster_id }}-stack' + args: + chdir: '{{ playbook_dir }}' - name: Wait for OpenStack Stack readiness shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' @@ -107,9 +110,9 @@ openshift_node_labels: type: "etcd" with_together: - - parsed_outputs.etcd_names - - parsed_outputs.etcd_ips - - parsed_outputs.etcd_floating_ips + - '{{ parsed_outputs.etcd_names }}' + - '{{ parsed_outputs.etcd_ips }}' + - '{{ parsed_outputs.etcd_floating_ips }}' - name: Add new master instances groups and variables add_host: @@ -121,9 +124,9 @@ openshift_node_labels: type: "master" with_together: - - parsed_outputs.master_names - - parsed_outputs.master_ips - - parsed_outputs.master_floating_ips + - '{{ parsed_outputs.master_names }}' + - '{{ parsed_outputs.master_ips }}' + - '{{ parsed_outputs.master_floating_ips }}' - name: Add new node instances groups and variables add_host: @@ -135,9 +138,9 @@ openshift_node_labels: type: "compute" with_together: - - parsed_outputs.node_names 
- - parsed_outputs.node_ips - - parsed_outputs.node_floating_ips + - '{{ parsed_outputs.node_names }}' + - '{{ parsed_outputs.node_ips }}' + - '{{ parsed_outputs.node_floating_ips }}' - name: Add new infra instances groups and variables add_host: @@ -149,9 +152,9 @@ openshift_node_labels: type: "infra" with_together: - - parsed_outputs.infra_names - - parsed_outputs.infra_ips - - parsed_outputs.infra_floating_ips + - '{{ parsed_outputs.infra_names }}' + - '{{ parsed_outputs.infra_ips }}' + - '{{ parsed_outputs.infra_floating_ips }}' - name: Add DNS groups and variables add_host: @@ -166,10 +169,10 @@ host: '{{ item }}' port: 22 with_flattened: - - parsed_outputs.master_floating_ips - - parsed_outputs.node_floating_ips - - parsed_outputs.infra_floating_ips - - parsed_outputs.dns_floating_ip + - '{{ parsed_outputs.master_floating_ips }}' + - '{{ parsed_outputs.node_floating_ips }}' + - '{{ parsed_outputs.infra_floating_ips }}' + - '{{ parsed_outputs.dns_floating_ip }}' - name: Wait for user setup command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup' @@ -178,10 +181,10 @@ retries: 30 delay: 1 with_flattened: - - parsed_outputs.master_floating_ips - - parsed_outputs.node_floating_ips - - parsed_outputs.infra_floating_ips - - parsed_outputs.dns_floating_ip + - '{{ parsed_outputs.master_floating_ips }}' + - '{{ parsed_outputs.node_floating_ips }}' + - '{{ parsed_outputs.infra_floating_ips }}' + - '{{ parsed_outputs.dns_floating_ip }}' - include: update.yml diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index ba9c6bf9c..60372e262 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -17,7 +17,7 @@ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) + with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - name: List Hosts hosts: oo_list_hosts diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index 5bd8476f1..980ab7337 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -11,7 +11,7 @@ groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) + with_items: "{{ (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) }}" - name: Unsubscribe VMs hosts: oo_hosts_to_terminate diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index bc53a51b0..17063ef34 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -12,6 +12,8 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k default('~/.ssh/id_rsa.pub', True)) }}" openstack_ssh_access_from: "{{ 
lookup('oo_option', 'ssh_from') | default('0.0.0.0/0', True) }}" +openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') | + default('0.0.0.0/0', True) }}" openstack_flavor: dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}" etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}" diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2 index 71539775d..b70ec500e 100644 --- a/roles/openshift_hosted/templates/registry_config.j2 +++ b/roles/openshift_hosted/templates/registry_config.j2 @@ -61,7 +61,7 @@ middleware: {% if openshift.common.version_gte_3_3_or_1_3 | bool %} registry: - name: openshift -{% endif -%} +{% endif %} repository: - name: openshift options: diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index aafb06f93..ffde59358 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -164,3 +164,29 @@ owner: "{{ item }}" group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" + +# Ensure ca-bundle exists for 3.2+ configuration +- name: Check for ca-bundle.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + register: ca_bundle_stat + failed_when: false + +- name: Check for ca.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca.crt" + register: ca_crt_stat + failed_when: false + +- name: Migrate ca.crt to ca-bundle.crt + command: mv ca.crt ca-bundle.crt + args: + chdir: "{{ openshift.common.config_base }}/master" + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + +- name: Link ca.crt to ca-bundle.crt + file: + src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + path: "{{ openshift.common.config_base }}/master/ca.crt" + state: link + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index f5b6f501d..e33b665ca 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v 
/lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node SyslogIdentifier={{ openshift.common.service_type }}-node diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml index 1efab9466..8715fc64e 100644 --- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml +++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml @@ -15,7 +15,7 @@ template: src: serviceaccount.j2 dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml" - with_items: openshift_serviceaccounts_names + with_items: '{{ openshift_serviceaccounts_names }}' - name: Get current security context constraints shell: > @@ -30,8 +30,8 @@ insertafter: "^users:$" when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}" with_nested: - - openshift_serviceaccounts_names - - scc_test.results + - '{{ openshift_serviceaccounts_names }}' + - '{{ scc_test.results }}' - name: Apply new scc rules for service accounts command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1" diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 6e5d2b22c..a3a99d248 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -73,6 +73,10 @@ msg: openshift_version role was unable to set openshift_pkg_version when: openshift_pkg_version is not defined +- fail: + msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories." + when: not is_containerized | bool and openshift_version == '0.0' + # We can't map an openshift_release to full rpm version like we can with containers, make sure # the rpm version we looked up matches the release requested and error out if not. 
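Many of the loop changes in this patch simply wrap bare variable names in full Jinja2 expressions, because Ansible 2.1 deprecates bare variables in with_* loops (the source of the OpenStack provider warnings mentioned in the changelog). A minimal sketch with a hypothetical variable name:

# Deprecated form that triggers a warning on Ansible 2.1:
#   with_items: my_hosts
# Preferred form used throughout this patch:
- debug:
    msg: "{{ item }}"
  with_items: "{{ my_hosts | default([]) }}"  # my_hosts is a hypothetical variable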
- fail: diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 9ed17f481..d677ea8c8 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -39,7 +39,7 @@ UPGRADE_MAPPINGS = { def validate_ansible_dir(path): if not path: - raise click.BadParameter('An ansible path must be provided') + raise click.BadParameter('An Ansible path must be provided') return path # if not os.path.exists(path)): # raise click.BadParameter("Path \"{}\" doesn't exist".format(path)) @@ -60,8 +60,8 @@ def validate_prompt_hostname(hostname): def get_ansible_ssh_user(): click.clear() message = """ -This installation process will involve connecting to remote hosts via ssh. Any -account may be used however if a non-root account is used it must have +This installation process involves connecting to remote hosts via ssh. Any +account may be used. However, if a non-root account is used, then it must have passwordless sudo access. """ click.echo(message) @@ -70,8 +70,7 @@ passwordless sudo access. def get_master_routingconfig_subdomain(): click.clear() message = """ -You might want to override the default subdomain uses for exposed routes. If you don't know what -this is, use the default value. +You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value. """ click.echo(message) return click.prompt('New default subdomain (ENTER for none)', default='') @@ -96,15 +95,15 @@ def delete_hosts(hosts): response = del_idx.lower() if response in ['y', 'n']: return hosts, response - click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx)) + click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx)) except AttributeError: - click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx)) + click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx)) return hosts, None def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True): """ Collect host information from user. This will later be filled in using - ansible. + Ansible. Returns: a list of host information collected from the user """ @@ -113,28 +112,28 @@ def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=T message = """ You must now specify the hosts that will compose your OpenShift cluster. -Please enter an IP or hostname to connect to for each system in the cluster. -You will then be prompted to identify what role you would like this system to +Please enter an IP address or hostname to connect to for each system in the +cluster. You will then be prompted to identify what role you want this system to serve in the cluster. -OpenShift Masters serve the API and web console and coordinate the jobs to run -across the environment. If desired you can specify multiple Master systems for -an HA deployment, in which case you will be prompted to identify a *separate* -system to act as the load balancer for your cluster after all Masters and Nodes -are defined. +OpenShift masters serve the API and web console and coordinate the jobs to run +across the environment. Optionally, you can specify multiple master systems for +a high-availability (HA) deployment. If you choose an HA deployment, then you +are prompted to identify a *separate* system to act as the load balancer for +your cluster once you define all masters and nodes. 
-If only one Master is specified, an etcd instance embedded within the OpenShift -Master service will be used as the datastore. This can be later replaced with a -separate etcd instance if desired. If multiple Masters are specified, a -separate etcd cluster will be configured with each Master serving as a member. +If only one master is specified, an etcd instance is embedded within the +OpenShift master service to use as the datastore. This can be later replaced +with a separate etcd instance, if required. If multiple masters are specified, +then a separate etcd cluster is configured with each master serving as a member. -Any Masters configured as part of this installation process will also be -configured as Nodes. This is so that the Master will be able to proxy to Pods -from the API. By default this Node will be unschedulable but this can be changed -after installation with 'oadm manage-node'. +Any masters configured as part of this installation process are also +configured as nodes. This enables the master to proxy to pods +from the API. By default, this node is unschedulable, but this can be changed +after installation with the 'oadm manage-node' command. -OpenShift Nodes provide the runtime environments for containers. They will -host the required services to be managed by the Master. +OpenShift nodes provide the runtime environments for containers. They host the +required services to be managed by the master. http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node @@ -152,7 +151,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen value_proc=validate_prompt_hostname) if not masters_set: - if click.confirm('Will this host be an OpenShift Master?'): + if click.confirm('Will this host be an OpenShift master?'): host_props['roles'].append('master') host_props['roles'].append('etcd') num_masters += 1 @@ -181,7 +180,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen print_installation_summary(hosts, oo_cfg.settings['variant_version']) # If we have one master, this is enough for an all-in-one deployment, - # thus we can start asking if you wish to proceed. Otherwise we assume + # thus we can start asking if you want to proceed. Otherwise we assume # you must. 
if masters_set or num_masters != 2: more_hosts = click.confirm('Do you want to add additional hosts?') @@ -214,12 +213,12 @@ def print_installation_summary(hosts, version=None): nodes = [host for host in hosts if host.is_node()] dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()] click.echo('') - click.echo('Total OpenShift Masters: %s' % len(masters)) - click.echo('Total OpenShift Nodes: %s' % len(nodes)) + click.echo('Total OpenShift masters: %s' % len(masters)) + click.echo('Total OpenShift nodes: %s' % len(nodes)) if len(masters) == 1 and version != '3.0': ha_hint_message = """ -NOTE: Add a total of 3 or more Masters to perform an HA installation.""" +NOTE: Add a total of 3 or more masters to perform an HA installation.""" click.echo(ha_hint_message) elif len(masters) == 2: min_masters_message = """ @@ -228,19 +227,19 @@ Please add one more to proceed.""" click.echo(min_masters_message) elif len(masters) >= 3: ha_message = """ -NOTE: Multiple Masters specified, this will be an HA deployment with a separate +NOTE: Multiple masters specified, this will be an HA deployment with a separate etcd cluster. You will be prompted to provide the FQDN of a load balancer and a host for storage once finished entering hosts. """ click.echo(ha_message) dedicated_nodes_message = """ -WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated -Nodes are specified, each configured Master will be marked as a schedulable -Node.""" +WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated +nodes are specified, each configured master will be marked as a schedulable +node.""" min_ha_nodes_message = """ -WARNING: A minimum of 3 dedicated Nodes are recommended for an HA +WARNING: A minimum of 3 dedicated nodes are recommended for an HA deployment.""" if len(dedicated_nodes) == 0: click.echo(dedicated_nodes_message) @@ -253,14 +252,14 @@ deployment.""" def print_host_summary(all_hosts, host): click.echo("- %s" % host.connect_to) if host.is_master(): - click.echo(" - OpenShift Master") + click.echo(" - OpenShift master") if host.is_node(): if host.is_dedicated_node(): - click.echo(" - OpenShift Node (Dedicated)") + click.echo(" - OpenShift node (Dedicated)") elif host.is_schedulable_node(all_hosts): - click.echo(" - OpenShift Node") + click.echo(" - OpenShift node") else: - click.echo(" - OpenShift Node (Unscheduled)") + click.echo(" - OpenShift node (Unscheduled)") if host.is_master_lb(): if host.preconfigured: click.echo(" - Load Balancer (Preconfigured)") @@ -284,14 +283,14 @@ def collect_master_lb(hosts): this is an invalid configuration. """ message = """ -Setting up High Availability Masters requires a load balancing solution. -Please provide a the FQDN of a host that will be configured as a proxy. This +Setting up high-availability masters requires a load balancing solution. +Please provide the FQDN of a host that will be configured as a proxy. This can be either an existing load balancer configured to balance all masters on port 8443 or a new host that will have HAProxy installed on it. -If the host provided does is not yet configured, a reference haproxy load -balancer will be installed. It's important to note that while the rest of the -environment will be fault tolerant this reference load balancer will not be. +If the host provided is not yet configured, a reference HAProxy load +balancer will be installed. 
It's important to note that while the rest of the +environment will be fault-tolerant, this reference load balancer will not be. It can be replaced post-installation with a load balancer with the same hostname. """ @@ -313,7 +312,7 @@ hostname. host_props['connect_to'] = click.prompt('Enter hostname or IP address', value_proc=validate_prompt_lb) install_haproxy = \ - click.confirm('Should the reference haproxy load balancer be installed on this host?') + click.confirm('Should the reference HAProxy load balancer be installed on this host?') host_props['preconfigured'] = not install_haproxy host_props['roles'] = ['master_lb'] master_lb = Host(**host_props) @@ -325,7 +324,7 @@ def collect_storage_host(hosts): hosts. """ message = """ -Setting up High Availability Masters requires a storage host. Please provide a +Setting up high-availability masters requires a storage host. Please provide a host that will be configured as a Registry Storage. Note: Containerized storage hosts are not currently supported. @@ -363,15 +362,15 @@ def confirm_hosts_facts(oo_cfg, callback_facts): hosts = oo_cfg.deployment.hosts click.clear() message = """ -A list of the facts gathered from the provided hosts follows. Because it is -often the case that the hostname for a system inside the cluster is different -from the hostname that is resolveable from command line or web clients -these settings cannot be validated automatically. +The following is a list of the facts gathered from the provided hosts. The +hostname for a system inside the cluster is often different from the hostname +that is resolveable from command-line or web clients, therefore these settings +cannot be validated automatically. -For some cloud providers the installer is able to gather metadata exposed in -the instance so reasonable defaults will be provided. +For some cloud providers, the installer is able to gather metadata exposed in +the instance, so reasonable defaults will be provided. -Plese confirm that they are correct before moving forward. +Please confirm that they are correct before moving forward. """ notes = """ @@ -385,7 +384,7 @@ Notes: * The public IP should be the externally accessible IP associated with the instance * The hostname should resolve to the internal IP from the instances themselves. - * The public hostname should resolve to the external ip from hosts outside of + * The public hostname should resolve to the external IP from hosts outside of the cloud. """ @@ -439,24 +438,24 @@ def check_hosts_config(oo_cfg, unattended): masters = [host for host in oo_cfg.deployment.hosts if host.is_master()] if len(masters) == 2: - click.echo("A minimum of 3 Masters are required for HA deployments.") + click.echo("A minimum of 3 masters are required for HA deployments.") sys.exit(1) if len(masters) > 1: master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()] if len(master_lb) > 1: - click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.') + click.echo('ERROR: More than one master load balancer specified. Only one is allowed.') sys.exit(1) elif len(master_lb) == 1: if master_lb[0].is_master() or master_lb[0].is_node(): - click.echo('ERROR: The Master load balancer is configured as a master or node. ' \ + click.echo('ERROR: The master load balancer is configured as a master or node. ' \ 'Please correct this.') sys.exit(1) else: message = """ ERROR: No master load balancer specified in config. 
You must provide the FQDN -of a load balancer to balance the API (port 8443) on all Master hosts. +of a load balancer to balance the API (port 8443) on all master hosts. https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters """ @@ -467,8 +466,8 @@ https://docs.openshift.org/latest/install_config/install/advanced_install.html#m if host.is_node() and not host.is_master()] if len(dedicated_nodes) == 0: message = """ -WARNING: No dedicated Nodes specified. By default, colocated Masters have -their Nodes set to unschedulable. If you proceed all nodes will be labelled +WARNING: No dedicated nodes specified. By default, colocated masters have +their nodes set to unschedulable. If you proceed all nodes will be labelled as schedulable. """ if unattended: @@ -552,11 +551,10 @@ def get_host_roles_set(oo_cfg): def get_proxy_hostnames_and_excludes(): message = """ -If a proxy is needed to reach HTTP and HTTPS traffic please enter the name below. -This proxy will be configured by default for all processes needing to reach systems outside -the cluster. +If a proxy is needed to reach HTTP and HTTPS traffic, please enter the name below. +This proxy will be configured by default for all processes that need to reach systems outside the cluster. -More advanced configuration is possible if using ansible directly: +More advanced configuration is possible if using Ansible directly: https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html """ @@ -570,7 +568,7 @@ https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html if http_proxy_hostname or https_proxy_hostname: message = """ -All hosts in your openshift inventory will automatically be added to the NO_PROXY value. +All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value. Please provide any additional hosts to be added to NO_PROXY. (ENTER for none) """ proxy_excludes = click.prompt(message, default='') @@ -594,10 +592,10 @@ Please confirm that following prerequisites have been met: repositories. * All systems have run docker-storage-setup (part of the Red Hat docker RPM). * All systems have working DNS that resolves not only from the perspective of - the installer but also from within the cluster. + the installer, but also from within the cluster. -When the process completes you will have a default configuration for Masters -and Nodes. For ongoing environment maintenance it's recommended that the +When the process completes you will have a default configuration for masters +and nodes. For ongoing environment maintenance it's recommended that the official Ansible playbooks be used. For more information on installation prerequisites please see: @@ -712,11 +710,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): # present a message listing already installed hosts and remove hosts if needed for host in installed_hosts: if host.is_master(): - click.echo("{} is already an OpenShift Master".format(host)) + click.echo("{} is already an OpenShift master".format(host)) # Masters stay in the list, we need to run against them when adding # new nodes. elif host.is_node(): - click.echo("{} is already an OpenShift Node".format(host)) + click.echo("{} is already an OpenShift node".format(host)) # force is only used for reinstalls so we don't want to remove # anything. 
if not force: @@ -869,7 +867,7 @@ def uninstall(ctx): # Prompt interactively to confirm: for host in hosts: click.echo(" * %s" % host.connect_to) - proceed = click.confirm("\nDo you wish to proceed?") + proceed = click.confirm("\nDo you want to proceed?") if not proceed: click.echo("Uninstall cancelled.") sys.exit(0) @@ -930,7 +928,7 @@ def upgrade(ctx, latest_minor, next_major): playbook = mapping['major_playbook'] new_version = mapping['major_version'] # Update config to reflect the version we're targetting, we'll write - # to disk once ansible completes successfully, not before. + # to disk once Ansible completes successfully, not before. oo_cfg.settings['variant_version'] = new_version if oo_cfg.settings['variant'] == 'enterprise': oo_cfg.settings['variant'] = 'openshift-enterprise' @@ -943,14 +941,14 @@ def upgrade(ctx, latest_minor, next_major): playbook = mapping['minor_playbook'] new_version = old_version - click.echo("Openshift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % ( + click.echo("OpenShift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % ( variant, old_version, oo_cfg.settings['variant'], new_version)) for host in oo_cfg.deployment.hosts: click.echo(" * %s" % host.connect_to) if not ctx.obj['unattended']: # Prompt interactively to confirm: - if not click.confirm("\nDo you wish to proceed?"): + if not click.confirm("\nDo you want to proceed?"): click.echo("Upgrade cancelled.") sys.exit(0) @@ -968,7 +966,7 @@ def upgrade(ctx, latest_minor, next_major): @click.command() @click.option('--force', '-f', is_flag=True, default=False) @click.option('--gen-inventory', is_flag=True, default=False, - help="Generate an ansible inventory file and exit.") + help="Generate an Ansible inventory file and exit.") @click.pass_context def install(ctx, force, gen_inventory): oo_cfg = ctx.obj['oo_cfg'] @@ -1006,12 +1004,12 @@ def install(ctx, force, gen_inventory): # Write quick installer config file to disk: oo_cfg.save_to_disk() - # Write ansible inventory file to disk: + # Write Ansible inventory file to disk: inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on) click.echo() click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path) - click.echo("Wrote ansible inventory: %s" % inventory_file) + click.echo("Wrote Ansible inventory: %s" % inventory_file) click.echo() if gen_inventory: @@ -1030,7 +1028,7 @@ If changes are needed please edit the config file above and re-run. if error: # The bootstrap script will print out the log location. message = """ -An error was detected. After resolving the problem please relaunch the +An error was detected. After resolving the problem please relaunch the installation process. """ click.echo(message) @@ -1040,7 +1038,7 @@ installation process. The installation was successful! 
If this is your first time installing please take a look at the Administrator -Guide for advanced options related to routing, storage, authentication and much +Guide for advanced options related to routing, storage, authentication, and more: http://docs.openshift.com/enterprise/latest/admin_guide/overview.html diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index dc25fae8f..b32370cd5 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -46,8 +46,16 @@ origin = Variant('origin', 'OpenShift Origin', ] ) +LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', + [ + Version('3.2', 'openshift-enterprise'), + Version('3.1', 'openshift-enterprise'), + Version('3.0', 'openshift-enterprise'), + ] +) + # Ordered list of variants we can install, first is the default. -SUPPORTED_VARIANTS = (OSE, origin) +SUPPORTED_VARIANTS = (OSE, origin, LEGACY) DISPLAY_VARIANTS = (OSE, ) def find_variant(name, version=None): diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 98e111043..0556e52a1 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -557,7 +557,7 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('openshift-enterprise', written_config['variant']) # We didn't specify a version so the latest should have been assumed, # and written to disk: - self.assertEquals('3.2', written_config['variant_version']) + self.assertEquals('3.3', written_config['variant_version']) # Make sure the correct value was passed to ansible: inventory = ConfigParser.ConfigParser(allow_no_value=True) @@ -573,7 +573,7 @@ class UnattendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 config = SAMPLE_CONFIG % 'openshift-enterprise' - config = '%s\n%s' % (config, 'variant_version: 3.2') + config = '%s\n%s' % (config, 'variant_version: 3.3') config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), config) @@ -586,7 +586,7 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('openshift-enterprise', written_config['variant']) # Make sure our older version was preserved: # and written to disk: - self.assertEquals('3.2', written_config['variant_version']) + self.assertEquals('3.3', written_config['variant_version']) inventory = ConfigParser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) @@ -722,7 +722,7 @@ class UnattendedCliTests(OOCliFixture): # This is an invalid config: self.assert_result(result, 1) - self.assertTrue("A minimum of 3 Masters are required" in result.output) + self.assertTrue("A minimum of 3 masters are required" in result.output) #unattended with three masters, one node, but no load balancer specified: @patch('ooinstall.openshift_ansible.run_main_playbook') diff --git a/utils/test/fixture.py b/utils/test/fixture.py index ddf6b6802..b2a0a7134 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -10,7 +10,7 @@ from click.testing import CliRunner # Substitute in a product name before use: SAMPLE_CONFIG = """ variant: %s -variant_version: 3.2 +variant_version: 3.3 master_routingconfig_subdomain: example.com deployment: ansible_ssh_user: root diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py index 10439c9ae..f82d55b05 100644 --- a/utils/test/oo_config_tests.py +++ b/utils/test/oo_config_tests.py @@ -12,7 +12,7 @@ from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError SAMPLE_CONFIG = """ variant: openshift-enterprise 
-variant_version: 3.2 +variant_version: 3.3 deployment: ansible_ssh_user: root hosts: |
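The fixtures above bump the default variant_version to 3.3 in the quick installer's sample configuration. For orientation, a configuration of roughly that shape might look like the sketch below; the host entries are invented placeholders, not content from these fixtures:

variant: openshift-enterprise
variant_version: '3.3'
deployment:
  ansible_ssh_user: root
  hosts:
    - connect_to: master.example.com  # placeholder
      roles:
        - master
        - node
    - connect_to: node1.example.com   # placeholder
      roles:
        - node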