Diffstat (limited to 'playbooks')
257 files changed, 4477 insertions, 5428 deletions
diff --git a/playbooks/README.md b/playbooks/README.md index 5857a9f59..290d4c082 100644 --- a/playbooks/README.md +++ b/playbooks/README.md @@ -12,8 +12,6 @@ And: - [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community supported and not officially maintained. -- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack) - are related to the [`bin/cluster`](../bin) tool and its usage is deprecated. Refer to the `README.md` file in each playbook directory for more information about them. diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml index 91948c72e..eec6c23a7 100644 --- a/playbooks/adhoc/contiv/delete_contiv.yml +++ b/playbooks/adhoc/contiv/delete_contiv.yml @@ -1,5 +1,5 @@ --- -- name: delete contiv +- name: Uninstall contiv hosts: all gather_facts: False tasks: diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py index daff68fbe..cacd0b0f3 100644 --- a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in openshift-ansible ''' diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index beaf20b07..5072d10fa 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -26,6 +26,18 @@ - hosts: nodes become: yes tasks: + - name: Remove dnsmasq dispatcher + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/dnsmasq.d/origin-dns.conf + - /etc/dnsmasq.d/origin-upstream-dns.conf + - /etc/dnsmasq.d/openshift-ansible.conf + - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh + - service: + name: NetworkManager + state: restarted - name: Stop services service: name={{ item }} state=stopped with_items: @@ -103,7 +115,7 @@ - atomic-openshift-sdn-ovs - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - kubernetes-client - openshift @@ -279,9 +291,6 @@ with_items: - /etc/ansible/facts.d/openshift.fact - /etc/atomic-enterprise - - /etc/dnsmasq.d/origin-dns.conf - - /etc/dnsmasq.d/origin-upstream-dns.conf - - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh - /etc/openshift - /etc/openshift-sdn - /etc/sysconfig/atomic-enterprise-node @@ -305,11 +314,19 @@ - shell: systemctl daemon-reload changed_when: False + - name: restart container-engine + service: name=container-engine state=restarted + failed_when: false + register: container_engine + - name: restart docker service: name=docker state=restarted - - - name: restart NetworkManager - service: name=NetworkManager state=restarted + failed_when: false + when: not (container_engine | changed) + register: l_docker_restart_docker_in_pb_result + until: not l_docker_restart_docker_in_pb_result | failed + retries: 3 + delay: 30 - hosts: masters become: yes @@ -339,7 +356,7 @@ - atomic-openshift-master - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - corosync - kubernetes-client @@ -386,10 +403,19 @@ - "{{ directories.results | default([]) }}" - files + - set_fact: + client_users: "{{ [ansible_ssh_user, 'root'] | unique }}" + + - name: Remove client kubeconfigs + file: + path: "~{{ item }}/.kube" + state: absent + with_items: + - "{{ client_users }}" + - name: Remove remaining files file: path={{ item }} state=absent 
with_items: - - "~{{ ansible_ssh_user }}/.kube" - /etc/ansible/facts.d/openshift.fact - /etc/atomic-enterprise - /etc/corosync @@ -414,7 +440,6 @@ - /etc/sysconfig/origin-master - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - - /root/.kube - /usr/share/openshift/examples - /var/lib/atomic-enterprise - /var/lib/openshift diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 99698b4d0..2b3d4329e 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -1,4 +1,256 @@ # AWS playbooks -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. +## Provisioning + +With recent desire for provisioning from customers and developers alike, the AWS + playbook directory now supports a limited set of ansible playbooks to achieve a + complete cluster setup. These playbooks bring into alignment our desire to + deploy highly scalable Openshift clusters utilizing AWS auto scale groups and + custom AMIs. + +### Where do I start? + +Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways: + +- Create the following file `~/.aws/credentials` with the contents (substitute your access key and secret key): + ``` + [myaccount] + aws_access_key_id = <Your access_key here> + aws_secret_access_key = <Your secret acces key here> + ``` + From the shell: + ``` + $ export AWS_PROFILE=myaccount + ``` + --- +- Alternatively to using a profile you can export your AWS credentials as environment variables. + ``` + $ export AWS_ACCESS_KEY_ID=AKIXXXXXX + $ export AWS_SECRET_ACCESS_KEY=XXXXXX + ``` + +### Let's Provision! + +The newly added playbooks are the following: +- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories. +- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc. +- install.yml - Calls the openshift-ansible installer on the newly created instances +- provision_nodes.yml - Creates the infra and compute node scale groups +- accept.yml - This is a playbook to accept infra and compute nodes into the cluster +- provision_install.yml - This is a combination of all 3 of the above playbooks. (provision, install, and provision_nodes as well as accept.yml) + +The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings. 
+ +```yaml +--- +# when creating an AMI set this to True +# when installing a cluster set this to False +openshift_node_bootstrap: True + +# specify a clusterid +# openshift_aws_clusterid: default + +# specify a region +# openshift_aws_region: us-east-1 + +# must specify a base_ami when building an AMI +# openshift_aws_base_ami: # base image for AMI to build from +# specify when using a custom AMI +# openshift_aws_ami: + +# when creating an encrypted AMI please specify use_encryption +# openshift_aws_ami_encrypt: False + +# custom certificates are required for the ELB +# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt' +# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key' +# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt' + +# This is required for any ec2 instances +# openshift_aws_ssh_key_name: myuser_key + +# This will ensure these users are created +#openshift_aws_users: +#- key_name: myuser_key +# username: myuser +# pub_key: | +# ssh-rsa AAAA +``` + +If customization is required for the instances, scale groups, or any other configurable option, please see ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`. + +In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example: + +```ini +[OSEv3:children] +masters +nodes +etcd + +[OSEv3:vars] +################################################################################ +# Ensure these variables are set for bootstrap +################################################################################ +# openshift_deployment_type is required for installation +openshift_deployment_type=origin + +# required when building an AMI. This will +# be dependent on the version provided by the yum repository +openshift_pkg_version=-3.6.0 + +openshift_master_bootstrap_enabled=True + +openshift_hosted_router_wait=False +openshift_hosted_registry_wait=False + +# Repository for installation +openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}] + +################################################################################ +# cluster specific settings may be placed here + +[masters] + +[etcd] + +[nodes] +``` + +There are more examples of cluster inventory settings [`here`](../../inventory/byo/). + +#### Step 1 + +Once the `inventory` and the `provisioning_vars.yml` files have been updated with the correct settings for the desired AWS account, we are ready to build an AMI. + +``` +$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml +``` + +The build_ami.yml playbook will: +1. Build a VPC. The default name will be the clusterid if not specified. +2. 
Create an ssh key required for the instance. +3. Create a security group. +4. Create an instance using the key from step 2 or a specified key. +5. Run openshift-ansible setup roles to ensure packages and services are correctly configured. +6. Create the AMI. +7. If encryption is desired: + - A KMS key is created with the name of $clusterid. + - An encrypted AMI will be produced with the $clusterid KMS key. +8. Terminate the instance used to configure the AMI. + +More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption: +``` +# openshift_aws_ami_encrypt: True # defaults to false +``` + +**Note**: This will take the most recently created AMI and encrypt it for later use. If encryption is not desired then set the value to false (it defaults to false). The AMI id will be fetched and used according to its most recent creation date. + +#### Step 2 + +Now that we have created an AMI for our OpenShift installation, there are two ways to use the AMI. + +1. By default, the most recently created AMI id will be found and used. +2. The `openshift_aws_ami` option can be specified. This allows the user to override the behavior of the role and use a custom AMI specified in the `openshift_aws_ami` variable. + +We are now ready to provision and install the cluster. This can be accomplished by calling all of the following steps at once or one-by-one. The all-in-one playbook can be called like this: +``` +$ ansible-playbook -i inventory.yml provision_install.yml -e @provisioning_vars.yml +``` + +If this is the first time running through this process, please attempt the following steps one-by-one and ensure the setup works correctly. + +#### Step 3 + +We are ready to create the master instances. + +``` +$ ansible-playbook provision.yml -e @provisioning_vars.yml +``` + +This playbook runs through the following steps: +1. Ensures a VPC is created. +2. Ensures an SSH key exists. +3. Creates an S3 bucket for the registry named $clusterid-docker-registry. +4. Creates master security groups. +5. Creates a master launch config. +6. Creates the master auto scaling groups. +7. If certificates are desired for the ELB, they will be uploaded. +8. Creates internal and external master ELBs. +9. Adds newly created masters to the correct groups. +10. Sets a couple of important facts for the masters. + +At this point we have successfully created the infrastructure, including the master nodes. + +#### Step 4 + +Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook: + +``` +$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml +``` +This playbook accomplishes the following: +1. Builds a dynamic inventory file by querying AWS. +2. Runs the [`byo`](../../common/openshift-cluster/config.yml) installation playbook. + +Once this playbook completes, the cluster masters should be installed and configured. + +#### Step 5 + +Now that we have a cluster deployed, the next step is to create the node scale groups. This can be done easily with the following playbook: + +``` +$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml +``` + +Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator. 
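Before accepting them in Step 6, you can inspect the pending requests from a master. The snippet below is an illustrative sketch only: the CSR name shown is hypothetical and the `admin.kubeconfig` path assumes a default installation.

```
ssh root@<master ip address>
$ oc --config=/etc/origin/master/admin.kubeconfig get csr
node-bootstrapper-client-ip-172-31-49-148-ec2-internal   1m   system:serviceaccount:openshift-infra:node-bootstrapper   Pending
```

A `Pending` condition simply means the request has not yet been approved; the `accept.yml` playbook described next takes care of that.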
+ +#### Step 6 + +To facilitate the node registration process, nodes may be accepted into the cluster by running the `accept.yml` playbook. This playbook can accept nodes in a few different ways: +- approve_all - **Note**: this option is for development and test environments only. Security is bypassed. +- nodes - A list of node names that will be accepted into the cluster + +```yaml + oc_adm_csr: + #approve_all: True + nodes: < list of nodes here > + timeout: 0 +``` + +Once the desired accept method is chosen, run the `accept.yml` playbook: +``` +$ ansible-playbook accept.yml -e @provisioning_vars.yml +``` + +Log in to a master and run the following command: +``` +ssh root@<master ip address> +$ oc --config=/etc/origin/master/admin.kubeconfig get csr +node-bootstrapper-client-ip-172-31-49-148-ec2-internal 1h system:serviceaccount:openshift-infra:node-bootstrapper Approved,Issued +node-bootstrapper-server-ip-172-31-49-148-ec2-internal 1h system:node:ip-172-31-49-148.ec2.internal Approved,Issued +``` + +Verify the `CONDITION` is `Approved,Issued` on the `csr` objects. Two are required for each node: +1. `node-bootstrapper-client` is a request to access the api/controllers. +2. `node-bootstrapper-server` is a request to join the cluster. + +Once this is complete, verify the nodes have joined the cluster and are `Ready`. + +``` +$ oc --config=/etc/origin/master/admin.kubeconfig get nodes +NAME STATUS AGE VERSION +ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d708d7 +``` + +### Ready To Work! + +At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster. + +### Still to come + +More provisioning enhancements are on the way, including additional playbooks that extend the provisioning capabilities. 
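As a quick reference, the full step-by-step flow described above can be driven with the following commands; the inventory and variable file names here are illustrative and should be replaced with your own:

```
$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
$ ansible-playbook provision.yml -e @provisioning_vars.yml
$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
$ ansible-playbook accept.yml -e @provisioning_vars.yml
```

Alternatively, `provision_install.yml` wraps the provision, install, provision_nodes, and accept steps into a single run.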
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml new file mode 100755 index 000000000..ffc367f9f --- /dev/null +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -0,0 +1,53 @@ +#!/usr/bin/ansible-playbook +--- +- name: Setup the vpc and the master node group + hosts: localhost + remote_user: root + gather_facts: no + tasks: + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: bring lib_openshift into scope + include_role: + name: lib_openshift + + - name: fetch masters + ec2_remote_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" + "tag:host-type": master + instance-state-name: running + register: mastersout + retries: 20 + delay: 3 + until: "'instances' in mastersout and mastersout.instances|length > 0" + + - name: fetch new node instances + ec2_remote_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" + "tag:host-type": node + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: "'instances' in instancesout and instancesout.instances|length > 0" + + - debug: + msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" + + - name: approve nodes + oc_adm_csr: + #approve_all: True + nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" + timeout: 0 + register: nodeout + delegate_to: "{{ mastersout.instances[0].public_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml deleted file mode 100644 index 0e8eb90c1..000000000 --- a/playbooks/aws/openshift-cluster/add_nodes.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - vars: - oo_extend_env: True - tasks: - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - -- include: scaleup.yml -- include: list.yml diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml new file mode 100644 index 000000000..fc11205d8 --- /dev/null +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -0,0 +1,78 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Require openshift_aws_base_ami + fail: + msg: "A base AMI is required for AMI building. Please ensure `openshift_aws_base_ami` is defined." 
+ when: openshift_aws_base_ami is undefined + + - name: "Alert user to variables needed and their values - {{ item.name }}" + debug: + msg: "{{ item.msg }}" + with_items: + - name: openshift_aws_clusterid + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + - name: openshift_aws_region + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: create an instance and prepare for ami + include_role: + name: openshift_aws + tasks_from: build_ami.yml + vars: + openshift_aws_node_group_type: compute + + - name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + filters: + "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ instancesout.instances[0].public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + + - name: add host to nodes + add_host: + groups: nodes + name: "{{ instancesout.instances[0].public_dns_name }}" + + - name: set the user to perform installation + set_fact: + ansible_ssh_user: root + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/evaluate_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/initialize_facts.yml + +- name: run the std_include + include: ../../common/openshift-cluster/initialize_openshift_repos.yml + +- name: install node config + include: ../../common/openshift-node/config.yml + +- hosts: localhost + connection: local + become: no + tasks: + - name: seal the ami + include_role: + name: openshift_aws + tasks_from: seal_ami.yml + vars: + openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 119df9c7d..000000000 --- a/playbooks/aws/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([]) - | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}" - -g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}" diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml deleted file mode 100644 index 8d64b0521..000000000 --- a/playbooks/aws/openshift-cluster/config.yml +++ 
/dev/null @@ -1,38 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_public_hostname: "{{ ec2_ip_address }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_node_labels: - region: "{{ deployment_vars[deployment_type].region }}" - type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}" - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" - openshift_use_dnsmasq: false diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml new file mode 100644 index 000000000..86d58a68e --- /dev/null +++ b/playbooks/aws/openshift-cluster/install.yml @@ -0,0 +1,74 @@ +--- +- name: Setup the vpc and the master node group + hosts: localhost + tasks: + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: add new master to masters group + add_host: + groups: "masters,etcd,nodes" + name: "{{ item.public_ip_address }}" + hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}" + with_items: "{{ instancesout.instances }}" + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" + +- name: set the master facts for hostname to elb + hosts: masters + gather_facts: no + remote_user: root + tasks: + - name: fetch elbs + ec2_elb_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + names: + - "{{ item }}" + with_items: + - "{{ openshift_aws_clusterid | default('default') }}-master-external" + - "{{ openshift_aws_clusterid | default('default') }}-master-internal" + delegate_to: localhost + register: elbs + + - debug: var=elbs + + - name: set fact + set_fact: + openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + osm_custom_cors_origins: + - "{{ elbs.results[1].elbs[0].dns_name }}" + - "console.{{ 
openshift_aws_clusterid | default('default') }}.openshift.com" + - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + with_items: "{{ groups['masters'] }}" + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/std_include.yml + +- name: run the config + include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml deleted file mode 100644 index 3edace493..000000000 --- a/playbooks/aws/openshift-cluster/launch.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ etcd_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - add_host: - name: "{{ master_names.0 }}" - groups: service_master - when: master_names is defined and master_names.0 is defined - -- include: update.yml -- include: list.yml diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py deleted file mode 100644 index 99d0f44f0..000000000 --- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -#pylint: skip-file -# flake8: noqa -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
- -DOCUMENTATION = ''' ---- -module: ec2_ami_find -version_added: 2.0 -short_description: Searches for AMIs to obtain the AMI ID and other information -description: - - Returns list of matching AMIs with AMI ID, along with other useful information - - Can search AMIs with different owners - - Can search by matching tag(s), by AMI name and/or other criteria - - Results can be sorted and sliced -author: Tom Bamford -notes: - - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. - - See the example below for a suggestion of how to search by distro/release. -options: - region: - description: - - The AWS region to use. - required: true - aliases: [ 'aws_region', 'ec2_region' ] - owner: - description: - - Search AMIs owned by the specified owner - - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace' - - If not specified, all EC2 AMIs in the specified region will be searched. - - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\. - required: false - default: null - ami_id: - description: - - An AMI ID to match. - default: null - required: false - ami_tags: - description: - - A hash/dictionary of tags to match for the AMI. - default: null - required: false - architecture: - description: - - An architecture type to match (e.g. x86_64). - default: null - required: false - hypervisor: - description: - - A hypervisor type type to match (e.g. xen). - default: null - required: false - is_public: - description: - - Whether or not the image(s) are public. - choices: ['yes', 'no'] - default: null - required: false - name: - description: - - An AMI name to match. - default: null - required: false - platform: - description: - - Platform type to match. - default: null - required: false - sort: - description: - - Optional attribute which with to sort the results. - - If specifying 'tag', the 'tag_name' parameter is required. - choices: ['name', 'description', 'tag'] - default: null - required: false - sort_tag: - description: - - Tag name with which to sort results. - - Required when specifying 'sort=tag'. - default: null - required: false - sort_order: - description: - - Order in which to sort results. - - Only used when the 'sort' parameter is specified. - choices: ['ascending', 'descending'] - default: 'ascending' - required: false - sort_start: - description: - - Which result to start with (when sorting). - - Corresponds to Python slice notation. - default: null - required: false - sort_end: - description: - - Which result to end with (when sorting). - - Corresponds to Python slice notation. - default: null - required: false - state: - description: - - AMI state to match. - default: 'available' - required: false - virtualization_type: - description: - - Virtualization type to match (e.g. hvm). - default: null - required: false - no_result_action: - description: - - What to do when no results are found. 
- - "'success' reports success and returns an empty array" - - "'fail' causes the module to report failure" - choices: ['success', 'fail'] - default: 'success' - required: false -requirements: - - boto - -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Search for the AMI tagged "project:website" -- ec2_ami_find: - owner: self - tags: - project: website - no_result_action: fail - register: ami_find - -# Search for the latest Ubuntu 14.04 AMI -- ec2_ami_find: - name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*" - owner: 099720109477 - sort: name - sort_order: descending - sort_end: 1 - register: ami_find - -# Launch an EC2 instance -- ec2: - image: "{{ ami_search.results[0].ami_id }}" - instance_type: m4.medium - key_name: mykey - wait: yes -''' - -try: - import boto.ec2 - HAS_BOTO=True -except ImportError: - HAS_BOTO=False - -import json - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - region = dict(required=True, - aliases = ['aws_region', 'ec2_region']), - owner = dict(required=False, default=None), - ami_id = dict(required=False), - ami_tags = dict(required=False, type='dict', - aliases = ['search_tags', 'image_tags']), - architecture = dict(required=False), - hypervisor = dict(required=False), - is_public = dict(required=False), - name = dict(required=False), - platform = dict(required=False), - sort = dict(required=False, default=None, - choices=['name', 'description', 'tag']), - sort_tag = dict(required=False), - sort_order = dict(required=False, default='ascending', - choices=['ascending', 'descending']), - sort_start = dict(required=False), - sort_end = dict(required=False), - state = dict(required=False, default='available'), - virtualization_type = dict(required=False), - no_result_action = dict(required=False, default='success', - choices = ['success', 'fail']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module, install via pip or your package manager') - - ami_id = module.params.get('ami_id') - ami_tags = module.params.get('ami_tags') - architecture = module.params.get('architecture') - hypervisor = module.params.get('hypervisor') - is_public = module.params.get('is_public') - name = module.params.get('name') - owner = module.params.get('owner') - platform = module.params.get('platform') - sort = module.params.get('sort') - sort_tag = module.params.get('sort_tag') - sort_order = module.params.get('sort_order') - sort_start = module.params.get('sort_start') - sort_end = module.params.get('sort_end') - state = module.params.get('state') - virtualization_type = module.params.get('virtualization_type') - no_result_action = module.params.get('no_result_action') - - filter = {'state': state} - - if ami_id: - filter['image_id'] = ami_id - if ami_tags: - for tag in ami_tags: - filter['tag:'+tag] = ami_tags[tag] - if architecture: - filter['architecture'] = architecture - if hypervisor: - filter['hypervisor'] = hypervisor - if is_public: - filter['is_public'] = is_public - if name: - filter['name'] = name - if platform: - filter['platform'] = platform - if virtualization_type: - filter['virtualization_type'] = virtualization_type - - ec2 = ec2_connect(module) - - images_result = ec2.get_all_images(owners=owner, filters=filter) - - if no_result_action == 'fail' and len(images_result) == 0: - module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter)) - - results 
= [] - for image in images_result: - data = { - 'ami_id': image.id, - 'architecture': image.architecture, - 'description': image.description, - 'is_public': image.is_public, - 'name': image.name, - 'owner_id': image.owner_id, - 'platform': image.platform, - 'root_device_name': image.root_device_name, - 'root_device_type': image.root_device_type, - 'state': image.state, - 'tags': image.tags, - 'virtualization_type': image.virtualization_type, - } - - if image.kernel_id: - data['kernel_id'] = image.kernel_id - if image.ramdisk_id: - data['ramdisk_id'] = image.ramdisk_id - - results.append(data) - - if sort == 'tag': - if not sort_tag: - module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'") - results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending')) - elif sort: - results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending')) - - try: - if sort and sort_start and sort_end: - results = results[int(sort_start):int(sort_end)] - elif sort and sort_start: - results = results[int(sort_start):] - elif sort and sort_end: - results = results[:int(sort_end)] - except TypeError: - module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end") - - module.exit_json(results=results) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -if __name__ == '__main__': - main() - diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml deleted file mode 100644 index ed8aac398..000000000 --- a/playbooks/aws/openshift-cluster/list.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - gather_facts: no - connection: local - become: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=tag_clusterid_{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}" - oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}" - with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml new file mode 100644 index 000000000..db7afac6f --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision.yml @@ -0,0 +1,17 @@ +--- +- name: Setup the vpc and the master node group + hosts: localhost + tasks: + + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: create default vpc + include_role: + name: openshift_aws + tasks_from: provision.yml diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml new file mode 100644 index 000000000..e787deced --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -0,0 +1,16 @@ +--- +# Once an AMI is built then this script is used for +# the one stop shop to provision and install a cluster +# this playbook is run 
with the following parameters: +# ansible-playbook -i openshift-ansible-inventory provision_install.yml +- name: Include the provision.yml playbook to create cluster + include: provision.yml + +- name: Include the install.yml playbook to install cluster + include: install.yml + +- name: Include the install.yml playbook to install cluster + include: provision_nodes.yml + +- name: Include the accept.yml playbook to accept nodes into the cluster + include: accept.yml diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml new file mode 100644 index 000000000..44c686e08 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_nodes.yml @@ -0,0 +1,18 @@ +--- +- name: create the node scale groups + hosts: localhost + connection: local + gather_facts: yes + tasks: + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: create the node groups + include_role: + name: openshift_aws + tasks_from: provision_nodes.yml diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml new file mode 100644 index 000000000..28eb9c993 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml @@ -0,0 +1,28 @@ +--- +# when creating an AMI set this option to True +# when installing the cluster, set this to False +openshift_node_bootstrap: True + +# specify a clusterid +#openshift_aws_clusterid: default + +# must specify a base_ami when building an AMI +#openshift_aws_base_ami: + +# when creating an encrypted AMI please specify use_encryption +#openshift_aws_ami_encrypt: False + +# custom certificates are required for the ELB +#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' +#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key' +#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt' + +# This is required for any ec2 instances +#openshift_aws_ssh_key_name: myuser_key + +# This will ensure these users are created +#openshift_aws_users: +#- key_name: myuser_key +# username: myuser +# pub_key: | +# ssh-rsa AAAA diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml deleted file mode 100644 index 6fa9142a0..000000000 --- a/playbooks/aws/openshift-cluster/scaleup.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- - -- hosts: localhost - gather_facts: no - connection: local - become: no - vars_files: - - vars.yml - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ groups.nodes_to_add }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: ../../common/openshift-cluster/scaleup.yml - vars_files: - - ../../aws/openshift-cluster/vars.yml - - ../../aws/openshift-cluster/cluster_hosts.yml - vars: - g_new_node_hosts: "{{ groups.nodes_to_add }}" - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - 
openshift_deployment_type: "{{ deployment_type }}" - openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml deleted file mode 100644 index f7f4812bb..000000000 --- a/playbooks/aws/openshift-cluster/service.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Call same systemctl command for openshift on all instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - fail: msg="cluster_id is required to be injected in this playbook" - when: cluster_id is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - groups: g_service_masters - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ master_hosts | default([]) }}" - - - name: Evaluate g_service_nodes - add_host: - name: "{{ item }}" - groups: g_service_nodes - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ node_hosts | default([]) }}" - -- include: ../../common/openshift-node/service.yml -- include: ../../common/openshift-master/service.yml diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index 608512b79..000000000 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,188 +0,0 @@ ---- -- set_fact: - created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" - docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}" - cluster: "{{ cluster_id }}" - env: "{{ cluster_env }}" - host_type: "{{ type }}" - sub_host_type: "{{ g_sub_host_type }}" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "master" and sub_host_type == "default" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "etcd" and sub_host_type == "default" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "node" and sub_host_type == "infra" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "node" and sub_host_type == "compute" - -- set_fact: - ec2_instance_type: "{{ deployment_vars[deployment_type].type }}" - when: ec2_instance_type is not defined -- set_fact: - ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}" - when: ec2_security_groups is not defined - -- name: Find amis for 
deployment_type - ec2_ami_find: - region: "{{ deployment_vars[deployment_type].region }}" - ami_id: "{{ deployment_vars[deployment_type].image }}" - name: "{{ deployment_vars[deployment_type].image_name }}" - register: ami_result - -- fail: msg="Could not find requested ami" - when: not ami_result.results - -- set_fact: - latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}" - volume_defs: - etcd: - root: - volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}" - device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}" - master: - root: - volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}" - device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}" - docker: - volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}" - device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}" - node: - root: - volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}" - device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}" - docker: - volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}" - device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}" - -- set_fact: - volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}" - -- name: Launch instance(s) - ec2: - state: present - region: "{{ deployment_vars[deployment_type].region }}" - keypair: "{{ deployment_vars[deployment_type].keypair }}" - group: "{{ deployment_vars[deployment_type].security_groups }}" - instance_type: "{{ ec2_instance_type }}" - image: "{{ deployment_vars[deployment_type].image }}" - count: "{{ instances | length }}" - vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}" - assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}" - user_data: "{{ lookup('template', '../templates/user_data.j2') }}" - wait: yes - instance_tags: - created-by: "{{ created_by }}" - clusterid: "{{ cluster }}" - environment: "{{ cluster_env }}" - host-type: "{{ host_type }}" - sub-host-type: "{{ sub_host_type }}" - volumes: "{{ volumes }}" - register: ec2 - -- name: Add Name tag to instances - ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" - args: - tags: - Name: "{{ item.0 }}" - -- set_fact: - instance_groups: > - tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, - tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }}, - tag_sub-host-type_{{ sub_host_type }} - -- set_fact: - node_label: - region: "{{ deployment_vars[deployment_type].region }}" - type: "{{sub_host_type}}" - when: host_type == "node" - -- set_fact: - node_label: - region: "{{ deployment_vars[deployment_type].region }}" - type: "{{host_type}}" - when: host_type != "node" - -- set_fact: - logrotate: - - name: syslog - path: | - /var/log/cron - /var/log/maillog - /var/log/messages - /var/log/secure - /var/log/spooler" - options: - - 
daily - - rotate 7 - - compress - - sharedscripts - - missingok - scripts: - postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true" - -- name: Add new instances groups and variables - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: "{{ instance_groups }}" - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - ec2_tag_sub-host-type: "{{ sub_host_type }}" - openshift_node_labels: "{{ node_label }}" - logrotate_scripts: "{{ logrotate }}" - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" - -- name: Add new instances to nodes_to_add group if needed - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: nodes_to_add - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - openshift_node_labels: "{{ node_label }}" - logrotate_scripts: "{{ logrotate }}" - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" - when: oo_extend_env is defined and oo_extend_env | bool - -- name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" - with_items: "{{ ec2.instances }}" - -- name: Wait for user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 deleted file mode 100644 index b1087f9c4..000000000 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ /dev/null @@ -1,22 +0,0 @@ -#cloud-config -{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %} -mounts: -- [ xvdb ] -- [ ephemeral0 ] -{% endif %} - -write_files: -{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %} -- content: | - DEVS=/dev/xvdb - VG=docker_vg - path: /etc/sysconfig/docker-storage-setup - owner: root:root - permissions: '0644' -{% endif %} -{% if deployment_vars[deployment_type].become | bool %} -- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty - permissions: 440 - content: | - Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty -{% endif %} diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml deleted file mode 100644 index 1f15aa4bf..000000000 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -- name: Terminate instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}" - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - roles: - - 
role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- name: Terminate instances - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Remove tags from instances - ec2_tag: - resource: "{{ hostvars[item]['ec2_id'] }}" - region: "{{ hostvars[item]['ec2_region'] }}" - state: absent - tags: - environment: "{{ hostvars[item]['ec2_tag_environment'] }}" - clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}" - host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}" - sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}" - with_items: "{{ groups.oo_hosts_to_terminate }}" - when: "'oo_hosts_to_terminate' in groups" - - - name: Terminate instances - ec2: - state: absent - instance_ids: ["{{ hostvars[item].ec2_id }}"] - region: "{{ hostvars[item].ec2_region }}" - ignore_errors: yes - register: ec2_term - with_items: "{{ groups.oo_hosts_to_terminate }}" - when: "'oo_hosts_to_terminate' in groups" - - # Fail if any of the instances failed to terminate with an error other - # than 403 Forbidden - - fail: - msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}" - when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: "{{ ec2_term.results }}" - - - name: Stop instance if termination failed - ec2: - state: stopped - instance_ids: ["{{ item.item.ec2_id }}"] - region: "{{ item.item.ec2_region }}" - register: ec2_stop - when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: "{{ ec2_term.results }}" - - - name: Rename stopped instances - ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present - args: - tags: - Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: "{{ ec2_stop.results }}" - when: ec2_stop | changed diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml deleted file mode 100644 index ed05d61ed..000000000 --- a/playbooks/aws/openshift-cluster/update.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Update - Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Update - Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml deleted file mode 100644 index d774187f0..000000000 --- a/playbooks/aws/openshift-cluster/vars.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -debug_level: 2 - -deployment_rhel7_ent_base: - # rhel-7.1, requires cloud access subscription - image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', 
True) }}" - image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}" - region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}" - ssh_user: ec2-user - become: yes - keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}" - type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}" - security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}" - vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}" - assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}" - -deployment_vars: - origin: - # centos-7, requires marketplace - image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}" - image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}" - region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}" - ssh_user: centos - become: yes - keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}" - type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}" - security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}" - vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}" - assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}" - - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml new file mode 100644 index 000000000..0e8e7a94d --- /dev/null +++ b/playbooks/byo/openshift-cfme/config.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-cfme/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..c8ed16859 --- /dev/null +++ b/playbooks/byo/openshift-cfme/uninstall.yml @@ -0,0 +1,6 @@ +--- +# - include: ../openshift-cluster/initialize_groups.yml +# tags: +# - always + +- include: ../../common/openshift-cfme/uninstall.yml diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md new file mode 100644 index 000000000..b26e7d7ed --- /dev/null +++ b/playbooks/byo/openshift-checks/README.md @@ -0,0 +1,104 @@ +# OpenShift health checks + +This directory contains Ansible playbooks for detecting potential problems prior +to an install, as well as health checks to run on existing OpenShift clusters. + +Ansible's default operation mode is to fail fast, on the first error. However, +when performing checks, it is useful to gather as much information about +problems as possible in a single run. + +Thus, the playbooks run a battery of checks against the inventory hosts and +gather intermediate errors, giving a more complete diagnostic of the state of +each host. If any check failed, the playbook run will be marked as failed. + +To facilitate understanding the problems that were encountered, a custom +callback plugin summarizes execution errors at the end of a playbook run. + +## Available playbooks + +1. 
Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
+   requirements and looks for common problems that can prevent a successful
+   installation of a production cluster.
+
+2. Diagnostic playbook ([health.yml](health.yml)) - checks an existing cluster
+   for known signs of problems.
+
+3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
+   check that certificates in use are valid and not expiring soon.
+
+4. Adhoc playbook ([adhoc.yml](adhoc.yml)) - use it to run adhoc checks or to
+   list existing checks.
+   See the [next section](#the-adhoc-playbook) for a usage example.
+
+## Running
+
+With a [recent installation of Ansible](../../../README.md#setup), run the playbook
+against your inventory file. Here is the step-by-step:
+
+1. If you haven't done it yet, clone this repository:
+
+   ```console
+   $ git clone https://github.com/openshift/openshift-ansible
+   $ cd openshift-ansible
+   ```
+
+2. Install the [dependencies](../../../README.md#setup)
+
+3. Run the appropriate playbook:
+
+   ```console
+   $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+   ```
+
+   or
+
+   ```console
+   $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+   ```
+
+   or
+
+   ```console
+   $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+   ```
+
+### The adhoc playbook
+
+The adhoc playbook gives flexibility to run any check or a custom group of
+checks. What will be run is determined by the `openshift_checks` variable,
+which, among other ways supported by Ansible, can be set on the command line
+using the `-e` flag.
+
+For example, to run the `docker_storage` check:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+```
+
+To run more checks, use a comma-separated list of check names:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+```
+
+To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+```
+
+It is valid to specify multiple check tags and individual check names together
+in a comma-separated list.
+
+To list all of the available checks and tags, run the adhoc playbook without
+setting the `openshift_checks` variable:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+```
+
+## Running in a container
+
+This repository is built into a Docker image including Ansible so that it can
+be run anywhere Docker is available, without the need to manually install dependencies.
+Instructions for doing so may be found [in the README](../../../README_CONTAINER_IMAGE.md).
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/byo/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..226bed732
--- /dev/null
+++ b/playbooks/byo/openshift-checks/adhoc.yml
@@ -0,0 +1,27 @@
+---
+# NOTE: ideally this would be just part of a single play in
+# common/openshift-checks/adhoc.yml that lists the existing checks when
+# openshift_checks is not set or runs the requested checks. However, to actually
+# run the checks we need the included dependencies to run first and that
+# takes time.
To speed up listing checks, we use this separate play that runs +# before the include of dependencies to save time and improve the UX. +- name: OpenShift health checks + # NOTE: though the openshift_checks variable could be potentially defined on + # individual hosts while not defined for localhost, we do not support that + # usage. Running this play only in localhost speeds up execution. + hosts: localhost + connection: local + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: adhoc + pre_tasks: + - name: List known health checks + action: openshift_health_check + when: openshift_checks is undefined or not openshift_checks + +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/adhoc.yml diff --git a/playbooks/certificate_expiry/default.yaml b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml index 630135cae..630135cae 100644 --- a/playbooks/certificate_expiry/default.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml diff --git a/playbooks/certificate_expiry/easy-mode-upload.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml index 378d1f154..378d1f154 100644 --- a/playbooks/certificate_expiry/easy-mode-upload.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml diff --git a/playbooks/certificate_expiry/easy-mode.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml index ae41c7c14..ae41c7c14 100644 --- a/playbooks/certificate_expiry/easy-mode.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml diff --git a/playbooks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml index d80cb6ff4..d80cb6ff4 100644 --- a/playbooks/certificate_expiry/html_and_json_default_paths.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml diff --git a/playbooks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml index 2189455b7..2189455b7 100644 --- a/playbooks/certificate_expiry/html_and_json_timestamp.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml diff --git a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml index 87a0f3be4..87a0f3be4 100644 --- a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml diff --git a/playbooks/certificate_expiry/longer_warning_period.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml index 960457c4b..960457c4b 100644 --- a/playbooks/certificate_expiry/longer_warning_period.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles new file mode 120000 index 000000000..4bdbcbad3 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/roles @@ -0,0 +1 @@ +../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml new file mode 100644 index 000000000..96a71e4dc --- /dev/null +++ b/playbooks/byo/openshift-checks/health.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/health.yml diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml new file mode 100644 index 000000000..dd93df0bb --- /dev/null +++ b/playbooks/byo/openshift-checks/pre-install.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift-preflight/roles b/playbooks/byo/openshift-checks/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/byo/openshift-preflight/roles +++ b/playbooks/byo/openshift-checks/roles diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index 268a65415..e807ac004 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -1,6 +1,8 @@ --- g_etcd_hosts: "{{ groups.etcd | default([]) }}" +g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" g_master_hosts: "{{ groups.masters | default([]) }}" @@ -15,7 +17,10 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}" g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" +g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}" + g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) - | union(g_lb_hosts) | union(g_nfs_hosts) + | union(g_new_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) + | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts) | default([]) }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index acf5469bf..60fa44c5b 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -9,6 +9,4 @@ - include: ../../common/openshift-cluster/config.yml vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml index 2785dcc3b..2a725510a 100644 --- a/playbooks/byo/openshift-cluster/initialize_groups.yml +++ b/playbooks/byo/openshift-cluster/initialize_groups.yml @@ -8,17 +8,3 @@ - always tasks: - include_vars: cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: cluster_hosts.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index 76f165c6d..a523bb47f 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -5,8 +5,11 
@@ # currently supported method. # - include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always - include: ../../common/openshift-cluster/openshift_logging.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml index 5ad3a1a01..1135c8c11 100644 --- a/playbooks/byo/openshift-cluster/openshift-metrics.yml +++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml @@ -1,4 +1,10 @@ --- - include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always - include: ../../common/openshift-cluster/openshift_metrics.yml diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml new file mode 100644 index 000000000..15917078d --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml @@ -0,0 +1,4 @@ +--- +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/openshift_prometheus.yml diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml new file mode 100644 index 000000000..8e80f158b --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml @@ -0,0 +1,6 @@ +--- +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-cluster/openshift_provisioners.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 012ce69ec..a3894e243 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -20,5 +20,7 @@ - include: ../../common/openshift-node/restart.yml - include: ../../common/openshift-cluster/redeploy-certificates/router.yml + when: openshift_hosted_manage_router | default(true) | bool - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml + when: openshift_hosted_manage_registry | default(true) | bool diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml new file mode 100644 index 000000000..29f821eda --- /dev/null +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml @@ -0,0 +1,10 @@ +--- +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index 3b33e0d6f..6e11a111b 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -7,4 +7,4 @@ tags: - always -- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml +- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml new file mode 100644 index 000000000..40a7606e7 --- /dev/null +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -0,0 +1,15 @@ +--- +# +# This 
playbook is a preview of upcoming changes for installing +# Hosted logging on. See inventory/byo/hosts.*.example for the +# currently supported method. +# +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/service_catalog.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml index 8005a17a3..5bd5d64ab 100644 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -1,4 +1,6 @@ --- - include: ../initialize_groups.yml +- include: ../../../common/openshift-cluster/evaluate_groups.yml + - include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 690b663f4..697a18c4d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -4,106 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
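The deleted comment above refers to the standard version-selection variables used by the upgrade plays. As an illustrative sketch only (placeholder values, not taken from this patch), the same variables would appear in an inventory roughly like this:

```yaml
# Hypothetical inventory variables (placeholders): openshift_release names the
# requested release, while openshift_image_tag / openshift_pkg_version, if set,
# pin a more specific version for containerized or RPM installs respectively.
openshift_release: "3.3"
#openshift_image_tag: "v3.3.<z>"    # containerized installs (placeholder)
#openshift_pkg_version: "-3.3.<z>"  # RPM installs (placeholder)
```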
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index fca2c04f3..4d284c279 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -13,101 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the 
openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index d171ac3cd..180a2821f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -6,103 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - 
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
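The cleanup play that follows includes cleanup_unused_images.yml, whose contents are not shown in this patch. A minimal sketch of the general idea, assuming the docker CLI and GNU xargs are available, might look like:

```yaml
# Sketch only -- not the actual cleanup_unused_images.yml. Prune dangling
# images so nodes have room for the images pulled during the upgrade.
- name: Remove dangling Docker images (illustrative)
  shell: docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi
  failed_when: false
```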
-- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml index 217163802..8cce91b3f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -4,104 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index d21c195bf..8e5d0f5f9 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -13,101 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more 
specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index 7bb66611c..d5329b858 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: 
../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index f0900e04e..f44d55ad2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -4,110 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index e8d834a04..2377713fa 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -13,105 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - 
vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index c2a4debc8..5b3f6ab06 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | 
default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml index 763e79e01..40120b3e8 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -4,110 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
- -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 7a1377be2..408a4c631 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -13,105 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 065746493..b5f42b804 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting 
openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md new file mode 100644 index 000000000..4bf53be81 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md @@ -0,0 +1,20 @@ +# v3.7 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the following steps.
+ + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..e41c29682 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,7 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..21e0fd815 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,16 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..0e09d996e --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,9 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml index dd3f47a4d..1342bd60c 100644 --- a/playbooks/byo/openshift-etcd/config.yml +++ b/playbooks/byo/openshift-etcd/config.yml @@ -1,14 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-etcd/config.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml new file mode 100644 index 000000000..2dec2bef6 --- /dev/null +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/migrate.yml diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml index d43533641..034bba4b4 100644 --- a/playbooks/byo/openshift-etcd/restart.yml +++ b/playbooks/byo/openshift-etcd/restart.yml @@ -1,10 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-etcd/restart.yml diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..a2a5856a9 --- /dev/null +++ b/playbooks/byo/openshift-etcd/scaleup.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/scaleup.yml diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md new file mode 100644 index 000000000..f62aea229 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/README.md @@ -0,0 +1,98 @@ +# OpenShift GlusterFS Playbooks + +These playbooks are intended to enable the use of GlusterFS volumes by pods in +OpenShift. While they try to provide a sane set of defaults they do cover a +variety of scenarios and configurations, so read carefully. :) + +## Playbook: config.yml + +This is the main playbook that integrates GlusterFS into a new or existing +OpenShift cluster. It will also, if specified, configure a hosted Docker +registry with GlusterFS backend storage. + +This playbook requires the `glusterfs` group to exist in the Ansible inventory +file. The hosts in this group are the nodes of the GlusterFS cluster. + + * If this is a newly configured cluster each host must have a + `glusterfs_devices` variable defined, each of which must be a list of block + storage devices intended for use only by the GlusterFS cluster. If this is + also an external GlusterFS cluster, you must specify + `openshift_storage_glusterfs_is_native=False`. If the cluster is to be + managed by an external heketi service you must also specify + `openshift_storage_glusterfs_heketi_is_native=False` and + `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi + service. 
All these variables are specified in `[OSEv3:vars]`. + * If this is an existing cluster you do not need to specify a list of block + devices but you must specify the following variables in `[OSEv3:vars]`: + * `openshift_storage_glusterfs_is_missing=False` + * `openshift_storage_glusterfs_heketi_is_missing=False` + +By default, pods for a native GlusterFS cluster will be created in the +`default` namespace. To change this, specify +`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`. + +To configure the deployment of a Docker registry with GlusterFS backend +storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in +`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the +registry, specify a `glusterfs_registry` group that is populated, like the +`glusterfs` group, with the nodes for the separate cluster. If no +`glusterfs_registry` group is specified, the cluster defined by the `glusterfs` +group will be used. + +To swap an existing hosted registry's backend storage for a GlusterFS volume, +specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To +additionally copy any existing contents from an existing hosted registry, +specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`. + +**NOTE:** For each namespace that is to have access to GlusterFS volumes, an +Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding +Service resource must be created. If dynamic provisioning using StorageClasses +is configured, these resources are created automatically in the namespaces that +require them. This playbook also takes care of creating these resources in the +namespaces used for deployment. + +An example of a minimal inventory file: +``` +[OSEv3:children] +masters +nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +deployment_type=origin + +[masters] +master + +[nodes] +node0 +node1 +node2 + +[glusterfs] +node0 glusterfs_devices='[ "/dev/sdb" ]' +node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]' +node2 glusterfs_devices='[ "/dev/sdd" ]' +``` + +## Playbook: registry.yml + +This playbook is intended for admins who want to deploy a hosted Docker +registry with GlusterFS backend storage on an existing OpenShift cluster. It +has all the same requirements and behaviors as `config.yml`. + +## Role: openshift_storage_glusterfs + +The bulk of the work is done by the `openshift_storage_glusterfs` role. This +role can handle the deployment of GlusterFS (if it is to be hosted on the +OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone), +and (if specified) integration as backend storage for a hosted Docker registry. + +See the documentation in the role's directory for further details. + +## Role: openshift_hosted + +The `openshift_hosted` role recognizes `glusterfs` as a possible storage +backend for a hosted Docker registry. It will also, if configured, handle the +swap of an existing registry's backend storage to a GlusterFS volume.
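The minimal inventory in the new README above covers only the native (OpenShift-hosted) case. As a rough sketch of the non-default options the README describes, the `[OSEv3:vars]` section could be extended along these lines; the heketi URL and the namespace value are illustrative placeholders, not values taken from this change:

```
[OSEv3:vars]
# External (non-native) GlusterFS cluster managed by an external heketi service
openshift_storage_glusterfs_is_native=False
openshift_storage_glusterfs_heketi_is_native=False
# Placeholder URL for the external heketi service
openshift_storage_glusterfs_heketi_url=http://heketi.example.com:8080

# Create GlusterFS-related resources in a namespace other than "default"
openshift_storage_glusterfs_namespace=glusterfs

# Back the hosted registry with GlusterFS, swapping in the new storage and
# copying any existing registry contents
openshift_hosted_registry_storage_kind=glusterfs
openshift_hosted_registry_storage_glusterfs_swap=True
openshift_hosted_registry_storage_glusterfs_swapcopy=True
```

An inventory like this would then be used with `config.yml` (or `registry.yml` for the registry-only case) in the usual `ansible-playbook -i <inventory file> ...` fashion.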
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml new file mode 100644 index 000000000..3f11f3991 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/config.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/config.yml diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/gce/openshift-cluster/filter_plugins +++ b/playbooks/byo/openshift-glusterfs/filter_plugins diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins index ac79701db..ac79701db 120000 --- a/playbooks/gce/openshift-cluster/lookup_plugins +++ b/playbooks/byo/openshift-glusterfs/lookup_plugins diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..6ee6febdb --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/registry.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/registry.yml diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/byo/openshift-glusterfs/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/gce/openshift-cluster/roles +++ b/playbooks/byo/openshift-glusterfs/roles diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..32c828f97 --- /dev/null +++ b/playbooks/byo/openshift-loadbalancer/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-loadbalancer/config.yml diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml new file mode 100644 index 000000000..b3d7b5731 --- /dev/null +++ b/playbooks/byo/openshift-master/additional_config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/additional_config.yml diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml new file mode 100644 index 000000000..98be0c448 --- /dev/null +++ b/playbooks/byo/openshift-master/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/config.yml diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 7988863f3..8950efd00 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -1,10 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 8aa07a664..a09edd55a 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ 
b/playbooks/byo/openshift-master/scaleup.yml @@ -1,7 +1,20 @@ --- - include: ../openshift-cluster/initialize_groups.yml +- name: Ensure there are new_masters or new_nodes + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - fail: + msg: > + Detected no new_masters or no new_nodes in inventory. Please + add hosts to the new_masters and new_nodes host groups to add + masters. + when: + - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0) + +- include: ../../common/openshift-cluster/std_include.yml + - include: ../../common/openshift-master/scaleup.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml new file mode 100644 index 000000000..93b24411e --- /dev/null +++ b/playbooks/byo/openshift-nfs/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-nfs/config.yml diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml new file mode 100644 index 000000000..839dc36ff --- /dev/null +++ b/playbooks/byo/openshift-node/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml index 92665d71d..ccf9e82da 100644 --- a/playbooks/byo/openshift-node/restart.yml +++ b/playbooks/byo/openshift-node/restart.yml @@ -1,10 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-node/restart.yml diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index c6965fd6f..e0c36fb69 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -1,9 +1,19 @@ --- - include: ../openshift-cluster/initialize_groups.yml -- include: ../../common/openshift-node/scaleup.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}" - openshift_master_etcd_port: 2379 +- name: Ensure there are new_nodes + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - fail: + msg: > + Detected no new_nodes in inventory. Please add hosts to the + new_nodes host group to add nodes. + when: + - g_new_node_hosts | default([]) | length == 0 + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md deleted file mode 100644 index b50292eac..000000000 --- a/playbooks/byo/openshift-preflight/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# OpenShift preflight checks - -Here we provide an Ansible playbook for detecting potential roadblocks prior to -an install or upgrade. - -Ansible's default operation mode is to fail fast, on the first error. However, -when performing checks, it is useful to gather as much information about -problems as possible in a single run. 
- -The `check.yml` playbook runs a battery of checks against the inventory hosts -and tells Ansible to ignore intermediate errors, thus giving a more complete -diagnostic of the state of each host. Still, if any check failed, the playbook -run will be marked as having failed. - -To facilitate understanding the problems that were encountered, we provide a -custom callback plugin to summarize execution errors at the end of a playbook -run. - ---- - -*Note that currently the `check.yml` playbook is only useful for RPM-based -installations. Containerized installs are excluded from checks for now, but -might be included in the future if there is demand for that.* - ---- - -## Running - -With an installation of Ansible 2.2 or greater, run the playbook directly -against your inventory file. Here is the step-by-step: - -1. If you haven't done it yet, clone this repository: - - ```console - $ git clone https://github.com/openshift/openshift-ansible - $ cd openshift-ansible - ``` - -2. Run the playbook: - - ```console - $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml - ``` diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml index c5f05d0f0..2e53452a6 100644 --- a/playbooks/byo/openshift-preflight/check.yml +++ b/playbooks/byo/openshift-preflight/check.yml @@ -1,12 +1,3 @@ --- -- hosts: OSEv3 - name: run OpenShift health checks - roles: - - openshift_health_checker - post_tasks: - # NOTE: we need to use the old "action: name" syntax until - # https://github.com/ansible/ansible/issues/20513 is fixed. - - action: openshift_health_check - args: - checks: - - '@preflight' +# location is moved; this file remains so existing instructions keep working +- include: ../openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index 3b10323d6..a8c1c3a88 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -8,6 +8,7 @@ - always - name: Gather Cluster facts + # Temporarily reverting to OSEv3 until group standardization is complete hosts: OSEv3 roles: - openshift_facts diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 777743def..1b14ff32e 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -3,12 +3,9 @@ tags: - always -- include: ../common/openshift-cluster/std_include.yml - tags: - - always - - name: Subscribe hosts, update repos and update OS packages - hosts: l_oo_all_hosts + # Temporarily reverting to OSEv3 until group standardization is complete + hosts: OSEv3 roles: - role: rhel_subscribe when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml deleted file mode 100644 index 76246e7b0..000000000 --- a/playbooks/byo/vagrant.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: rhel_subscribe.yml - -- include: config.yml diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry new file mode 120000 index 000000000..9cf5334a1 --- /dev/null +++ b/playbooks/certificate_expiry @@ -0,0 +1 @@ +byo/openshift-checks/certificate_expiry/
\ No newline at end of file diff --git a/playbooks/certificate_expiry/roles b/playbooks/certificate_expiry/roles deleted file mode 120000 index b741aa3db..000000000 --- a/playbooks/certificate_expiry/roles +++ /dev/null @@ -1 +0,0 @@ -../../roles
\ No newline at end of file diff --git a/playbooks/common/README.md b/playbooks/common/README.md index 0b5e26989..968bd99cb 100644 --- a/playbooks/common/README.md +++ b/playbooks/common/README.md @@ -1,9 +1,8 @@ # Common playbooks This directory has a generic set of playbooks that are included by playbooks in -[`byo`](../byo), as well as other playbooks related to the -[`bin/cluster`](../../bin) tool. +[`byo`](../byo). Note: playbooks in this directory use generic group names that do not line up -with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks, -requiring an explicit remapping of groups. +with the groups used by the `byo` playbooks, requiring an explicit remapping of +groups. diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml new file mode 100644 index 000000000..533a35d9e --- /dev/null +++ b/playbooks/common/openshift-cfme/config.yml @@ -0,0 +1,44 @@ +--- +# TODO: Make this work. The 'name' variable below is undefined +# presently because it's part of the cfme role. This play can't run +# until that's re-worked. +# +# - name: Pre-Pull manageiq-pods docker images +# hosts: nodes +# tasks: +# - name: Ensure the latest manageiq-pods docker image is pulling +# docker_image: +# name: "{{ openshift_cfme_container_image }}" +# # Fire-and-forget method, never timeout +# async: 99999999999 +# # F-a-f, never check on this. True 'background' task. +# poll: 0 + +- name: Configure Masters for CFME Bulk Image Imports + hosts: oo_masters_to_config + serial: 1 + tasks: + - name: Run master cfme tuning playbook + include_role: + name: openshift_cfme + tasks_from: tune_masters + +- name: Setup CFME + hosts: oo_first_master + vars: + r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_cfme_mktemp + changed_when: false + - name: Ensure the server template was read from disk + debug: + msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_cfme + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/libvirt/openshift-cluster/filter_plugins +++ b/playbooks/common/openshift-cfme/filter_plugins diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/common/openshift-cfme/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/common/openshift-cfme/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/libvirt/openshift-cluster/roles +++ b/playbooks/common/openshift-cfme/roles diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..78b8e7668 --- /dev/null +++ b/playbooks/common/openshift-cfme/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall CFME + hosts: masters + tasks: + - name: Run the CFME Uninstall Role Tasks + include_role: + name: openshift_cfme + tasks_from: uninstall diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml new file mode 100644 index 000000000..dfcef8435 --- /dev/null +++ b/playbooks/common/openshift-checks/adhoc.yml @@ -0,0 +1,12 @@ +--- +- name: OpenShift health checks + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: adhoc + post_tasks: + - name: Run health checks + action: openshift_health_check + args: + checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml new file mode 100644 index 000000000..21ea785ef --- /dev/null +++ b/playbooks/common/openshift-checks/health.yml @@ -0,0 +1,11 @@ +--- +- name: Run OpenShift health checks + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: health + post_tasks: + - action: openshift_health_check + args: + checks: ['@health'] diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml new file mode 100644 index 000000000..88e6f9120 --- /dev/null +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -0,0 +1,11 @@ +--- +- name: run OpenShift pre-install checks + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: pre-install + post_tasks: + - action: openshift_health_check + args: + checks: ['@preflight'] diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/common/openshift-checks/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/openstack/openshift-cluster/roles +++ b/playbooks/common/openshift-checks/roles diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 239bb211b..bbd5a0185 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,15 +1,37 @@ --- +# TODO: refactor this into its own include +# and pass a variable for ctx +- name: Verify Requirements + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: install + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + - include: initialize_oo_option_facts.yml tags: - always -- include: disable_excluder.yml - tags: - - always +- name: Set hostname + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + # TODO: switch back to hostname module once we depend on ansible-2.4 + # https://github.com/ansible/ansible/pull/25906 + - name: Set hostname + command: "hostnamectl set-hostname {{ openshift.common.hostname }}" + when: openshift_set_hostname | default(false,true) | 
bool - include: ../openshift-etcd/config.yml - tags: - - etcd - include: ../openshift-nfs/config.yml tags: @@ -20,12 +42,8 @@ - loadbalancer - include: ../openshift-master/config.yml - tags: - - master -- include: additional_config.yml - tags: - - master +- include: ../openshift-master/additional_config.yml - include: ../openshift-node/config.yml tags: @@ -39,6 +57,8 @@ tags: - hosted -- include: reset_excluder.yml +- include: service_catalog.yml + when: + - openshift_enable_service_catalog | default(false) | bool tags: - - always + - servicecatalog diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml deleted file mode 100644 index f664c51c9..000000000 --- a/playbooks/common/openshift-cluster/disable_excluder.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Disable excluders - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - # During installation the excluders are installed with present state. - # So no pre-validation check here as the excluders are either to be installed (present = latest) - # or they are not going to be updated if already installed - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: present - docker_excluder_package_state: present diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index 5425f448f..be14b06f0 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -27,9 +27,6 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: - - role: common - local_facts: - use_dnsmasq: True - role: master local_facts: dns_port: '8053' @@ -37,7 +34,7 @@ dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: dnsConfig.bindAddress yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}" - notify: restart master + notify: restart master api - meta: flush_handlers - name: Configure nodes for dnsmasq @@ -50,9 +47,6 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: - - role: common - local_facts: - use_dnsmasq: True - role: node local_facts: dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 4331bc88d..e55b2f964 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,34 +5,54 @@ become: no gather_facts: no tasks: - - fail: - msg: This playbook requires g_etcd_hosts to be set - when: g_etcd_hosts is not defined + - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required + fail: + msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set + when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined - - fail: + - name: Evaluate groups - g_master_hosts or g_new_master_hosts required + fail: msg: This playbook requires g_master_hosts or g_new_master_hosts to be set when: g_master_hosts is not defined and g_new_master_hosts is not defined - - fail: + - name: Evaluate groups - g_node_hosts or g_new_node_hosts required + fail: msg: This playbook requires g_node_hosts or g_new_node_hosts to be set when: g_node_hosts is not defined and g_new_node_hosts is not defined - - fail: + - name: 
Evaluate groups - g_lb_hosts required + fail: msg: This playbook requires g_lb_hosts to be set when: g_lb_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts required + fail: msg: This playbook requires g_nfs_hosts to be set when: g_nfs_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts is single host + fail: msg: The nfs group must be limited to one host - when: (groups[g_nfs_hosts] | default([])) | length > 1 + when: g_nfs_hosts | default([]) | length > 1 - - fail: + - name: Evaluate groups - g_glusterfs_hosts required + fail: msg: This playbook requires g_glusterfs_hosts to be set when: g_glusterfs_hosts is not defined + - name: Evaluate groups - Fail if no etcd hosts group is defined + fail: + msg: > + Running etcd as an embedded service is no longer supported. If this is a + new install please define an 'etcd' group with either one or three + hosts. These hosts may be the same hosts as your masters. If this is an + upgrade you may set openshift_master_unsupported_embedded_etcd=true + until a migration playbook becomes available. + when: + - g_etcd_hosts | default([]) | length not in [3,1] + - not openshift_master_unsupported_embedded_etcd | default(False) + - not openshift_node_bootstrap | default(False) + - name: Evaluate oo_all_hosts add_host: name: "{{ item }}" @@ -51,42 +71,40 @@ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" changed_when: no - - name: Evaluate oo_etcd_to_config + - name: Evaluate oo_first_master add_host: - name: "{{ item }}" - groups: oo_etcd_to_config + name: "{{ g_master_hosts[0] }}" + groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_etcd_hosts | default([]) }}" + when: g_master_hosts|length > 0 changed_when: no - - name: Evaluate oo_masters_to_config + - name: Evaluate oo_new_etcd_to_config add_host: name: "{{ item }}" - groups: oo_masters_to_config + groups: oo_new_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" + with_items: "{{ g_new_etcd_hosts | default([]) }}" changed_when: no - - name: Evaluate oo_nodes_to_config + - name: Evaluate oo_masters_to_config add_host: name: "{{ item }}" - groups: oo_nodes_to_config + groups: oo_masters_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" + with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" changed_when: no - # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is - - name: Add master to oo_nodes_to_config + - name: Evaluate oo_etcd_to_config add_host: name: "{{ item }}" - groups: oo_nodes_to_config + groups: oo_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_master_hosts | default([]) }}" - when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool + with_items: "{{ g_etcd_hosts | default([]) }}" changed_when: no - name: Evaluate oo_first_etcd @@ -94,16 +112,45 @@ name: "{{ g_etcd_hosts[0] }}" groups: oo_first_etcd ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" when: g_etcd_hosts|length > 0 changed_when: no - - name: 
Evaluate oo_first_master + # We use two groups one for hosts we're upgrading which doesn't include embedded etcd + # The other for backing up which includes the embedded etcd host, there's no need to + # upgrade embedded etcd that just happens when the master is updated. + - name: Evaluate oo_etcd_hosts_to_upgrade add_host: - name: "{{ g_master_hosts[0] }}" - groups: oo_first_master + name: "{{ item }}" + groups: oo_etcd_hosts_to_upgrade + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" + changed_when: False + + - name: Evaluate oo_etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}" + changed_when: False + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - when: g_master_hosts|length > 0 + with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" + changed_when: no + + # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is + - name: Add master to oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_master_hosts | default([]) }}" + when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool changed_when: no - name: Evaluate oo_lb_to_config @@ -130,5 +177,14 @@ groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_glusterfs_hosts | default([]) }}" + with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}" + changed_when: no + + - name: Evaluate oo_etcd_to_migrate + add_host: + name: "{{ item }}" + groups: oo_etcd_to_migrate + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}" changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 9cebecd68..0723575c2 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -6,12 +6,151 @@ - name: Initialize host facts hosts: oo_all_hosts - roles: - - openshift_facts tasks: - - openshift_facts: + - name: load openshift_facts module + include_role: + name: openshift_facts + + # TODO: Should this role be refactored into health_checks?? 
+ - name: Run openshift_sanitize_inventory to set variables + include_role: + name: openshift_sanitize_inventory + + - name: Detecting Operating System from ostree_booted + stat: + path: /run/ostree-booted + register: ostree_booted + + # Locally setup containerized facts for now + - name: initialize_facts set fact l_is_atomic + set_fact: + l_is_atomic: "{{ ostree_booted.stat.exists }}" + + - name: initialize_facts set fact for containerized and l_is_*_system_container + set_fact: + l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}" + l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + + - name: initialize_facts set facts for l_any_system_container + set_fact: + l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}" + + - name: initialize_facts set fact for l_etcd_runtime + set_fact: + l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}" + + # TODO: Should this be moved into health checks?? + # Seems as though any check that happens with a corresponding fail should move into health_checks + - name: Validate python version - ans_dist is fedora and python is v3 + fail: + msg: | + openshift-ansible requires Python 3 for {{ ansible_distribution }}; + For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html + when: + - ansible_distribution == 'Fedora' + - ansible_python['version']['major'] != 3 + + # TODO: Should this be moved into health checks?? + # Seems as though any check that happens with a corresponding fail should move into health_checks + - name: Validate python version - ans_dist not Fedora and python must be v2 + fail: + msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}" + when: + - ansible_distribution != 'Fedora' + - ansible_python['version']['major'] != 2 + + # TODO: Should this be moved into health checks?? + # Seems as though any check that happens with a corresponding fail should move into health_checks + # Fail as early as possible if Atomic and old version of Docker + - when: + - l_is_atomic | bool + block: + + # See https://access.redhat.com/articles/2317361 + # and https://github.com/ansible/ansible/issues/15892 + # NOTE: the "'s can not be removed at this level else the docker command will fail + # NOTE: When ansible >2.2.1.x is used this can be updated per + # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121 + - name: Determine Atomic Host Docker Version + shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"' + register: l_atomic_docker_version + + - name: assert atomic host docker version is 1.12 or later + assert: + that: + - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=') + msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. 
+ + - when: + - not l_is_atomic | bool + block: + - name: Ensure openshift-ansible installer package deps are installed + package: + name: "{{ item }}" + state: present + with_items: + - iproute + - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}" + - PyYAML + - yum-utils + + - name: Ensure various deps for running system containers are installed + package: + name: "{{ item }}" + state: present + with_items: + - atomic + - ostree + - runc + when: + - l_any_system_container | bool + + - name: Default system_images_registry to a enterprise registry + set_fact: + system_images_registry: "registry.access.redhat.com" + when: + - system_images_registry is not defined + - openshift_deployment_type == "openshift-enterprise" + + - name: Default system_images_registry to community registry + set_fact: + system_images_registry: "docker.io" + when: + - system_images_registry is not defined + - openshift_deployment_type == "origin" + + - name: Gather Cluster facts and set is_containerized if needed + openshift_facts: role: common local_facts: + deployment_type: "{{ openshift_deployment_type }}" + deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" + cli_image: "{{ osm_image | default(None) }}" hostname: "{{ openshift_hostname | default(None) }}" - - set_fact: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + ip: "{{ openshift_ip | default(None) }}" + is_containerized: "{{ l_is_containerized | default(None) }}" + is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}" + is_node_system_container: "{{ l_is_node_system_container | default(false) }}" + is_master_system_container: "{{ l_is_master_system_container | default(false) }}" + is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}" + etcd_runtime: "{{ l_etcd_runtime }}" + system_images_registry: "{{ system_images_registry }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + public_ip: "{{ openshift_public_ip | default(None) }}" + portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" + http_proxy: "{{ openshift_http_proxy | default(None) }}" + https_proxy: "{{ openshift_https_proxy | default(None) }}" + no_proxy: "{{ openshift_no_proxy | default(None) }}" + generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" + no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" + + - name: initialize_facts set_fact repoquery command + set_fact: + repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" + + - name: initialize_facts set_fact on openshift_docker_hosted_registry_network + set_fact: + openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml new file mode 100644 index 000000000..a7114fc80 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml @@ -0,0 +1,8 @@ +--- +- name: Setup yum repositories for all hosts + hosts: oo_all_hosts + gather_facts: no + tasks: + - name: initialize openshift repos + include_role: + name: openshift_repos diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml 
b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 88f82f6f2..1b186f181 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,23 +1,13 @@ --- -# NOTE: requires openshift_facts be run -- name: Verify compatible yum/subscription-manager combination - hosts: l_oo_all_hosts - gather_facts: no +- name: Set version_install_base_package true on masters and nodes + hosts: oo_masters_to_config:oo_nodes_to_config tasks: - # See: - # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 - # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 - # https://github.com/openshift/openshift-ansible/issues/1138 - - name: Check for bad combinations of yum and subscription-manager - command: > - {{ repoquery_cmd }} --installed --qf '%{version}' "yum" - register: yum_ver_test - changed_when: false - when: not openshift.common.is_atomic | bool - - fail: - msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. - when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout + - name: Set version_install_base_package true + set_fact: + version_install_base_package: True + when: version_install_base_package is not defined +# NOTE: requires openshift_facts be run - name: Determine openshift_version to configure on first master hosts: oo_first_master roles: @@ -30,5 +20,10 @@ hosts: oo_all_hosts:!oo_first_master vars: openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" + pre_tasks: + - set_fact: + openshift_pkg_version: -{{ openshift_version }} + when: openshift_pkg_version is not defined + - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}" roles: - openshift_version diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 5db71b857..75339f6df 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -26,6 +26,8 @@ logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" roles: + - role: openshift_default_storage_class + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - role: openshift_hosted - role: openshift_metrics when: openshift_hosted_metrics_deploy | default(false) | bool @@ -46,6 +48,9 @@ - role: cockpit-ui when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) + - role: openshift_prometheus + when: openshift_hosted_prometheus_deploy | default(false) | bool + - name: Update master-config for publicLoggingURL hosts: oo_masters_to_config:!oo_first_master tags: diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index 57580406c..c1a5d83cd 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,6 +1,4 @@ --- -- include: evaluate_groups.yml - - name: OpenShift Aggregated Logging hosts: oo_first_master roles: diff 
--git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index bcff4a1a1..1dc180c26 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,7 +1,14 @@ --- -- include: evaluate_groups.yml - - name: OpenShift Metrics hosts: oo_first_master roles: - openshift_metrics + +- name: OpenShift Metrics + hosts: oo_masters:!oo_first_master + serial: 1 + tasks: + - name: Setup the non-first masters configs + include_role: + name: openshift_metrics + tasks_from: update_master_config.yaml diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml new file mode 100644 index 000000000..a979c0c00 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -0,0 +1,9 @@ +--- +- include: std_include.yml + +- name: OpenShift Prometheus + hosts: oo_first_master + roles: + - openshift_prometheus + vars: + openshift_prometheus_state: present diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml new file mode 100644 index 000000000..6964e8567 --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -0,0 +1,158 @@ +--- +- name: Check cert expirys + hosts: oo_etcd_to_config:oo_masters_to_config + vars: + openshift_certificate_expiry_show_all: yes + roles: + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry + +- name: Backup existing etcd CA certificate directories + hosts: oo_etcd_to_config + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat + - name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool + - name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent + when: etcd_ca_certs_dir_stat.stat.exists | bool + +- name: Generate new etcd CA + hosts: oo_first_etcd + roles: + - role: openshift_etcd_ca + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_etcd_mktemp + changed_when: false + +- name: Distribute etcd CA to etcd hosts + hosts: oo_etcd_to_config + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . 
+ args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + - name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + - name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + - name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + - copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" + +- include: ../../openshift-etcd/restart.yml + # Do not restart etcd when etcd certificates were previously expired. + when: ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) + +- name: Retrieve etcd CA certificate + hosts: oo_first_etcd + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + +- name: Distribute etcd CA to masters + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Deploy etcd CA + copy: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" + dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" + when: groups.oo_etcd_to_config | default([]) | length > 0 + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: + name: "{{ g_etcd_mktemp.stdout }}" + state: absent + changed_when: false + +- include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. 
+ when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml index 2963a5940..6b5c805e6 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml @@ -3,7 +3,8 @@ hosts: oo_first_etcd any_errors_fatal: true roles: - - etcd_common + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" post_tasks: - name: Determine if generated etcd certificates exist stat: @@ -27,7 +28,8 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: - - etcd_common + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" post_tasks: - name: Backup etcd certificates command: > @@ -50,6 +52,7 @@ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Redeploy etcd client certificates for masters hosts: oo_masters_to_config @@ -63,4 +66,5 @@ etcd_cert_prefix: "master.etcd-" openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml index c30889d64..51b196299 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml @@ -51,3 +51,13 @@ | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_certificates_redeploy: true + - role: lib_utils + post_tasks: + - yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: servingInfo.namedCertificates + value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}" + when: + - ('named_certificates' in openshift.master) + - openshift.master.named_certificates | default([]) | length > 0 + - openshift_master_overwrite_named_certificates | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 4fa7f9cdf..089ae6bbc 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -6,131 +6,17 @@ msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." 
when: not openshift.common.version_gte_3_2_or_1_2 | bool -- name: Backup existing etcd CA certificate directories - hosts: oo_etcd_to_config - roles: - - etcd_common - tasks: - - name: Determine if CA certificate directory exists - stat: - path: "{{ etcd_ca_dir }}" - register: etcd_ca_certs_dir_stat - - name: Backup generated etcd certificates - command: > - tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz - {{ etcd_ca_dir }} - args: - warn: no - when: etcd_ca_certs_dir_stat.stat.exists | bool - - name: Remove CA certificate directory - file: - path: "{{ etcd_ca_dir }}" - state: absent - when: etcd_ca_certs_dir_stat.stat.exists | bool - -- name: Generate new etcd CA - hosts: oo_first_etcd - roles: - - role: openshift_etcd_ca - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - -- name: Create temp directory for syncing certs - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_etcd_mktemp - changed_when: false - -- name: Distribute etcd CA to etcd hosts - hosts: oo_etcd_to_config +- name: Check cert expirys + hosts: oo_nodes_to_config:oo_masters_to_config vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_certificate_expiry_show_all: yes roles: - - etcd_common - tasks: - - name: Create a tarball of the etcd ca certs - command: > - tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz - -C {{ etcd_ca_dir }} . - args: - creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - warn: no - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Retrieve etcd ca cert tarball - fetch: - src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Ensure ca directory exists - file: - path: "{{ etcd_ca_dir }}" - state: directory - - name: Unarchive etcd ca cert tarballs - unarchive: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" - dest: "{{ etcd_ca_dir }}" - - name: Read current etcd CA - slurp: - src: "{{ etcd_conf_dir }}/ca.crt" - register: g_current_etcd_ca_output - - name: Read new etcd CA - slurp: - src: "{{ etcd_ca_dir }}/ca.crt" - register: g_new_etcd_ca_output - - copy: - content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" - dest: "{{ item }}/ca.crt" - with_items: - - "{{ etcd_conf_dir }}" - - "{{ etcd_ca_dir }}" - -- name: Retrieve etcd CA certificate - hosts: oo_first_etcd - roles: - - etcd_common - tasks: - - name: Retrieve etcd CA certificate - fetch: - src: "{{ etcd_conf_dir }}/ca.crt" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - -- name: Distribute etcd CA to masters - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Deploy CA certificate, key, bundle and serial - copy: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" - dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" - when: groups.oo_etcd_to_config | default([]) | length > 0 - -- name: Delete temporary directory on localhost - hosts: 
localhost - connection: local - become: no - gather_facts: no - tasks: - - file: - name: "{{ g_etcd_mktemp.stdout }}" - state: absent - changed_when: false - -- include: ../../openshift-etcd/restart.yml + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry # Update master config when ca-bundle not referenced. Services will be # restarted below after new CA certificate has been distributed. @@ -323,6 +209,16 @@ with_items: "{{ client_users }}" - include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) - name: Distribute OpenShift CA certificate to nodes hosts: oo_nodes_to_config @@ -372,3 +268,13 @@ changed_when: false - include: ../../openshift-node/restart.yml + # Do not restart nodes when node certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index 8c8062585..afd5463b2 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -66,6 +66,7 @@ --signer-cert={{ openshift.common.config_base }}/master/ca.crt --signer-key={{ openshift.common.config_base }}/master/ca.key --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt + --config={{ mktemp.stdout }}/admin.kubeconfig --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index 9f14f2d69..748bbbf91 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -116,8 +116,9 @@ tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" 
--type=kubernetes.io/tls + --config={{ mktemp.stdout }}/admin.kubeconfig --confirm - -o json | {{ openshift.common.client_binary }} replace -f - + -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f - - name: Remove temporary router certificate and key files file: diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml deleted file mode 100644 index eaa8ce39c..000000000 --- a/playbooks/common/openshift-cluster/reset_excluder.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include_role: - name: openshift_excluder - tasks_from: enable diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml new file mode 100644 index 000000000..26716a92d --- /dev/null +++ b/playbooks/common/openshift-cluster/sanity_checks.yml @@ -0,0 +1,51 @@ +--- +- name: Verify Requirements + hosts: oo_all_hosts + tasks: + - fail: + msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool + + - fail: + msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Nuage sdn can not be used with flannel + when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Contiv can not be used with flannel + when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Contiv can not be used with nuage + when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool + + - fail: + msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. + when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool + + - fail: + msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both + when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Calico can not be used with Contiv in Openshift. 
Set either openshift_use_calico or openshift_use_contiv, but not both + when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: openshift_hostname must be 63 characters or less + when: openshift_hostname is defined and openshift_hostname | length > 63 + + - fail: + msg: openshift_public_hostname must be 63 characters or less + when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..599350258 --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,20 @@ +--- + +- name: Update Master configs + hosts: oo_masters + serial: 1 + tasks: + - block: + - include_role: + name: openshift_service_catalog + tasks_from: wire_aggregator + vars: + first_master: "{{ groups.oo_first_master[0] }}" + +- name: Service Catalog + hosts: oo_first_master + roles: + - openshift_service_catalog + - ansible_service_broker + vars: + first_master: "{{ groups.oo_first_master[0] }}" diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 6ed31a644..cef0072f3 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -7,10 +7,18 @@ tags: - always +- include: sanity_checks.yml + tags: + - always + - include: validate_hostnames.yml tags: - node +- include: initialize_openshift_repos.yml + tags: + - always + - include: initialize_openshift_version.yml tags: - always diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml index 1a6580795..eb118365a 100644 --- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml @@ -3,7 +3,7 @@ - name: Generate etcd instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: etcd_names_output with_sequence: count={{ num_etcd }} diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml index 36d7b7870..783f70f50 100644 --- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml @@ -3,7 +3,7 @@ - name: Generate master instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: master_names_output with_sequence: count={{ num_masters }} diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml index 278942f8b..c103e40a9 100644 --- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml @@ -5,7 +5,7 @@ - name: Generate node instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ 
openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" register: node_names_output with_sequence: count={{ number_nodes }} diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml deleted file mode 100644 index be956fca5..000000000 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- include: evaluate_groups.yml - -- name: Subscribe hosts, update repos and update OS packages - hosts: oo_hosts_to_update - roles: - # Explicitly calling openshift_facts because it appears that when - # rhel_subscribe is skipped that the openshift_facts dependency for - # openshift_repos is also skipped (this is the case at least for Ansible - # 2.0.2) - - openshift_facts - - role: rhel_subscribe - when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - - openshift_repos - - os_update_latest diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml deleted file mode 100644 index 9f7961614..000000000 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# This is a hack to allow us to use systemd_units.yml, but skip the handlers which -# restart services. We will unconditionally restart all containerized services -# because we have to unconditionally restart Docker: -- set_fact: - skip_node_svc_handlers: True - -- name: Update systemd units - include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} - -# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of -# play when the node has already been marked schedulable again. 
(this would look strange -# in logs otherwise) -- meta: flush_handlers diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml deleted file mode 100644 index a30952929..000000000 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Record excluder state and disable - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include: pre/validate_excluder.yml - vars: - excluder: "{{ openshift.common.service_type }}-docker-excluder" - when: enable_docker_excluder | default(enable_excluders) | default(True) | bool - - include: pre/validate_excluder.yml - vars: - excluder: "{{ openshift.common.service_type }}-excluder" - when: enable_openshift_excluder | default(enable_excluders) | default(True) | bool - - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: latest - docker_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml new file mode 100644 index 000000000..800621857 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml @@ -0,0 +1,12 @@ +--- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + r_openshift_excluder_verify_upgrade: true + r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" + r_openshift_excluder_package_state: latest + r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml new file mode 100644 index 000000000..a66301c0d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml @@ -0,0 +1,12 @@ +--- +- name: Disable excluders + hosts: oo_nodes_to_upgrade:!oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + r_openshift_excluder_verify_upgrade: true + r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" + r_openshift_excluder_package_state: latest + r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 07db071ce..98953f72e 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -4,7 +4,6 @@ # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - include: ../initialize_nodes_to_upgrade.yml @@ -52,11 +51,15 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets + {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade + register: l_docker_upgrade_drain_result + until: not l_docker_upgrade_drain_result | failed + retries: 60 + delay: 60 - - include: upgrade.yml + - include: tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 1b418920f..83f16ac0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -1,6 +1,10 @@ --- - name: Restart docker service: name=docker state=restarted + register: l_docker_restart_docker_in_upgrade_result + until: not l_docker_restart_docker_in_upgrade_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -11,7 +15,6 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -24,4 +27,5 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index 17f8fc6e9..808cc562c 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -4,7 +4,6 @@ - name: Stop containerized services service: name={{ item }} state=stopped with_items: - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -32,7 +31,13 @@ - debug: var=docker_image_count.stdout when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool -- service: name=docker state=stopped +- service: + name: docker + state: stopped + register: l_pb_docker_upgrade_stop_result + until: not l_pb_docker_upgrade_stop_result | failed + retries: 3 + delay: 30 - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index b2a2eac9a..52345a9ba 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -18,12 +18,16 @@ - name: Get current version of Docker command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" register: curr_docker_version + retries: 4 + 
until: curr_docker_version | succeeded changed_when: false - name: Get latest available version of Docker command: > {{ repoquery_cmd }} --qf '%{version}' "docker" register: avail_docker_version + retries: 4 + until: avail_docker_version | succeeded # Don't expect docker rpm to be available on hosts that don't already have it installed: when: pkg_check.rc == 0 failed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index fb51a0061..2cc6c9019 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,84 +1,14 @@ --- - name: Backup etcd - hosts: etcd_hosts_to_backup - vars: - embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + hosts: oo_etcd_hosts_to_backup roles: - - openshift_facts - tasks: - # Ensure we persist the etcd role for this host in openshift_facts - - openshift_facts: - role: etcd - local_facts: {} - when: "'etcd' not in openshift" - - set_fact: - etcd_backup_dir: "{{ openshift.etcd.etcd_data_dir }}/openshift-backup-{{ backup_tag | default('') }}{{ timestamp }}" - - # TODO: replace shell module with command and update later checks - - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 - register: avail_disk - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - # TODO: replace shell module with command and update later checks - - name: Check current etcd disk usage - shell: du --exclude='*openshift-backup*' -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: embedded_etcd | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Abort if insufficient disk space for etcd backup - fail: - msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. - when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - - # For non containerized and non embedded we should have the correct version of - # etcd installed already. So don't do anything. - # - # For containerized installs we now exec into etcd_container - # - # For embedded non containerized we need to ensure we have the latest version - # etcd on the host. 
- - name: Install latest etcd for embedded - package: - name: etcd - state: latest - when: - - embedded_etcd | bool - - not openshift.common.is_atomic | bool - - - name: Generate etcd backup - command: > - {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ etcd_backup_dir }} - - # According to the docs change you can simply copy snap/db - # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6 - - name: Check for v3 data store - stat: - path: "{{ openshift.etcd.etcd_data_dir }}/member/snap/db" - register: v3_db - - - name: Copy etcd v3 data store - command: > - cp -a {{ openshift.etcd.etcd_data_dir }}/member/snap/db - {{ etcd_backup_dir }}/member/snap/ - when: v3_db.stat.exists - - - set_fact: - etcd_backup_complete: True - - - name: Display location of etcd backup - debug: - msg: "Etcd backup created in {{ etcd_backup_dir }}" + - role: openshift_etcd_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_backup_tag: etcd_backup_tag + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup hosts: localhost @@ -87,10 +17,10 @@ tasks: - set_fact: etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + | oo_select_keys(groups.oo_etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml deleted file mode 100644 index 5f8b59e17..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Get current image - shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}' - register: current_image - -- name: Set new_etcd_image - set_fact: - new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}" - -- name: Pull new etcd image - command: "docker pull {{ new_etcd_image }}" - -- name: Update to latest etcd image - replace: - dest: /etc/systemd/system/etcd_container.service - regexp: "{{ current_image.stdout }}$" - replace: "{{ new_etcd_image }}" - -- name: Restart etcd_container - systemd: - name: etcd_container - daemon_reload: yes - state: restarted - -## TODO: probably should just move this into the backup playbooks, also this -## will fail on atomic host. We need to revisit how to do etcd backups there as -## the container may be newer than etcdctl on the host. 
Assumes etcd3 obsoletes etcd (7.3.1) -- name: Upgrade etcd for etcdctl when not atomic - package: name=etcd state=latest - when: not openshift.common.is_atomic | bool - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 - -- name: Store new etcd_image - openshift_facts: - role: etcd - local_facts: - etcd_image: "{{ new_etcd_image }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml deleted file mode 100644 index 30232110e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# F23 GA'd with etcd 2.0, currently has 2.2 in updates -# F24 GA'd with etcd-2.2, currently has 2.2 in updates -# F25 Beta currently has etcd 3.0 -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Update etcd - package: - name: "etcd" - state: "latest" - -- name: Restart etcd - service: - name: etcd - state: restarted - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh deleted file mode 120000 index 641e04e44..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh +++ /dev/null @@ -1 +0,0 @@ -../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index fa86d29fb..64abc54e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -5,42 +5,19 @@ # mirrored packages on your own because only the GA and latest versions are # available in the repos. So for Fedora we'll simply skip this, sorry. -- include: ../../evaluate_groups.yml - tags: - - always - -# We use two groups one for hosts we're upgrading which doesn't include embedded etcd -# The other for backing up which includes the embedded etcd host, there's no need to -# upgrade embedded etcd that just happens when the master is updated. -- name: Evaluate additional groups for etcd - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_upgrade - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_upgrade - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" - changed_when: False - - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False - - name: Backup etcd before upgrading anything include: backup.yml vars: - backup_tag: "pre-upgrade-" + etcd_backup_tag: "pre-upgrade-" when: openshift_etcd_backup | default(true) | bool - name: Drop etcdctl profiles - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - include: roles/etcd/tasks/etcdctl.yml + - include_role: + name: etcd_common + vars: + r_etcd_common_action: drop_etcdctl - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml deleted file mode 100644 index 3a972e8ab..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Update etcd RPM - package: - name: etcd-{{ upgrade_version }}* - state: latest - -- name: Restart etcd - service: - name: etcd - state: restarted - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index a9b5b94e6..39e82498d 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -1,119 +1,110 @@ --- - name: Determine etcd version - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - name: Record RPM based etcd version - command: rpm -qa --qf '%{version}' etcd\* - args: - warn: no - register: etcd_rpm_version - failed_when: false - when: not openshift.common.is_containerized | bool - # AUDIT:changed_when: `false` because we are 
only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - -# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop -# through hosts, then loop through tasks only when appropriate -- name: Upgrade to 2.1 - hosts: etcd_hosts_to_upgrade - serial: 1 + - block: + - name: Record RPM based etcd version + command: rpm -qa --qf '%{version}' etcd\* + args: + warn: no + register: etcd_rpm_version + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + - debug: + msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected" + when: + - not openshift.common.is_containerized | bool + + - block: + - name: Record containerized etcd version (docker) + command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* + register: etcd_container_version_docker + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + when: + - not openshift.common.is_etcd_system_container | bool + + # Given that a registered variable is set even if the when condition + # is false, we need to set etcd_container_version separately + - set_fact: + etcd_container_version: "{{ etcd_container_version_docker.stdout }}" + when: + - not openshift.common.is_etcd_system_container | bool + + - name: Record containerized etcd version (runc) + command: runc exec etcd rpm -qa --qf '%{version}' etcd\* + register: etcd_container_version_runc + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + when: + - openshift.common.is_etcd_system_container | bool + + # Given that a registered variable is set even if the when condition + # is false, we need to set etcd_container_version separately + - set_fact: + etcd_container_version: "{{ etcd_container_version_runc.stdout }}" + when: + - openshift.common.is_etcd_system_container | bool + + - debug: + msg: "Etcd containerized version {{ etcd_container_version }} detected" + when: + - openshift.common.is_containerized | bool + +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.1' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized
| bool + etcd_upgrade_version: '2.1' -- name: Upgrade RPM hosts to 2.2 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.2' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '2.2' -- name: Upgrade containerized hosts to 2.2.5 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 2.2.5 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '2.2.5' -- name: Upgrade RPM hosts to 2.3 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.3' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '2.3' -- name: Upgrade containerized hosts to 2.3.7 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 2.3.7 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '2.3.7' -- name: Upgrade RPM hosts to 3.0 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '3.0' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '3.0' -- name: Upgrade containerized hosts to etcd3 image - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 3.0.15 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '3.0.15' + +- include: upgrade_rpm_members.yml + vars: + etcd_upgrade_version: '3.1' + +- include: upgrade_image_members.yml + vars: + etcd_upgrade_version: '3.1.3' - name: Upgrade fedora to latest - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - - include: fedora_tasks.yml - when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool + - include_role: + name: etcd_upgrade + when: + - ansible_distribution == 'Fedora' + - not openshift.common.is_containerized | bool - name: Backup etcd include: backup.yml vars: - backup_tag: "post-3.0-" + etcd_backup_tag: "post-3.0-" when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml new file mode 100644 index 000000000..831ca8f57 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml @@ -0,0 +1,17 @@ +--- +# INPUT etcd_upgrade_version +# INPUT etcd_container_version +# INPUT openshift.common.is_containerized +- name: Upgrade containerized hosts to {{ etcd_upgrade_version }} + hosts: oo_etcd_hosts_to_upgrade + serial: 1 + roles: + - role: etcd_upgrade + r_etcd_upgrade_action: upgrade + 
r_etcd_upgrade_mechanism: image + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_peer: "{{ openshift.common.hostname }}" + when: + - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<') + - openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml new file mode 100644 index 000000000..2e79451e0 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml @@ -0,0 +1,18 @@ +--- +# INPUT etcd_upgrade_version +# INPUT etcd_rpm_version +# INPUT openshift.common.is_containerized +- name: Upgrade to {{ etcd_upgrade_version }} + hosts: oo_etcd_hosts_to_upgrade + serial: 1 + roles: + - role: etcd_upgrade + r_etcd_upgrade_action: upgrade + r_etcd_upgrade_mechanism: rpm + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" + when: + - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<') + - ansible_distribution == 'RedHat' + - not openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index cbf6d58b3..c98065cf4 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -4,23 +4,11 @@ # Do not allow adding hosts during upgrade. g_new_master_hosts: [] g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - include: ../initialize_oo_option_facts.yml - include: ../initialize_facts.yml -- name: Ensure clean repo cache in the event repos have been changed manually - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - name: Clean package cache - command: "{{ ansible_pkg_mgr }} clean all" - when: not openshift.common.is_atomic | bool - args: - warn: no - - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 046535680..72de63070 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -6,27 +6,32 @@ - lib_openshift tasks: - - name: Retrieve list of openshift nodes matching upgrade label - oc_obj: - state: list - kind: node - selector: "{{ openshift_upgrade_nodes_label }}" - register: nodes_to_upgrade - when: openshift_upgrade_nodes_label is defined + - when: openshift_upgrade_nodes_label is defined + block: + - name: Retrieve list of openshift nodes matching upgrade label + oc_obj: + state: list + kind: node + selector: "{{ openshift_upgrade_nodes_label }}" + register: nodes_to_upgrade - # We got a list of nodes with the label, now we need to match these with inventory hosts - # using their openshift.common.hostname fact. 
- - name: Map labelled nodes to inventory hosts - add_host: - name: "{{ item }}" - groups: temp_nodes_to_upgrade - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: " {{ groups['oo_nodes_to_config'] }}" - when: - - openshift_upgrade_nodes_label is defined - - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list - changed_when: false + - name: Fail if no nodes match openshift_upgrade_nodes_label + fail: + msg: "openshift_upgrade_nodes_label was specified but no nodes matched" + when: nodes_to_upgrade.results.results[0]['items'] | length == 0 + + # We got a list of nodes with the label, now we need to match these with inventory hosts + # using their openshift.common.hostname fact. + - name: Map labelled nodes to inventory hosts + add_host: + name: "{{ item }}" + groups: temp_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: " {{ groups['oo_nodes_to_config'] }}" + when: + - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list + changed_when: false # Build up the oo_nodes_to_upgrade group, use the list filtered by label if # present, otherwise hit all nodes: diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 673f11889..4eac8b067 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - """Ansible module for modifying OpenShift configs during an upgrade""" import os diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker deleted file mode 120000 index 6aeca2842..000000000 --- a/playbooks/common/openshift-cluster/upgrades/master_docker +++ /dev/null @@ -1 +0,0 @@ -../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 0d7cdb227..d9ddf3860 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -9,6 +9,8 @@ replace ( '${version}', openshift_image_tag ) }}" router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" + registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') | + replace ( '${version}', 'v' ~ openshift.common.short_version ) }}" pre_tasks: - name: Load lib_openshift modules @@ -61,6 +63,26 @@ when: - _default_registry.results.results[0] != {} + - name: Check for registry-console + oc_obj: + state: list + kind: dc + name: registry-console + register: _registry_console + when: + - openshift.common.deployment_type != 'origin' + + - name: Update registry-console image to current version + oc_edit: + kind: dc + name: registry-console + namespace: default + content: + spec.template.spec.containers[0].image: "{{ registry_console_image }}" + when: + - openshift.common.deployment_type != 'origin' + - _registry_console.results.results[0] != {} + roles: - openshift_manageiq # Create the new templates shipped in 3.2, existing templates are left @@ -97,6 +119,12 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml new file mode 100644 index 000000000..6d8503879 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml @@ -0,0 +1,22 @@ +--- +# Only check if docker upgrade is required if docker_upgrade is not +# already set to False. 
+- include: ../../docker/upgrade_check.yml + when: + - docker_upgrade is not defined or (docker_upgrade | bool) + - not (openshift.common.is_atomic | bool) + +# Additional checks for Atomic hosts: + +- name: Determine available Docker + shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" + register: g_atomic_docker_version_result + when: openshift.common.is_atomic | bool + +- set_fact: + l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" + when: openshift.common.is_atomic | bool + +- fail: + msg: This playbook requires access to Docker 1.12 or later + when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml deleted file mode 100644 index 6de1ed061..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# input variables: -# - repoquery_cmd -# - excluder -# - openshift_upgrade_target -- block: - - name: Get available excluder version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" - register: excluder_version - failed_when: false - changed_when: false - - - name: Docker excluder version detected - debug: - msg: "{{ excluder }}: {{ excluder_version.stdout }}" - - - name: Printing upgrade target version - debug: - msg: "{{ openshift_upgrade_target }}" - - - name: Check the available {{ excluder }} version is at most of the upgrade target version - fail: - msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version" - when: - - "{{ excluder_version.stdout != '' }}" - - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}" - when: - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 06eb5f936..45022cd61 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,23 +9,16 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - name: Ensure Master is running - service: - name: "{{ openshift.common.service_type }}-master" - state: started - enabled: yes - when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool - - name: Ensure HA Master is running service: name: "{{ openshift.common.service_type }}-master-api" state: started enabled: yes - when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + when: openshift.common.is_containerized | bool - name: Ensure HA Master is running service: name: "{{ openshift.common.service_type }}-master-controllers" state: started enabled: yes - when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml deleted file mode 
100644 index 7646e0fa6..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - # Only check if docker upgrade is required if docker_upgrade is not - # already set to False. - - include: ../docker/upgrade_check.yml - when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool - - # Additional checks for Atomic hosts: - - - name: Determine available Docker - shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" - register: g_atomic_docker_version_result - when: openshift.common.is_atomic | bool - - - set_fact: - l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" - when: openshift.common.is_atomic | bool - - - fail: - msg: This playbook requires access to Docker 1.12 or later - when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml new file mode 100644 index 000000000..497709d25 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -0,0 +1,13 @@ +--- +- name: Verify Host Requirements + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: upgrade + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml deleted file mode 100644 index 354af3cde..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Verify node processes - hosts: oo_nodes_to_config - roles: - - openshift_facts - - openshift_docker_facts - tasks: - - name: Ensure Node is running - service: - name: "{{ openshift.common.service_type }}-node" - state: started - enabled: yes - when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index c83923dae..9b4a8e413 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -1,41 +1,43 @@ --- -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - pre_tasks: - - fail: - msg: Verify OpenShift is already installed - when: openshift.common.version is not defined +- name: Fail when OpenShift is not installed + fail: + msg: Verify OpenShift is already installed + when: openshift.common.version is not defined - - fail: - msg: Verify the correct version was found - when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version +- name: Verify containers are available for upgrade + command: > + docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer 
image' in pull_result.stdout" + when: openshift.common.is_containerized | bool - - set_fact: - g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - when: not openshift.common.is_containerized | bool +- when: not openshift.common.is_containerized | bool + block: + - name: Check latest available OpenShift RPM version + repoquery: + name: "{{ openshift.common.service_type }}" + ignore_excluders: true + register: repoquery_out - - name: Verify containers are available for upgrade - command: > - docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool + - name: Fail when unable to determine available OpenShift RPM version + fail: + msg: "Unable to determine available OpenShift RPM version" + when: + - not repoquery_out.results.package_found - - name: Check latest available OpenShift RPM version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}" - failed_when: false - changed_when: false - register: avail_openshift_version - when: not openshift.common.is_containerized | bool + - name: Set fact avail_openshift_version + set_fact: + avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}" - name: Verify OpenShift RPMs are available for upgrade fail: - msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required" - when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<') + msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" + when: + - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<') - - fail: - msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" - when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<') +- name: Fail when OpenShift version does not meet minimum requirement for Origin upgrade + fail: + msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" + when: + - deployment_type == 'origin' + - openshift.common.version | version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 03ac02e9f..164baca81 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -1,27 +1,39 @@ --- -# We verified latest rpm available is suitable, so just yum update. +# When we update package "a-${version}" and a requires b >= ${version}, yum +# will pick the latest available version of b unless we pin it, and the whole +# set of dependencies ends up at the latest version. Since the package module, +# unlike the yum module, doesn't flatten a list of packages into one +# transaction, we need to do that explicitly. The Ansible core team tells us +# not to rely on yum module transaction flattening anyway. 
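# A short sketch of the pattern described above (the package names and version
# are hypothetical, not taken from this patch): the pinned names are joined
# into a single package call so yum resolves them all in one transaction.
#
#   - name: Upgrade pinned packages in one transaction (sketch)
#     package:
#       name: "{{ pkgs | join(',') }}"
#       state: present
#     vars:
#       pkgs:
#         - "origin-3.6.0"          # hypothetical pinned versions
#         - "origin-node-3.6.0"
#         - "origin-sdn-ovs-3.6.0"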
+ +# TODO: If the sdn package isn't already installed this will install it, we +# should fix that -# Master package upgrade ends up depending on node and sdn packages, we need to be explicit -# with all versions to avoid yum from accidentally jumping to something newer than intended: - name: Upgrade master packages - package: name={{ item }} state=present - when: component == "master" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "master" + - not openshift.common.is_atomic | bool - name: Upgrade node packages - package: name={{ item }} state=present - when: component == "node" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool + package: name={{ node_pkgs | join(',') }} state=present + vars: + node_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "node" + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c6e799261..b75aae589 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,17 +2,22 @@ ############################################################################### # Upgrade Masters ############################################################################### -- name: Evaluate additional groups for upgrade - hosts: localhost - connection: local - become: no + +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade all storage + hosts: oo_first_master tasks: - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is 
defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False + - name: Upgrade all storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection @@ -86,7 +91,7 @@ - include_vars: ../../../../roles/openshift_master/vars/main.yml - - name: Update systemd units + - name: Remove any legacy systemd units and update systemd units include: ../../../../roles/openshift_master/tasks/systemd_units.yml - name: Check for ca-bundle.crt @@ -118,8 +123,8 @@ yedit: src: "{{ openshift.common.config_base }}/master/master-config.yaml" key: 'imageConfig.format' - value: "{{ oreg_url }}" - when: oreg_url is defined + value: "{{ oreg_url | default(oreg_url_master) }}" + when: oreg_url is defined or oreg_url_master is defined # Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" @@ -141,6 +146,19 @@ - include: "{{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined + - name: Post master upgrade - Upgrade clusterpolicies storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=clusterpolicies --confirm + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false) | bool + run_once: true + delegate_to: "{{ groups.oo_first_master.0 }}" + - set_fact: master_update_complete: True @@ -216,13 +234,25 @@ - name: Reconcile Security Context Constraints command: > - {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name + {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name register: reconcile_scc_result changed_when: - reconcile_scc_result.stdout != '' - reconcile_scc_result.rc == 0 run_once: true + - name: Migrate storage post policy reconciliation + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + run_once: true + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false) | bool + - set_fact: reconcile_complete: True @@ -251,15 +281,15 @@ roles: - 
openshift_facts tasks: - - include: docker/upgrade.yml + - include: docker/tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - name: Drain and upgrade master nodes hosts: oo_masters_to_config:&oo_nodes_to_upgrade # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. - serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" - any_errors_fatal: true + serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}" + max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}" pre_tasks: - name: Load lib_openshift modules @@ -281,13 +311,18 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_control_plane_drain_result + until: not l_upgrade_control_plane_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift - openshift_facts - docker + - openshift_node_dnsmasq - openshift_node_upgrade post_tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index e9f894942..c93a5d89c 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -4,7 +4,7 @@ # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. 
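# For example (the wrapper playbook path and inventory file are assumptions,
# not taken from this patch), both knobs are passed with -e at invocation time:
#   ansible-playbook -i hosts <node upgrade playbook> \
#     -e openshift_upgrade_nodes_serial="20%" \
#     -e openshift_upgrade_nodes_max_fail_percentage=10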
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" - any_errors_fatal: true + max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" pre_tasks: - name: Load lib_openshift modules @@ -26,14 +26,22 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift - openshift_facts - docker + - openshift_node_dnsmasq - openshift_node_upgrade + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" post_tasks: - name: Set node schedulability @@ -46,7 +54,3 @@ register: node_schedulable until: node_schedulable|succeeded when: node_unschedulable|changed - -- include: ../reset_excluder.yml - tags: - - always diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 83d2cec81..8558bf3e9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -74,18 +74,21 @@ - block: - debug: msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] - set_fact: openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] - set_fact: openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" + when: + - openshift_master_scheduler_current_predicates != default_predicates_no_region + - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] when: openshift_master_scheduler_predicates | default(none) is none @@ -131,18 +134,21 @@ - block: - debug: msg: "WARNING: existing scheduler config does not match 
previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - set_fact: openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] - set_fact: openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" + when: + - openshift_master_scheduler_current_priorities != default_priorities_no_zone + - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] when: openshift_master_scheduler_priorities | default(none) is none @@ -162,5 +168,6 @@ content: "{{ scheduler_config | to_nice_json }}" dest: "{{ openshift_master_scheduler_conf }}" backup: true - when: "{{ openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined }}" + when: > + openshift_upgrade_scheduler_predicates is defined or + openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml new file mode 100644 index 000000000..a241ef039 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -0,0 +1,118 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | 
oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..54c85f0fb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,118 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
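# A usage sketch of that two-phase split (the byo wrapper paths and the
# inventory file are assumptions, not shown in this patch):
#   ansible-playbook -i hosts playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
#   ansible-playbook -i hosts playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml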
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml new file mode 100644 index 000000000..cee4e9087 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -0,0 +1,113 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..ae217ba2e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,116 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..d7cb38d03 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,118 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..8531e6045 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. - skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml new file mode 100644 index 000000000..a3d0d6305 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -0,0 +1,118 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- 
include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml new file mode 100644 index 000000000..5fee56615 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_5/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml new file mode 100644 index 000000000..e29d0f8e6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
+ when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml new file mode 100644 index 000000000..51acd17da --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -0,0 +1,122 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
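Every pre-flight play in these upgrade playbooks carries the `pre_upgrade` tag, which is what lets an operator rehearse the checks without touching the cluster (for example `ansible-playbook upgrade.yml --tags pre_upgrade`). A minimal, hypothetical sketch of that gating pattern, not taken from the repository:

```yaml
---
# Plays tagged pre_upgrade run under `--tags pre_upgrade`; untagged plays do not.
- name: Pre-flight checks (selected by --tags pre_upgrade)
  hosts: all
  gather_facts: no
  tags:
    - pre_upgrade
  tasks:
    - name: Placeholder validation step
      debug:
        msg: "non-destructive verification would run here"

- name: Actual upgrade work (skipped by --tags pre_upgrade)
  hosts: all
  gather_facts: no
  tasks:
    - name: Placeholder upgrade step
      debug:
        msg: "disruptive changes would run here"
```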
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml new file mode 100644 index 000000000..9fe059ac9 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml new file mode 100644 index 000000000..1b10d4e37 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml index ac5704f69..78c1767b8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml @@ -7,4 +7,6 @@ hosts: oo_first_master roles: - { role: lib_openshift } - tasks: [] + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..9ec40723a --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,122 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
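The `openshift_upgrade_target` and `openshift_upgrade_min` facts set at the top of each of these playbooks feed the shared `verify_upgrade_targets` include, which refuses to proceed when the installed release is below the supported floor. A rough, self-contained sketch of that kind of check (assuming Ansible's `version_compare` test; the variable values are illustrative, not repository code):

```yaml
---
# Standalone illustration of an upgrade version floor check.
- name: Sketch of an upgrade version gate
  hosts: localhost
  gather_facts: no
  vars:
    openshift_upgrade_min: '3.6'
    installed_version: '3.5.1'   # stand-in for openshift.common.version
  tasks:
    - name: Refuse to upgrade from a release older than the supported minimum
      fail:
        msg: "{{ installed_version }} is older than the minimum supported release {{ openshift_upgrade_min }}"
      when: installed_version | version_compare(openshift_upgrade_min, '<')
```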
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..f97f34c3b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..e95b90cd5 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml new file mode 100644 index 000000000..f76fc68d1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -0,0 +1,23 @@ +--- +############################################################################### +# Pre upgrade checks for known data problems, if this playbook fails you should +# contact support. If you're not supported contact users@lists.openshift.com +############################################################################### +- name: Verify 3.7 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: + + - name: Confirm OpenShift authorization objects are in sync + command: > + {{ openshift.common.client_binary }} adm migrate authorization + when: not openshift.common.version_gte_3_7 | bool + changed_when: false + register: l_oc_result + until: l_oc_result.rc == 0 + retries: 4 + delay: 15 diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index 48cc03b19..be2e6a15a 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -1,16 +1,23 @@ --- -- name: Gather and set facts for node hosts +- name: Validate node hostnames hosts: oo_nodes_to_config - roles: - - openshift_facts tasks: - - shell: + - name: Query DNS for IP address of {{ openshift.common.hostname }} + shell: getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' register: lookupip changed_when: false failed_when: false - name: Warn user about bad openshift_hostname values pause: - prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort." 
+ prompt: + The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }} + doesn't resolve to an IP address owned by this host. Please set + openshift_hostname variable to a hostname that when resolved on the host + in question resolves to an IP address matching an interface on this + host. This host will fail liveness checks for pods utilizing hostPorts, + press ENTER to continue or CTRL-C to abort. seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" - when: lookupip.stdout not in ansible_all_ipv4_addresses + when: + - lookupip.stdout != '127.0.0.1' + - lookupip.stdout not in ansible_all_ipv4_addresses diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 1b8106e0e..f2b85eea1 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -3,8 +3,10 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: + - role: os_firewall - role: openshift_etcd etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml new file mode 100644 index 000000000..e4ab0aa41 --- /dev/null +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -0,0 +1,147 @@ +--- +- name: Run pre-checks + hosts: oo_etcd_to_migrate + roles: + - role: etcd_migrate + r_etcd_migrate_action: check + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + +# TODO: This will be different for release-3.6 branch +- name: Prepare masters for etcd data migration + hosts: oo_masters_to_config + tasks: + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master-controllers' }}" + - "{{ openshift.common.service_type + '-master-api' }}" + - debug: + msg: "master service name: {{ master_services }}" + - name: Stop masters + service: + name: "{{ item }}" + state: stopped + with_items: "{{ master_services }}" + +- name: Backup v2 data + hosts: oo_etcd_to_migrate + gather_facts: no + roles: + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migration + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}" + - fail: + msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: + - etcd_backup_failed | length > 0 + +- name: Stop etcd + hosts: oo_etcd_to_migrate + gather_facts: no + pre_tasks: + - set_fact: + l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" + - name: Disable etcd members + service: + name: "{{ l_etcd_service }}" + state: stopped + +- name: Migrate data on first etcd + hosts: oo_etcd_to_migrate[0] + gather_facts: no + roles: + - role: etcd_migrate + r_etcd_migrate_action: migrate + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + +- name: Clean data stores on remaining etcd hosts + hosts: oo_etcd_to_migrate[1:] + gather_facts: no + roles: + - role: etcd_migrate + r_etcd_migrate_action: clean_data + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + post_tasks: + - name: Add etcd hosts + delegate_to: localhost + add_host: + name: "{{ item }}" + groups: oo_new_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}" + changed_when: no + - name: Set success + set_fact: + r_etcd_migrate_success: true + +- include: ./scaleup.yml + +- name: Gate on etcd migration + hosts: oo_masters_to_config + gather_facts: no + tasks: + - set_fact: + etcd_migration_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" + - set_fact: + etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}" + +- name: Add TTLs on the first master + hosts: oo_first_master[0] + roles: + - role: etcd_migrate + r_etcd_migrate_action: add_ttls + etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + when: etcd_migration_failed | length == 0 + +- name: Configure masters if etcd data migration is succesfull + hosts: oo_masters_to_config + roles: + - role: etcd_migrate + r_etcd_migrate_action: configure + when: etcd_migration_failed | length == 0 + tasks: + - debug: + msg: "Skipping master re-configuration since migration failed." + when: + - etcd_migration_failed | length > 0 + - name: Start master services + service: + name: "{{ item }}" + state: started + register: service_status + # Sometimes the master-api, resp. master-controllers fails to start for the first time + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 + with_items: "{{ master_services[::-1] }}" + - fail: + msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" + when: + - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@ tasks: - name: restart etcd service: - name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..d3fa48bad --- /dev/null +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -0,0 +1,74 @@ +--- +- name: Gather facts + hosts: oo_etcd_to_config:oo_new_etcd_to_config + roles: + - openshift_etcd_facts + post_tasks: + - set_fact: + etcd_hostname: "{{ etcd_hostname }}" + etcd_ip: "{{ etcd_ip }}" + +- name: Configure etcd + hosts: oo_new_etcd_to_config + serial: 1 + any_errors_fatal: true + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + pre_tasks: + - name: Add new etcd members to cluster + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} + delegate_to: "{{ etcd_ca_host }}" + failed_when: + - etcd_add_check.rc == 1 + - ("peerURL exists" not in etcd_add_check.stderr) + register: etcd_add_check + retries: 3 + delay: 10 + until: etcd_add_check.rc == 0 + roles: + - role: os_firewall + when: etcd_add_check.rc == 0 + - role: openshift_etcd + when: etcd_add_check.rc == 0 + etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_initial_cluster_state: "existing" + initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" + etcd_ca_setup: False + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + - role: nickhammond.logrotate + when: etcd_add_check.rc == 0 + post_tasks: + - name: Verify cluster is stable + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + cluster-health + register: scaleup_health + retries: 3 + delay: 30 + until: scaleup_health.rc == 0 + +- name: Update master etcd client urls + hosts: oo_masters_to_config + serial: 1 + tasks: + - include_role: + name: openshift_master + tasks_from: update_etcd_client_urls + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" 
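For context on the `initial_etcd_cluster` expression in the scale-up play above: with the v2 API, `etcdctl member add` prints the environment variables a joining member needs, and the play scrapes the `ETCD_INITIAL_CLUSTER` line out of that output. A hypothetical, self-contained sketch of that extraction follows; the sample output shape is an assumption, not captured from a real cluster:

```yaml
---
# Illustration only; mirrors the regex_replace parsing used in scaleup.yml.
- name: Sketch of parsing `etcdctl member add` output
  hosts: localhost
  gather_facts: no
  vars:
    etcd_add_check:
      stdout_lines:
        - 'Added member named etcd2 with ID 6d4f535bae3ab960 to cluster'
        - ''
        - 'ETCD_NAME="etcd2"'
        - 'ETCD_INITIAL_CLUSTER="etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380"'
  tasks:
    - name: Strip the variable name and surrounding quotes
      set_fact:
        initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"

    - name: Show the comma-separated peer list handed to the new member
      debug:
        var: initial_etcd_cluster
```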
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml deleted file mode 100644 index ced4bddc5..000000000 --- a/playbooks/common/openshift-etcd/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_masters host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_etcd - add_host: - name: "{{ item }}" - groups: g_service_etcd - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change etcd state on etcd instance(s) - hosts: g_service_etcd - connection: ssh - gather_facts: no - tasks: - - service: name=etcd state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 75faf5ba8..d9de578f3 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,21 +1,26 @@ --- -- name: Open firewall ports for GlusterFS - hosts: oo_glusterfs_to_config - vars: - os_firewall_allow: - - service: glusterfs_sshd - port: "2222/tcp" - - service: glusterfs_daemon - port: "24007/tcp" - - service: glusterfs_management - port: "24008/tcp" - - service: glusterfs_bricks - port: "49152-49251/tcp" - roles: - - os_firewall +- name: Open firewall ports for GlusterFS nodes + hosts: glusterfs + tasks: + - include_role: + name: openshift_storage_glusterfs + tasks_from: firewall.yml + when: + - openshift_storage_glusterfs_is_native | default(True) | bool + +- name: Open firewall ports for GlusterFS registry nodes + hosts: glusterfs_registry + tasks: + - include_role: + name: openshift_storage_glusterfs + tasks_from: firewall.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master - roles: - - role: openshift_storage_glusterfs + tasks: + - name: setup glusterfs + include_role: + name: openshift_storage_glusterfs when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..80cf7529e --- /dev/null +++ b/playbooks/common/openshift-glusterfs/registry.yml @@ -0,0 +1,49 @@ +--- +- include: config.yml + +- name: Initialize GlusterFS registry PV and PVC vars + hosts: oo_first_master + tags: hosted + tasks: + - set_fact: + glusterfs_pv: [] + glusterfs_pvc: [] + + - set_fact: + glusterfs_pv: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + storage: + glusterfs: + endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" + path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" + readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" + glusterfs_pvc: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + when: openshift.hosted.registry.storage.glusterfs.swap + +- name: Create persistent volumes + hosts: oo_first_master + tags: + - hosted + vars: + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] 
| oo_persistent_volumes(groups, glusterfs_pv) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + +- name: Create Hosted Resources + hosts: oo_first_master + tags: + - hosted + pre_tasks: + - set_fact: + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index c414913bf..09ed81a83 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -12,5 +12,7 @@ openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none))) + openshift_loadbalancer_additional_backends | default([]) }}" + openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: + - role: os_firewall - role: openshift_loadbalancer diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml deleted file mode 100644 index d3762c961..000000000 --- a/playbooks/common/openshift-loadbalancer/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_nodes host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_lb - add_host: - name: "{{ item }}" - groups: g_service_lb - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on lb instance(s) - hosts: g_service_lb - connection: ssh - gather_facts: no - tasks: - - service: name=haproxy state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index c0ea93d2c..7468c78f0 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -11,13 +11,13 @@ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" - when: openshift.common.install_examples | bool + when: openshift_install_examples | default(True) - role: openshift_hosted_templates registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq - when: openshift.common.use_manageiq | bool + when: openshift_use_manageiq | default(false) | bool - role: cockpit when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' ) - role: flannel_register - when: openshift.common.use_flannel | bool + when: openshift_use_flannel | default(false) | bool diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 
60cf56108..e1b9a4964 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,10 +1,31 @@ --- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + - name: Gather and set facts for master hosts hosts: oo_masters_to_config vars: t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}" pre_tasks: + # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 + # + # When scaling up a cluster upgraded from OCP <= 3.5, ensure that + # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing + # masters, or absent if such is the case. + - name: Detect if this host is a new master in a scale up + set_fact: + g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" + + - name: Scaleup Detection + debug: + var: g_openshift_master_is_scaleup + - name: Check for RPM generated config marker file .config_managed stat: path: /etc/origin/.config_managed @@ -69,7 +90,7 @@ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- name: Determine if session secrets must be generated +- name: Inspect state of first master config settings hosts: oo_first_master roles: - role: openshift_facts @@ -79,6 +100,60 @@ local_facts: session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + + - name: Check if atomic-openshift-master sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master + register: l_aom_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master + register: l_default_registry_defined + when: l_aom_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-api sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-api + register: l_aom_api_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api + register: l_default_registry_defined_api + when: l_aom_api_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-controllers sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-controllers + register: l_aom_controllers_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers + 
register: l_default_registry_defined_controllers + when: l_aom_controllers_exists.stat.exists | bool + + - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value + set_fact: + l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" + l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" + l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" - name: Generate master session secrets hosts: oo_first_master @@ -104,29 +179,55 @@ openshift_master_count: "{{ openshift.master.master_count }}" openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - roles: - - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) | oo_collect('openshift.common.hostname') | default(none, true) }}" - openshift_master_hosts: "{{ groups.oo_masters_to_config }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.ip') | default([]) | join(',') + }}" + roles: + - role: os_firewall + - role: openshift_master_facts + - role: openshift_hosted_facts + - role: openshift_master_certificates + - role: openshift_etcd_client_certificates etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + when: groups.oo_etcd_to_config | default([]) | length != 0 + - role: openshift_clock + - role: openshift_cloud_provider + - role: openshift_builddefaults + - role: openshift_buildoverrides + - role: nickhammond.logrotate + - role: contiv + contiv_role: netmaster + when: openshift_use_contiv | default(False) | bool + - role: openshift_master + openshift_master_hosts: "{{ groups.oo_masters_to_config }}" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" + openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" + openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" + openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" + openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" - role: nuage_master - when: openshift.common.use_nuage | bool + when: openshift_use_nuage | default(false) | bool - role: calico_master - when: openshift.common.use_calico | bool - + when: openshift_use_calico | default(false) | bool post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ 
openshift.common.deployment_type }} changed_when: False + +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 6fec346c3..4d73b8124 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -7,7 +7,7 @@ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 handlers: - - include: roles/openshift_master/handlers/main.yml + - include: ../../../roles/openshift_master/handlers/main.yml static: yes roles: - openshift_facts diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index 67ba0aa2e..a5dbe0590 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -37,3 +37,4 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 508b5a3ac..4f8b758fd 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,9 +1,4 @@ --- -- name: Restart master - service: - name: "{{ openshift.common.service_type }}-master" - state: restarted - when: not openshift_master_ha | bool - name: Restart master API service: name: "{{ openshift.common.service_type }}-master-api" @@ -15,6 +10,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: openshift_master_ha | bool - name: Restart master controllers service: diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 92f16dc47..17f9ef4bc 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -1,11 +1,4 @@ --- -- include: ../openshift-cluster/evaluate_groups.yml - -- name: Gather facts - hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config - roles: - - openshift_facts - - name: Update master count hosts: oo_masters:!oo_masters_to_config serial: 1 @@ -50,26 +43,8 @@ delay: 1 changed_when: false -- name: Configure docker hosts - hosts: oo_masters_to-config:oo_nodes_to_config - vars: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" - docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" - docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" - roles: - - openshift_facts - - openshift_docker - -- include: ../openshift-cluster/disable_excluder.yml - tags: - - always - - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml - include: ../openshift-node/config.yml - -- include: ../openshift-cluster/reset_excluder.yml - tags: - - always diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml deleted file mode 100644 index 48a2731aa..000000000 --- a/playbooks/common/openshift-master/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_masters host group if needed - hosts: localhost - gather_facts: no - connection: local - 
become: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - groups: g_service_masters - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on master instance(s) - hosts: g_service_masters - connection: ssh - gather_facts: no - tasks: - - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index 000e46e80..64ea0d3c4 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -2,5 +2,5 @@ - name: Configure nfs hosts: oo_nfs_to_config roles: - - role: openshift_facts + - role: os_firewall - role: openshift_storage_nfs diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml deleted file mode 100644 index b1e35e4b1..000000000 --- a/playbooks/common/openshift-nfs/service.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Populate g_service_nfs host group if needed - hosts: localhost - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_nfs - add_host: - name: "{{ item }}" - groups: g_service_nfs - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on nfs instance(s) - hosts: g_service_nfs - connection: ssh - gather_facts: no - tasks: - - service: name=nfs-server state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 792ffb4e2..0801c41ff 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,24 +1,11 @@ --- -- name: Gather and set facts for node hosts +- name: Disable excluders hosts: oo_nodes_to_config - vars: - t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - pre_tasks: - - set_fact: - openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}" - when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != "" + gather_facts: no roles: - - openshift_facts - tasks: - # Since the master is generating the node certificates before they are - # configured, we need to make sure to set the node properties beforehand if - # we do not want the defaults - - openshift_facts: - role: node - local_facts: - labels: "{{ openshift_node_labels | default(None) }}" - annotations: "{{ openshift_node_annotations | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Evaluate node groups hosts: localhost @@ -32,7 +19,11 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + when: + - hostvars[item].openshift is defined + - hostvars[item].openshift.common is defined + - hostvars[item].openshift.common.is_containerized | bool + 
- (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) changed_when: False - name: Configure containerized nodes @@ -47,9 +38,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + roles: + - role: os_firewall - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -64,9 +55,8 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" roles: + - role: os_firewall - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -81,18 +71,27 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" - when: openshift.common.use_flannel | bool + when: openshift_use_flannel | default(false) | bool - role: calico - when: openshift.common.use_calico | bool + when: openshift_use_calico | default(false) | bool - role: nuage_node - when: openshift.common.use_nuage | bool + when: openshift_use_nuage | default(false) | bool - role: contiv contiv_role: netplugin - when: openshift.common.use_contiv | bool + when: openshift_use_contiv | default(false) | bool - role: nickhammond.logrotate - role: openshift_manage_node openshift_master_host: "{{ groups.oo_first_master.0 }}" + when: not openshift_node_bootstrap | default(False) tasks: - name: Create group for deployment type group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} changed_when: False + +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml index be050c12c..b3a7399dc 100644 --- a/playbooks/common/openshift-node/network_manager.yml +++ b/playbooks/common/openshift-node/network_manager.yml @@ -1,6 +1,8 @@ --- +- include: ../openshift-cluster/evaluate_groups.yml + - name: Install and configure NetworkManager - hosts: l_oo_all_hosts + hosts: oo_all_hosts become: yes tasks: - name: install NetworkManager diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml index 441b100e9..c3beb59b7 100644 --- a/playbooks/common/openshift-node/restart.yml +++ b/playbooks/common/openshift-node/restart.yml @@ -11,6 +11,10 @@ service: name: docker state: restarted + register: l_docker_restart_docker_in_node_result + until: not l_docker_restart_docker_in_node_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -23,7 +27,6 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -36,6 +39,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config - name: 
restart node @@ -51,7 +55,7 @@ register: node_output delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_config - until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True + until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True # Give the node two minutes to come back online. retries: 24 delay: 5 diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml deleted file mode 100644 index c31aca62b..000000000 --- a/playbooks/common/openshift-node/scaleup.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- include: ../openshift-cluster/evaluate_groups.yml - -- name: Gather facts - hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config - roles: - - openshift_facts - -- name: Gather and set facts for first master - hosts: oo_first_master - vars: - openshift_master_count: "{{ groups.oo_masters | length }}" - pre_tasks: - - set_fact: - openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}" - when: openshift_master_default_subdomain is not defined - roles: - - openshift_master_facts - -- name: Configure docker hosts - hosts: oo_nodes_to_config - vars: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" - docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" - docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" - roles: - - openshift_facts - - openshift_docker - -- include: ../openshift-cluster/disable_excluder.yml - tags: - - always - -- include: ../openshift-node/config.yml - -- include: ../openshift-cluster/reset_excluder.yml - tags: - - always diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml deleted file mode 100644 index 130a5416f..000000000 --- a/playbooks/common/openshift-node/service.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Populate g_service_nodes host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_nodes - add_host: - name: "{{ item }}" - groups: g_service_nodes - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on node instance(s) - hosts: g_service_nodes - connection: ssh - gather_facts: no - tasks: - - name: Change state on node instance(s) - service: - name: "{{ service_type }}-node" - state: "{{ new_cluster_state }}" diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md deleted file mode 100644 index 0514d6f50..000000000 --- a/playbooks/gce/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# GCE playbooks - -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. 
diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml deleted file mode 100644 index 765e03fdc..000000000 --- a/playbooks/gce/openshift-cluster/add_nodes.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - vars: - oo_extend_env: True - tasks: - - fail: - msg: Deployment type not supported for gce provider yet - when: deployment_type == 'enterprise' - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - -- include: scaleup.yml -- include: list.yml diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 05a58db73..000000000 --- a/playbooks/gce/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) - | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" - -g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml deleted file mode 100644 index 2625d4d05..000000000 --- a/playbooks/gce/openshift-cluster/config.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ 
deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ gce_private_ip }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml deleted file mode 100644 index 7532a678b..000000000 --- a/playbooks/gce/openshift-cluster/launch.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - fail: msg="Deployment type not supported for gce provider yet" - when: deployment_type == 'enterprise' - - - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ etcd_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - 
gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - add_host: - name: "{{ master_names.0 }}" - groups: service_master - when: master_names is defined and master_names.0 is defined - -- include: update.yml - -- include: list.yml diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml deleted file mode 100644 index 34ab09533..000000000 --- a/playbooks/gce/openshift-cluster/list.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=tag_clusterid-{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}" - oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}" - with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml deleted file mode 100644 index 13b267976..000000000 --- a/playbooks/gce/openshift-cluster/service.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: Call same systemctl command for openshift on all instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - fail: msg="cluster_id is required to be injected in this playbook" - when: cluster_id is not defined - - - add_host: - name: "{{ item }}" - groups: g_service_nodes - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - - - add_host: - name: "{{ item }}" - groups: g_service_masters - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - -- include: ../../common/openshift-node/service.yml -- include: ../../common/openshift-master/service.yml diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index 65dd2b71e..000000000 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Launch instance(s) - gce: - instance_names: "{{ instances|join(',') }}" - machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}" - image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: "{{ lookup('env', 'gce_project_id') }}" - zone: "{{ lookup('env', 'zone') }}" - network: "{{ lookup('env', 'network') }}" - subnetwork: "{{ 
lookup('env', 'subnetwork') | default(omit, True) }}" - # unsupported in 1.9.+ - #service_account_permissions: "datastore,logging-write" - tags: - - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }} - - environment-{{ cluster_env }} - - clusterid-{{ cluster_id }} - - host-type-{{ type }} - - sub-host-type-{{ g_sub_host_type }} - metadata: - startup-script: | - #!/bin/bash - echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }} - - when: instances |length > 0 - register: gce - -- set_fact: - node_label: - # There doesn't seem to be a way to get the region directly, so parse it out of the zone. - region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}" - type: "{{ g_sub_host_type }}" - when: instances |length > 0 and type == "node" - -- set_fact: - node_label: - # There doesn't seem to be a way to get the region directly, so parse it out of the zone. - region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}" - type: "{{ type }}" - when: instances |length > 0 and type != "node" - -- name: Add new instances to groups and set variables needed - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" - gce_public_ip: "{{ item.public_ip }}" - gce_private_ip: "{{ item.private_ip }}" - openshift_node_labels: "{{ node_label }}" - with_items: "{{ gce.instance_data | default([], true) }}" - -- name: Wait for ssh - wait_for: port=22 host={{ item.public_ip }} - with_items: "{{ gce.instance_data | default([], true) }}" - -- name: Wait for user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" - register: result - until: result.rc == 0 - retries: 30 - delay: 5 - with_items: "{{ gce.instance_data | default([], true) }}" diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml deleted file mode 100644 index afe269b7c..000000000 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: Terminate instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}" - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - vars_files: - - vars.yml - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- name: Terminate instances(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - 
name: "{{ item }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: "{{ lookup('env', 'gce_project_id') }}" - zone: "{{ lookup('env', 'zone') }}" - with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}" - when: item is defined - -#- include: ../openshift-node/terminate.yml -# vars: -# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" -# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" -# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" -# -#- include: ../openshift-master/terminate.yml -# vars: -# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" -# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" -# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml deleted file mode 100644 index 6d2af3d26..000000000 --- a/playbooks/gce/openshift-cluster/update.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml deleted file mode 100644 index 13c754c1e..000000000 --- a/playbooks/gce/openshift-cluster/vars.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -debug_level: 2 - -deployment_rhel7_ent_base: - image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}" - machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}" - ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}" - become: yes - -deployment_vars: - origin: - image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}" - machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}" - ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}" - become: yes - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml new file mode 100644 index 000000000..a3d1d46a6 --- /dev/null +++ b/playbooks/gcp/openshift-cluster/provision.yml @@ -0,0 +1,19 @@ +--- +- name: Ensure all cloud resources necessary for the cluster, including instances, have been started + hosts: localhost + connection: local + gather_facts: no + tasks: + + - name: provision a GCP cluster in the specified project + include_role: + name: openshift_gcp + +- name: normalize groups + include: 
../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/std_include.yml + +- name: run the config + include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md deleted file mode 100644 index 3ce46a76f..000000000 --- a/playbooks/libvirt/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# libvirt playbooks - -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 05a58db73..000000000 --- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) - | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" - -g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml deleted file mode 100644 index f782d6dab..000000000 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# TODO: need to figure out a plan for setting hostname, currently the default -# is localhost, so no hostname value (or public_hostname) value is getting -# assigned - -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" - openshift_use_fluentd: "{{ 
lookup('oo_option', 'use_fluentd') }}" - openshift_use_dnsmasq: false diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml deleted file mode 100644 index 2475b9d6b..000000000 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - vars: - image_url: "{{ deployment_vars[deployment_type].image.url }}" - image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" - image_name: "{{ deployment_vars[deployment_type].image.name }}" - image_compression: "{{ deployment_vars[deployment_type].image.compression }}" - tasks: - - include: tasks/configure_libvirt.yml - - - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ etcd_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - -- include: update.yml - -- include: list.yml diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml deleted file mode 100644 index 579cd7ac6..000000000 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=tag_clusterid-{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "" - oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}" - with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/libvirt/openshift-cluster/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/libvirt/openshift-cluster/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml deleted file mode 100644 index 8bd24a8cf..000000000 --- a/playbooks/libvirt/openshift-cluster/service.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# TODO: need to figure out a plan for setting hostname, currently the default -# is localhost, so no hostname value (or public_hostname) value is getting -# assigned - -- name: Call same systemctl command for openshift on all instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - fail: msg="cluster_id is required to be injected in this playbook" - when: cluster_id is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: g_service_masters - with_items: "{{ g_master_hosts | default([]) }}" - - - name: Evaluate g_service_nodes - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: g_service_nodes - with_items: "{{ g_node_hosts | default([]) }}" - -- include: ../../common/openshift-node/service.yml -- include: ../../common/openshift-master/service.yml diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml deleted file mode 100644 index f237c1a60..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: configure_libvirt_storage_pool.yml - when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined - -- include: configure_libvirt_network.yml - when: libvirt_network is defined diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml deleted file mode 100644 index b42ca83af..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Create the libvirt network for OpenShift - virt_net: - name: '{{ libvirt_network }}' - state: '{{ item }}' - autostart: 'yes' - xml: "{{ lookup('template', 'network.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: - - present - - active diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml deleted file mode 100644 index 8685624ec..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: Create libvirt storage directory for openshift - file: - dest: "{{ libvirt_storage_pool_path }}" - state: directory - -# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set. 
-- acl: - default: '{{ item.default }}' - entity: kvm - etype: group - name: "{{ libvirt_storage_pool_path }}" - permissions: '{{ item.permissions }}' - state: present - with_items: - - default: no - permissions: x - - default: yes - permissions: rwx - -- name: Create the libvirt storage pool for OpenShift - virt_pool: - name: '{{ libvirt_storage_pool }}' - state: '{{ item }}' - autostart: 'yes' - xml: "{{ lookup('template', 'storage-pool.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: - - present - - active diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index ccd29be29..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,138 +0,0 @@ ---- -# TODO: Add support for choosing base image based on deployment_type and os -# wanted (os wanted needs support added in bin/cluster with sane defaults: -# fedora/centos for origin, rhel for enterprise) - -# TODO: create a role to encapsulate some of this complexity, possibly also -# create a module to manage the storage tasks, network tasks, and possibly -# even handle the libvirt tasks to set metadata in the domain xml and be able -# to create/query data about vms without having to use xml the python libvirt -# bindings look like a good candidate for this - -- name: Download Base Cloud image - get_url: - url: '{{ image_url }}' - sha256sum: '{{ image_sha256 }}' - dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}' - when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] - register: downloaded_image - -- name: Uncompress xz compressed base cloud image - command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' - args: - creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' - when: image_compression in ["xz"] and downloaded_image.changed - -- name: Uncompress tgz compressed base cloud image - command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' - args: - creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' - when: image_compression in ["tgz"] and downloaded_image.changed - -- name: Uncompress gzip compressed base cloud image - command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' - args: - creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' - when: image_compression in ["gz"] and downloaded_image.changed - -- name: Create the cloud-init config drive path - file: - dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - state: directory - with_items: '{{ instances }}' - -- name: Create the cloud-init config drive files - template: - src: '{{ item[1] }}' - dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}' - with_nested: - - '{{ instances }}' - - [ user-data, meta-data ] - -- name: Create the cloud-init config drive - command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' - args: - chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' - with_items: '{{ instances }}' - -- name: Refresh the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' - -- name: Create VM drives - 
command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' - with_items: '{{ instances }}' - -- name: Create VM docker drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0' - with_items: '{{ instances }}' - -- name: Create VMs - virt: - name: '{{ item }}' - command: define - xml: "{{ lookup('template', '../templates/domain.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Start VMs - virt: - name: '{{ item }}' - state: running - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Wait for the VMs to get an IP - shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}''' - register: nb_allocated_ips - until: nb_allocated_ips.stdout == '{{ instances | length }}' - retries: 60 - delay: 3 - when: instances | length != 0 - -- name: Collect IP addresses of the VMs - shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}''' - register: scratch_ip - with_items: '{{ instances }}' - -- set_fact: - ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}" - -- set_fact: - node_label: - type: "{{ g_sub_host_type }}" - when: instances | length > 0 and type == "node" - -- set_fact: - node_label: - type: "{{ type }}" - when: instances | length > 0 and type != "node" - -- name: Add new instances - add_host: - hostname: '{{ item.0 }}' - ansible_ssh_host: '{{ item.1 }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}" - openshift_node_labels: "{{ node_label }}" - libvirt_ip_address: "{{ item.1 }}" - with_together: - - '{{ instances }}' - - '{{ ips }}' - -- name: Wait for ssh - wait_for: - host: '{{ item }}' - port: 22 - with_items: '{{ ips }}' - -- name: Wait for openshift user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - with_together: - - '{{ instances }}' - - '{{ ips }}' diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml deleted file mode 100644 index 88504a5f6..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ /dev/null @@ -1,65 +0,0 @@ -<domain type='kvm' id='8'> - <name>{{ item }}</name> - <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory> - <metadata xmlns:ansible="https://github.com/ansible/ansible"> - <ansible:tags> - <ansible:tag>environment-{{ cluster_env }}</ansible:tag> - <ansible:tag>clusterid-{{ cluster }}</ansible:tag> - <ansible:tag>host-type-{{ type }}</ansible:tag> - <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag> - </ansible:tags> - </metadata> - <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu> - <os> - <type arch='x86_64' machine='pc'>hvm</type> - <boot dev='hd'/> - </os> - <features> - <acpi/> - <apic/> - <pae/> - </features> - <cpu mode='host-model'> - <model fallback='allow'/> - </cpu> - <clock offset='utc'> - <timer 
name='rtc' tickpolicy='catchup'/> - <timer name='pit' tickpolicy='delay'/> - <timer name='hpet' present='no'/> - </clock> - <on_poweroff>destroy</on_poweroff> - <on_reboot>restart</on_reboot> - <on_crash>restart</on_crash> - <devices> - <emulator>/usr/bin/qemu-system-x86_64</emulator> - <disk type='file' device='disk'> - <driver name='qemu' type='qcow2' discard='unmap'/> - <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/> - <target dev='sda' bus='scsi'/> - </disk> - <disk type='file' device='disk'> - <driver name='qemu' type='qcow2' discard='unmap'/> - <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/> - <target dev='sdb' bus='scsi'/> - </disk> - <disk type='file' device='cdrom'> - <driver name='qemu' type='raw'/> - <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/> - <target dev='sdc' bus='scsi'/> - <readonly/> - </disk> - <controller type='scsi' model='virtio-scsi' /> - <interface type='network'> - <source network='{{ libvirt_network }}'/> - <model type='virtio'/> - </interface> - <serial type='pty'> - <target port='0'/> - </serial> - <console type='pty'> - <target type='serial' port='0'/> - </console> - <memballoon model='virtio'> - </memballoon> - </devices> -</domain> diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data deleted file mode 100644 index 6b421770d..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/meta-data +++ /dev/null @@ -1,3 +0,0 @@ -instance-id: {{ item[0] }} -hostname: {{ item[0] }} -local-hostname: {{ item[0] }}.example.com diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml deleted file mode 100644 index 0ce2a8342..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/network.xml +++ /dev/null @@ -1,23 +0,0 @@ -<network> - <name>{{ libvirt_network }}</name> - <forward mode='nat'> - <nat> - <port start='1024' end='65535'/> - </nat> - </forward> - <!-- TODO: query for first available virbr interface available --> - <bridge name='virbr3' stp='on' delay='0'/> - <!-- TODO: make overridable --> - <domain name='example.com' localOnly='yes' /> - <dns> - <!-- TODO: automatically add host entries --> - </dns> - <!-- TODO: query for available address space --> - <ip address='192.168.55.1' netmask='255.255.255.0'> - <dhcp> - <range start='192.168.55.2' end='192.168.55.254'/> - <!-- TODO: add static entries addresses for the hosts to be created --> - </dhcp> - </ip> -</network> - diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml deleted file mode 100644 index da139afd0..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml +++ /dev/null @@ -1,6 +0,0 @@ -<pool type='dir'> - <name>{{ libvirt_storage_pool }}</name> - <target> - <path>{{ libvirt_storage_pool_path }}</path> - </target> -</pool> diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data deleted file mode 100644 index fbcf7c886..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/user-data +++ /dev/null @@ -1,43 +0,0 @@ -#cloud-config -disable_root: true - -hostname: {{ item[0] }} -fqdn: {{ item[0] }}.example.com - -mounts: -- [ sdb ] - -users: - - default - - name: root - ssh_authorized_keys: - - {{ lookup('file', '~/.ssh/id_rsa.pub') }} - -system_info: - default_user: - name: openshift - sudo: 
["ALL=(ALL) NOPASSWD: ALL"] - -ssh_authorized_keys: - - {{ lookup('file', '~/.ssh/id_rsa.pub') }} - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty - - path: /etc/sysconfig/docker-storage-setup - owner: root:root - permissions: '0644' - content: | - DEVS=/dev/sdb - VG=docker_vg - EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true' - - path: /etc/systemd/system/fstrim.timer.d/hourly.conf - content: | - [Timer] - OnCalendar=hourly - -runcmd: - - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart - - systemctl enable --now fstrim.timer diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml deleted file mode 100644 index 8a63d11a5..000000000 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -# TODO: does not handle a non-existent cluster gracefully - -- name: Terminate instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: cluster_group=tag_clusterid-{{ cluster_id }} - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: '{{ groups[cluster_group] | default([]) }}' - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - vars_files: - - vars.yml - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- name: Terminate instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Destroy VMs - virt: - name: '{{ item[0] }}' - command: '{{ item[1] }}' - uri: '{{ libvirt_uri }}' - with_nested: - - "{{ groups['oo_hosts_to_terminate'] }}" - - [ destroy, undefine ] - - - name: Delete VM drives - command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2' - args: - removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2' - with_items: "{{ groups['oo_hosts_to_terminate'] }}" - - - name: Delete VM docker drives - command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2' - args: - removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2' - with_items: "{{ groups['oo_hosts_to_terminate'] }}" - - - name: Delete the VM cloud-init image - file: - path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' - state: absent - with_items: "{{ groups['oo_hosts_to_terminate'] }}" - - - name: Remove the cloud-init config directory - file: - path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - state: absent - with_items: "{{ groups['oo_hosts_to_terminate'] }}" diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml deleted file mode 100644 index a152135fc..000000000 --- a/playbooks/libvirt/openshift-cluster/update.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: 
cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: '{{ g_all_hosts }}' - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: '{{ g_all_hosts | default([]) }}' - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml deleted file mode 100644 index 5156789e7..000000000 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible" -libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}" -libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}" -libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}" -libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}" -libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}" -libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}" -debug_level: 2 - -# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication. -# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work. 
-deployment_rhel7_ent_base: - image: - url: "{{ lookup('oo_option', 'image_url') | - default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" - name: "{{ lookup('oo_option', 'image_name') | - default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" - sha256: "{{ lookup('oo_option', 'image_sha256') | - default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}" - compression: "" - ssh_user: openshift - become: yes - -deployment_vars: - origin: - image: - url: "{{ lookup('oo_option', 'image_url') | - default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}" - compression: "{{ lookup('oo_option', 'image_compression') | - default('xz', True) }}" - name: "{{ lookup('oo_option', 'image_name') | - default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}" - sha256: "{{ lookup('oo_option', 'image_sha256') | - default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}" - ssh_user: openshift - become: yes - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md deleted file mode 100644 index a6d8d6995..000000000 --- a/playbooks/openstack/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# OpenStack playbooks - -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 505f7b3a8..000000000 --- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([]) - | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}" - -g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}" diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml deleted file mode 100644 index f9ddb9469..000000000 --- a/playbooks/openstack/openshift-cluster/config.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) 
}}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_nodeonmaster: true - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml deleted file mode 100644 index 82329eac1..000000000 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ /dev/null @@ -1,508 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster - -parameters: - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - subnet_24_prefix: - type: string - label: subnet /24 prefix - description: /24 subnet prefix of the network of the cluster (dot separated number triplet) - - dns_nameservers: - type: comma_delimited_list - label: DNS nameservers list - description: List of DNS nameservers - - external_net: - type: string - label: External network - description: Name of the external network - default: external - - ssh_public_key: - type: string - label: SSH public key - description: SSH public key - hidden: true - - ssh_incoming: - type: string - label: Source of ssh connections - description: Source of legitimate ssh connections - default: 0.0.0.0/0 - - node_port_incoming: - type: string - label: Source of node port connections - description: Authorized sources targeting node ports - default: 0.0.0.0/0 - - num_etcd: - type: number - label: Number of etcd nodes - description: Number of etcd nodes - - num_masters: - type: number - label: Number of masters - description: Number of masters - - num_nodes: - type: number - label: Number of compute nodes - description: Number of compute nodes - - num_infra: - type: number - label: Number of infrastructure nodes - description: Number of infrastructure nodes - - etcd_image: - type: string - label: Etcd image - description: Name of the image for the etcd servers - - master_image: - type: string - label: Master image - description: Name of the image for the master servers - - node_image: - type: string - label: Node image - description: Name of the image for the compute node servers - - infra_image: - type: string - label: Infra image - description: Name of the image for the infra node servers - - etcd_flavor: - type: string - label: Etcd flavor - description: Flavor of the etcd servers - - master_flavor: - type: string - label: Master flavor - description: Flavor of the master servers - - node_flavor: - type: string - label: Node flavor - description: Flavor of the compute node servers - - infra_flavor: - type: string - label: Infra flavor - 
description: Flavor of the infra node servers - -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - -resources: - - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: { get_param: cluster_id } - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - dns_nameservers: { get_param: dns_nameservers } - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-router - params: - cluster_id: { get_param: cluster_id } - external_gateway_info: - network: { get_param: external_net } - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - - keypair: - type: OS::Nova::KeyPair - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-keypair - params: - cluster_id: { get_param: cluster_id } - public_key: { get_param: ssh_public_key } - - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - 
port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: udp - port_range_min: 5405 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: { get_param: node_port_incoming } - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_etcd } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: etcd - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: etcd - image: { get_param: etcd_image } - flavor: { get_param: etcd_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: etcd-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface - - masters: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_masters } - resource_def: - type: heat_stack_server.yaml - properties: - name: 
- str_replace: - template: cluster_id-k8s_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: master - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: master - image: { get_param: master_image } - flavor: { get_param: master_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_nodes } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-sub_host_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - sub_host_type: compute - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: compute - image: { get_param: node_image } - flavor: { get_param: node_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_infra } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-sub_host_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - sub_host_type: infra - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: infra - image: { get_param: infra_image } - flavor: { get_param: infra_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: infra-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml deleted file mode 100644 index 435139849..000000000 --- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml +++ /dev/null @@ -1,152 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: 
Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - floating_network: - type: string - label: Floating network - description: Network to allocate floating IP from - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 1 - - addr - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: port } - user_data: { get_resource: config } - user_data_format: RAW - metadata: - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - - floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } - - config: - type: OS::Heat::CloudConfig - properties: - cloud_config: - disable_root: true - - hostname: { get_param: name } - - system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - - write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - # content: Defaults:openshift !requiretty - # Encoded in base64 to be sure that we do not forget the trailing newline or - # sudo will not be able to parse that file - encoding: b64 - content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg== diff --git a/playbooks/openstack/openshift-cluster/filter_plugins b/playbooks/openstack/openshift-cluster/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/openstack/openshift-cluster/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml deleted file mode 100644 index c0bc12f55..000000000 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ /dev/null @@ -1,191 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - # TODO: Write an Ansible module for dealing with HEAT stacks - # Dealing with the outputs is currently terrible - - - name: Check OpenStack stack - command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' - register: stack_show_result - changed_when: false - failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - - set_fact: - heat_stack_action: 'stack-create' - when: stack_show_result.rc == 1 - - set_fact: - heat_stack_action: 'stack-update' - when: stack_show_result.rc == 0 - - - name: Create or Update OpenStack Stack - command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} - --timeout {{ openstack_heat_timeout }} - -P cluster_env={{ cluster_env }} - -P cluster_id={{ cluster_id }} - -P subnet_24_prefix={{ openstack_subnet_24_prefix }} - -P dns_nameservers={{ openstack_network_dns | join(",") }} - -P external_net={{ openstack_network_external_net }} - -P ssh_public_key="{{ openstack_ssh_public_key }}" - -P ssh_incoming={{ openstack_ssh_access_from }} - -P node_port_incoming={{ openstack_node_port_access_from }} - -P num_etcd={{ num_etcd }} - -P num_masters={{ num_masters }} - -P num_nodes={{ num_nodes }} - -P num_infra={{ num_infra }} - -P etcd_image={{ deployment_vars[deployment_type].image }} - -P master_image={{ deployment_vars[deployment_type].image }} - -P node_image={{ deployment_vars[deployment_type].image }} - -P infra_image={{ deployment_vars[deployment_type].image }} - -P etcd_flavor={{ openstack_flavor["etcd"] }} - -P master_flavor={{ openstack_flavor["master"] }} - -P node_flavor={{ openstack_flavor["node"] }} - -P infra_flavor={{ openstack_flavor["infra"] }} - openshift-ansible-{{ cluster_id }}-stack' - args: - chdir: '{{ playbook_dir }}' - - - name: Wait for OpenStack Stack readiness - shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' - register: stack_show_status_result - until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 30 - delay: 5 - - - name: Display the stack resources - command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack' - register: stack_resource_list_result - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - name: Display the stack status - command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' - register: stack_show_result - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - name: Delete the stack - command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack' - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - fail: - msg: | - - +--------------------------------------+ - | ^ | - | /!\ Failed to create the heat stack | - | /___\ | - +--------------------------------------+ - - Here is the list of stack resources and their status: - {{ stack_resource_list_result.stdout }} - - Here is the status of the stack: - {{ stack_show_result.stdout }} - - ^ Failed to create the heat stack - /!\ - /___\ Please check the 
`stack_status_reason` line in the above array to know why. - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - name: Read OpenStack Stack outputs - command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' - register: stack_show_result - - - set_fact: - parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}" - - - name: Add new etcd instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "etcd" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.etcd_names }}' - - '{{ parsed_outputs.etcd_ips }}' - - '{{ parsed_outputs.etcd_floating_ips }}' - - - name: Add new master instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "master" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.master_names }}' - - '{{ parsed_outputs.master_ips }}' - - '{{ parsed_outputs.master_floating_ips }}' - - - name: Add new node instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "compute" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.node_names }}' - - '{{ parsed_outputs.node_ips }}' - - '{{ parsed_outputs.node_floating_ips }}' - - - name: Add new infra instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "infra" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.infra_names }}' - - '{{ parsed_outputs.infra_ips }}' - - '{{ parsed_outputs.infra_floating_ips }}' - - - name: Wait for ssh - wait_for: - host: '{{ item }}' - port: 22 - with_flattened: - - '{{ parsed_outputs.master_floating_ips }}' - - '{{ parsed_outputs.node_floating_ips }}' - - '{{ parsed_outputs.infra_floating_ips }}' - - - name: Wait for user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - 
with_flattened: - - '{{ parsed_outputs.master_floating_ips }}' - - '{{ parsed_outputs.node_floating_ips }}' - - '{{ parsed_outputs.infra_floating_ips }}' - -- include: update.yml - -- include: list.yml diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml deleted file mode 100644 index 6c6f671be..000000000 --- a/playbooks/openstack/openshift-cluster/list.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=meta-clusterid_{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}" - oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}" - with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}" diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/openstack/openshift-cluster/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml deleted file mode 100644 index affb57117..000000000 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- name: Terminate instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id ]|default([])) }}" - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - vars_files: - - vars.yml - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Delete the OpenStack Stack - command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack' - register: stack_delete_result - changed_when: stack_delete_result.rc == 0 - failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout - - - name: Wait for the completion of the OpenStack Stack deletion - shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' - when: stack_delete_result.changed - register: stack_show_result - until: stack_show_result.stdout != 'DELETE_IN_PROGRESS' - retries: 60 - delay: 5 - failed_when: '"Stack not found" not in stack_show_result.stderr and - stack_show_result.stdout != "DELETE_COMPLETE"' diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml deleted file mode 100644 index 6d2af3d26..000000000 --- a/playbooks/openstack/openshift-cluster/update.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml deleted file mode 100644 index ba2855b73..000000000 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ /dev/null @@ -1,38 +0,0 @@ -# yamllint disable rule:colons ---- -debug_level: 2 -openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | - default('files/heat_stack.yaml', True) }}" -openstack_subnet_24_prefix: "{{ 
lookup('oo_option', 'subnet_24_prefix' ) | - default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}" -openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) | - default('external', True) }}" -openstack_network_dns: "{{ lookup('oo_option', 'dns' ) | - default('8.8.8.8,8.8.4.4', True) | oo_split() }}" -openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') | - default('~/.ssh/id_rsa.pub', True)) }}" -openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') | - default('0.0.0.0/0', True) }}" -openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') | - default('0.0.0.0/0', True) }}" -openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') | - default('3', True) }}" -openstack_flavor: - etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}" - master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}" - infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}" - node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}" - -deployment_rhel7_ent_base: - image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}" - ssh_user: openshift - become: yes - -deployment_vars: - origin: - image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}" - ssh_user: openshift - become: yes - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" |
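
Editor's note (not part of this commit): the deleted cluster_hosts.yml builds its g_* host lists by intersecting the metadata groups (meta-clusterid_*, meta-environment_*, meta-host-type_*) that the deleted launch.yml registers via add_host. A minimal, self-contained reproduction of that intersection idiom, with made-up hostnames, in case it is useful outside these removed playbooks:

---
# Illustrative only: hostnames and group names are invented for the example.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - add_host:
        name: "{{ item.name }}"
        groups: "meta-clusterid_demo,meta-environment_dev,meta-host-type_{{ item.type }}"
      with_items:
        - { name: master-0, type: master }
        - { name: node-0, type: node }

- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    # Same pattern as the removed cluster_hosts.yml: narrow by cluster, then
    # by environment, then by host type.
    g_all_hosts: "{{ groups['meta-clusterid_demo'] | default([])
                     | intersect(groups['meta-environment_dev'] | default([])) }}"
  tasks:
    - debug:
        msg: "masters: {{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"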
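
Editor's note (not part of this commit): the deleted launch.yml carries a TODO ("Write an Ansible module for dealing with HEAT stacks") and therefore shells out to the legacy `heat stack-create`/`stack-update` CLI, parsing its output with awk. Anyone who still needs to drive a comparable Heat stack outside these removed playbooks can use the os_stack module that ships with Ansible 2.2+ (it requires the shade library and the usual OpenStack credentials). A hedged sketch; the stack name, template path and parameters below are illustrative, not code from this repository:

---
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Create or update the cluster Heat stack and wait for completion
      os_stack:
        name: "openshift-ansible-{{ cluster_id }}-stack"
        state: present
        template: files/heat_stack.yaml   # e.g. the template removed by this commit
        wait: yes
        parameters:
          cluster_env: "{{ cluster_env }}"
          cluster_id: "{{ cluster_id }}"
          num_masters: "{{ num_masters }}"
          num_nodes: "{{ num_nodes }}"
      register: stack_result

    - debug:
        var: stack_result.id   # stack outputs are also exposed on the returned object,
                               # depending on the module/SDK version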
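
Editor's note (not part of this commit): likewise, the deleted terminate.yml issues `heat stack-delete` and then polls `heat stack-show` through awk until the stack reaches DELETE_COMPLETE. With os_stack, the delete-and-wait collapses into a single task; again a sketch under the same assumptions as above:

---
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Delete the cluster Heat stack and wait for completion
      os_stack:
        name: "openshift-ansible-{{ cluster_id }}-stack"
        state: absent
        wait: yes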