From 6a4b7a5eb6c4b5e747bab795e2428d7c3992f559 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Wed, 1 Apr 2015 15:09:19 -0400
Subject: Configuration updates for latest builds and major refactor

Configuration updates for latest builds
- Switch to using create-node-config
- Switch sdn services to use etcd over SSL
- This re-uses the client certificate deployed on each node
- Additional node registration changes
- Do not assume that metadata service is available in openshift_facts module
- Call systemctl daemon-reload after installing openshift-master, openshift-sdn-master, openshift-node, openshift-sdn-node
- Fix bug overriding openshift_hostname and openshift_public_hostname in byo playbooks
- Start moving generated configs to /etc/openshift
- Some custom module cleanup
- Add known issue with ansible-1.9 to README_OSE.md
- Update to genericize the kubernetes_register_node module
  - Default to use kubectl for commands
  - Allow for overriding kubectl_cmd
  - In openshift_register_node role, override kubectl_cmd to openshift_kube
- Set default openshift_registry_url for enterprise when deployment_type is enterprise
- Fix openshift_register_node for client config change
- Ensure that master certs directory is created
- Add roles and filter_plugin symlinks to playbooks/common/openshift-master and node
- Allow non-root user with sudo nopasswd access
- Updates for README_OSE.md
- Update byo inventory for adding additional comments
- Updates for node cert/config sync to work with non-root user using sudo
- Move node config/certs to /etc/openshift/node
- Don't use path for mktemp. Addresses: https://github.com/openshift/openshift-ansible/issues/154

Create common playbooks
- create common/openshift-master/config.yml
- create common/openshift-node/config.yml
- update playbooks to use new common playbooks
- update launch playbooks to call update playbooks
- fix openshift_registry and openshift_node_ip usage

Set default deployment type to origin
- openshift_repo updates for enabling origin deployments
  - also separate repo and gpgkey file structure
  - remove kubernetes repo since it isn't currently needed
- full deployment type support for bin/cluster
  - honor OS_DEPLOYMENT_TYPE env variable
  - add --deployment-type option, which will override OS_DEPLOYMENT_TYPE if set
  - if neither OS_DEPLOYMENT_TYPE nor --deployment-type is set, defaults to
    origin installs

Additional changes:
- Add separate config action to bin/cluster that runs ansible config but does
  not update packages
- Some more duplication reduction in cluster playbooks.
- Rename task files in playbooks dirs to have tasks in their name for clarity.
- update aws/gce scripts to use a directory for inventory (otherwise when
  there are no hosts returned from dynamic inventory there is an error)

libvirt refactor and update

- add libvirt dynamic inventory
- updates to use dynamic inventory for libvirt
---
 playbooks/libvirt/openshift-cluster/config.yml     |  38 ++++++++
 playbooks/libvirt/openshift-cluster/launch.yml     |  81 ++++++----------
 .../libvirt/openshift-cluster/launch_instances.yml | 102 --------------------
 playbooks/libvirt/openshift-cluster/list.yml       |  50 +++-------
 .../openshift-cluster/tasks/configure_libvirt.yml  |   6 ++
 .../tasks/configure_libvirt_network.yml            |  27 ++++++
 .../tasks/configure_libvirt_storage_pool.yml       |  27 ++++++
 .../openshift-cluster/tasks/launch_instances.yml   | 104 +++++++++++++++++++++
 .../libvirt/openshift-cluster/templates/domain.xml |  67 +++++++++++++
 .../libvirt/openshift-cluster/templates/meta-data  |   3 +
 .../openshift-cluster/templates/network.xml        |  23 +++++
 .../libvirt/openshift-cluster/templates/user-data  |  23 +++++
 playbooks/libvirt/openshift-cluster/terminate.yml  |  69 +++++++-------
 playbooks/libvirt/openshift-cluster/update.yml     |  18 ++++
 playbooks/libvirt/openshift-cluster/vars.yml       |  38 ++++++--
 15 files changed, 445 insertions(+), 231 deletions(-)
 create mode 100644 playbooks/libvirt/openshift-cluster/config.yml
 delete mode 100644 playbooks/libvirt/openshift-cluster/launch_instances.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/templates/domain.xml
 create mode 100644 playbooks/libvirt/openshift-cluster/templates/meta-data
 create mode 100644 playbooks/libvirt/openshift-cluster/templates/network.xml
 create mode 100644 playbooks/libvirt/openshift-cluster/templates/user-data
 create mode 100644 playbooks/libvirt/openshift-cluster/update.yml

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
new file mode 100644
index 000000000..faf278b10
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -0,0 +1,38 @@
+---
+# TODO: need to figure out a plan for setting hostname, currently the default
+# is localhost, so no hostname value (or public_hostname) value is getting
+# assigned
+
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_masters_to_config
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_nodes_to_config
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_first_master
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 6f2df33af..a7ddc1e7e 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -1,65 +1,36 @@
+---
 - name: Launch instance(s)
   hosts: localhost
-  connection: local
   gather_facts: no
-
-  vars:
-    libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
-    libvirt_storage_pool: 'openshift'
-    libvirt_uri: 'qemu:///system'
-
   vars_files:
-    - vars.yml
-
+  - vars.yml
+  vars:
+    os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
+    os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
+    os_libvirt_network: "{{ libvirt_network | default('default') }}"
+    image_url: "{{ deployment_vars[deployment_type].image.url }}"
+    image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
+    image_name: "{{ deployment_vars[deployment_type].image.name }}"
   tasks:
-    - set_fact:
-        k8s_type: master
-
-    - name: Generate master instance name(s)
-      set_fact:
-        scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
-      register: master_names_output
-      with_sequence: start=1 end='{{ num_masters }}'
+  - fail: msg="Deployment type not supported for libvirt provider yet"
+    when: deployment_type in ['online', 'enterprise']
 
-    - set_fact:
-        master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+  - include: tasks/configure_libvirt.yml
 
-    - include: launch_instances.yml
-      vars:
-        instances: '{{ master_names }}'
-        cluster: '{{ cluster_id }}'
-        type: '{{ k8s_type }}'
-        group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
 
-    - set_fact:
-        k8s_type: node
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
 
-    - name: Generate node instance name(s)
-      set_fact:
-        scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
-      register: node_names_output
-      with_sequence: start=1 end='{{ num_nodes }}'
+- include: update.yml
 
-    - set_fact:
-        node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: '{{ node_names }}'
-        cluster: '{{ cluster_id }}'
-        type: '{{ k8s_type }}'
-
-- hosts: 'tag_env-{{ cluster_id }}'
-  roles:
-    - openshift_repos
-    - os_update_latest
-
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
-    oo_env: '{{ cluster_id }}'
-
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
-    oo_env: '{{ cluster_id }}'
+- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/launch_instances.yml
deleted file mode 100644
index 3bbcae981..000000000
--- a/playbooks/libvirt/openshift-cluster/launch_instances.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-- name: Create the libvirt storage directory for openshift
-  file:
-    dest: '{{ libvirt_storage_pool_path }}'
-    state: directory
-
-- name: Download Base Cloud image
-  get_url:
-    url: '{{ base_image_url }}'
-    sha256sum: '{{ base_image_sha256 }}'
-    dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
-
-- name: Create the cloud-init config drive path
-  file:
-    dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
-    state: directory
-  with_items: '{{ instances }}'
-
-- name: Create the cloud-init config drive files
-  template:
-    src: '{{ item[1] }}'
-    dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
-  with_nested:
-    - '{{ instances }}'
-    - [ user-data, meta-data ]
-
-- name: Create the cloud-init config drive
-  command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
-  args:
-    chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
-    creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
-  with_items: '{{ instances }}'
-
-- name: Create the libvirt storage pool for openshift
-  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
-  ignore_errors: yes
-
-- name: Refresh the libvirt storage pool for openshift
-  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-
-- name: Create VMs drives
-  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
-  with_items: '{{ instances }}'
-
-- name: Create VMs
-  virt:
-    name: '{{ item }}'
-    command: define
-    xml: "{{ lookup('template', '../templates/domain.xml') }}"
-    uri: '{{ libvirt_uri }}'
-  with_items: '{{ instances }}'
-
-- name: Start VMs
-  virt:
-    name: '{{ item }}'
-    state: running
-    uri: '{{ libvirt_uri }}'
-  with_items: '{{ instances }}'
-
-- name: Collect MAC addresses of the VMs
-  shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
-  register: scratch_mac
-  with_items: '{{ instances }}'
-
-- name: Wait for the VMs to get an IP
-  command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
-  ignore_errors: yes
-  register: nb_allocated_ips
-  until: nb_allocated_ips.stdout == '{{ instances | length }}'
-  retries: 30
-  delay: 1
-
-- name: Collect IP addresses of the VMs
-  shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
-  register: scratch_ip
-  with_items: '{{ scratch_mac.results }}'
-
-- set_fact:
-    ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
-
-- name: Add new instances
-  add_host:
-    hostname: '{{ item.0 }}'
-    ansible_ssh_host: '{{ item.1 }}'
-    ansible_ssh_user: root
-    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
-  with_together:
-    - instances
-    - ips
-
-- name: Wait for ssh
-  wait_for:
-    host: '{{ item }}'
-    port: 22
-  with_items: ips
-
-- name: Wait for root user setup
-  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
-  register: result
-  until: result.rc == 0
-  retries: 30
-  delay: 1
-  with_items: ips
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6bf07e3c6..25a25f791 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -1,43 +1,23 @@
+---
 - name: Generate oo_list_hosts group
   hosts: localhost
-  connection: local
   gather_facts: no
-
-  vars:
-    libvirt_uri: 'qemu:///system'
-
+  vars_files:
+  - vars.yml
   tasks:
-    - name: List VMs
-      virt:
-        command: list_vms
-      register: list_vms
-
-    - name: Collect MAC addresses of the VMs
-      shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
-      register: scratch_mac
-      with_items: '{{ list_vms.list_vms }}'
-      when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
-
-    - name: Collect IP addresses of the VMs
-      shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
-      register: scratch_ip
-      with_items: '{{ scratch_mac.results }}'
-      when: item.skipped is not defined
-
-    - name: Add hosts
-      add_host:
-        hostname: '{{ item[0] }}'
-        ansible_ssh_host: '{{ item[1].stdout }}'
-        ansible_ssh_user: root
-        groups: oo_list_hosts
-      with_together:
-        - '{{ list_vms.list_vms }}'
-        - '{{ scratch_ip.results }}'
-      when: item[1].skipped is not defined
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
 - name: List Hosts
   hosts: oo_list_hosts
-
   tasks:
-    - debug:
-        msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+  - debug:
+      msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}'
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
new file mode 100644
index 000000000..f237c1a60
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+  when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+  when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
new file mode 100644
index 000000000..1cd83f7be
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+  command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+  register: net_info_result
+  changed_when: False
+  failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+  command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX"
+  register: mktemp
+  when: net_info_result.rc == 1
+
+- name: Create network xml file
+  template:
+    src: templates/network.xml
+    dest: "{{ mktemp.stdout }}/network.xml"
+  when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+  command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+  when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+  file:
+    path: "{{ mktemp.stdout }}"
+    state: absent
+  when: net_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
new file mode 100644
index 000000000..817acb250
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -0,0 +1,27 @@
+---
+- name: Create libvirt storage directory for openshift
+  file:
+    dest: "{{ libvirt_storage_pool_path }}"
+    state: directory
+
+- acl:
+    default: yes
+    entity: kvm
+    etype: group
+    name: "{{ libvirt_storage_pool_path }}"
+    permissions: rwx
+    state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+  command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+  register: pool_info_result
+  changed_when: False
+  failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+  when: pool_info_result.rc == 1
+
+- name: Refresh the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+  when: pool_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..96d440096
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,104 @@
+---
+# TODO: Add support for choosing base image based on deployment_type and os
+# wanted (os wanted needs support added in bin/cluster with sane defaults:
+# fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even handle the libvirt tasks to set metadata in the domain xml and be able
+# to create/query data about vms without having to use xml the python libvirt
+# bindings look like a good candidate for this
+
+- name: Download Base Cloud image
+  get_url:
+    url: '{{ image_url }}'
+    sha256sum: '{{ image_sha256 }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+
+- name: Create the cloud-init config drive path
+  file:
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+    state: directory
+  with_items: instances
+
+- name: Create the cloud-init config drive files
+  template:
+    src: '{{ item[1] }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
+  with_nested:
+    - instances
+    - [ user-data, meta-data ]
+
+- name: Create the cloud-init config drive
+  command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+  args:
+    chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+    creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+  with_items: instances
+
+- name: Create VMs drives
+  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+  with_items: instances
+
+- name: Create VMs
+  virt:
+    name: '{{ item }}'
+    command: define
+    xml: "{{ lookup('template', '../templates/domain.xml') }}"
+    uri: '{{ libvirt_uri }}'
+  with_items: instances
+
+- name: Start VMs
+  virt:
+    name: '{{ item }}'
+    state: running
+    uri: '{{ libvirt_uri }}'
+  with_items: instances
+
+- name: Collect MAC addresses of the VMs
+  shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+  register: scratch_mac
+  with_items: instances
+
+- name: Wait for the VMs to get an IP
+  command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+  ignore_errors: yes
+  register: nb_allocated_ips
+  until: nb_allocated_ips.stdout == '{{ instances | length }}'
+  retries: 30
+  delay: 1
+
+- name: Collect IP addresses of the VMs
+  shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+  register: scratch_ip
+  with_items: scratch_mac.results
+
+- set_fact:
+    ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+  add_host:
+    hostname: '{{ item.0 }}'
+    ansible_ssh_host: '{{ item.1 }}'
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+  with_together:
+    - instances
+    - ips
+
+- name: Wait for ssh
+  wait_for:
+    host: '{{ item }}'
+    port: 22
+  with_items: ips
+
+- name: Wait for openshift user setup
+  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
+  register: result
+  until: result.rc == 0
+  retries: 30
+  delay: 1
+  with_together:
+  - instances
+  - ips
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
new file mode 100644
index 000000000..8cb017367
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -0,0 +1,67 @@
+<domain type='kvm' id='8'>
+  <name>{{ item }}</name>
+  <memory unit='GiB'>1</memory>
+  <metadata xmlns:ansible="https://github.com/ansible/ansible">
+    <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag>
+    <ansible:tag>env-{{ cluster }}</ansible:tag>
+    <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+    <ansible:tag>host-type-{{ type }}</ansible:tag>
+  </metadata>
+  <currentMemory unit='GiB'>1</currentMemory>
+  <vcpu placement='static'>2</vcpu>
+  <os>
+    <type arch='x86_64' machine='pc'>hvm</type>
+    <boot dev='hd'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <clock offset='utc'>
+    <timer name='rtc' tickpolicy='catchup'/>
+    <timer name='pit' tickpolicy='delay'/>
+    <timer name='hpet' present='no'/>
+  </clock>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/qemu-system-x86_64</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2'/>
+      <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+      <target dev='vda' bus='virtio'/>
+    </disk>
+    <disk type='file' device='cdrom'>
+      <driver name='qemu' type='raw'/>
+      <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+      <target dev='vdb' bus='virtio'/>
+      <readonly/>
+    </disk>
+    <controller type='usb' index='0' />
+    <interface type='network'>
+      <source network='{{ os_libvirt_network }}'/>
+      <model type='virtio'/>
+    </interface>
+    <serial type='pty'>
+      <target port='0'/>
+    </serial>
+    <console type='pty'>
+      <target type='serial' port='0'/>
+    </console>
+    <channel type='spicevmc'>
+      <target type='virtio' name='com.redhat.spice.0'/>
+    </channel>
+    <input type='tablet' bus='usb' />
+    <input type='mouse' bus='ps2'/>
+    <graphics type='spice' autoport='yes' />
+    <video>
+      <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
+    </video>
+    <redirdev bus='usb' type='spicevmc'>
+    </redirdev>
+    <memballoon model='virtio'>
+    </memballoon>
+  </devices>
+</domain>
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
new file mode 100644
index 000000000..6b421770d
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/meta-data
@@ -0,0 +1,3 @@
+instance-id: {{ item[0] }}
+hostname: {{ item[0] }}
+local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
new file mode 100644
index 000000000..86dcd62bb
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -0,0 +1,23 @@
+<network>
+  <name>openshift-ansible</name>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <!-- TODO: query for first available virbr interface available -->
+  <bridge name='virbr3' stp='on' delay='0'/>
+  <!-- TODO: make overridable -->
+  <domain name='example.com'/>
+  <dns>
+    <!-- TODO: automatically add host entries -->
+  </dns>
+  <!-- TODO: query for available address space -->
+  <ip address='192.168.55.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='192.168.55.2' end='192.168.55.254'/>
+      <!-- TODO: add static entries addresses for the hosts to be created -->
+    </dhcp>
+  </ip>
+</network>
+
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
new file mode 100644
index 000000000..77b788109
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -0,0 +1,23 @@
+#cloud-config
+disable_root: true
+
+hostname: {{ item[0] }}
+fqdn: {{ item[0] }}.example.com
+manage_etc_hosts: true
+
+users:
+  - default
+  - name: root
+    ssh_authorized_keys:
+    - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+system_info:
+  default_user:
+    name: openshift
+    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ssh_authorized_keys:
+  - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+bootcmd:
+  - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index c609169d3..b173a09dd 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -1,41 +1,44 @@
+---
+# TODO: does not handle a non-existent cluster gracefully
+
 - name: Terminate instance(s)
   hosts: localhost
-  connection: local
   gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - set_fact: cluster_group=tag_env-{{ cluster_id }}
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[cluster_group] | default([])
 
-  vars:
-    libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
-    libvirt_storage_pool: 'openshift'
-    libvirt_uri: 'qemu:///system'
+  - name: Destroy VMs
+    virt:
+      name: '{{ item[0] }}'
+      command: '{{ item[1] }}'
+      uri: '{{ libvirt_uri }}'
+    with_nested:
+    - groups['oo_hosts_to_terminate']
+    - [ destroy, undefine ]
 
-  tasks:
-    - name: List VMs
-      virt:
-        command: list_vms
-      register: list_vms
+  - name: Delete VMs drives
+    command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
+    args:
+      removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
+    with_items: groups['oo_hosts_to_terminate']
 
-    - name: Destroy VMs
-      virt:
-        name: '{{ item[0] }}'
-        command: '{{ item[1] }}'
-        uri: '{{ libvirt_uri }}'
-      with_nested:
-        - '{{ list_vms.list_vms }}'
-        - [ destroy, undefine ]
-      when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+  - name: Delete the VM cloud-init image
+    file:
+      path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+      state: absent
+    with_items: groups['oo_hosts_to_terminate']
 
-    - name: Delete VMs config drive
-      file:
-        path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
-        state: absent
-      with_items: '{{ list_vms.list_vms }}'
-      when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+  - name: Remove the cloud-init config directory
+    file:
+      path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+      state: absent
+    with_items: groups['oo_hosts_to_terminate']
 
-    - name: Delete VMs drives
-      command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
-      args:
-        removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
-      with_nested:
-        - '{{ list_vms.list_vms }}'
-        - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
-      when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
new file mode 100644
index 000000000..57e36db9e
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 4e4eecd46..65d954fee 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -1,7 +1,33 @@
-# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+---
+libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool: 'openshift-ansible'
+libvirt_network: openshift-ansible
+libvirt_uri: 'qemu:///system'
 
-base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
-base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
-base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+deployment_vars:
+  origin:
+    image:
+      url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
+      name: CentOS-7-x86_64-GenericCloud.qcow2
+      sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+    ssh_user: openshift
+    sudo: yes
+  online:
+    image:
+      url:
+      name:
+      sha256:
+    ssh_user: root
+    sudo: no
+  enterprise:
+    image:
+      url:
+      name:
+      sha256:
+    ssh_user: openshift
+    sudo: yes
+#  origin:
+#    fedora:
+#      url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
+#      name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+#      sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
-- 
cgit v1.2.3


From 34326ef782bcba8632738a40d3948bb23a3915dc Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Wed, 15 Apr 2015 09:40:38 -0400
Subject: fix missed absolute path reference to mktemp

---
 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
index 1cd83f7be..a320e681e 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -6,7 +6,7 @@
   failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr"
 
 - name: Create a temp directory for the template xml file
-  command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX"
+  command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
   register: mktemp
   when: net_info_result.rc == 1
 
-- 
cgit v1.2.3


From 83ed87d41536f7006b3858a65a587263e3fd2b14 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= <lenaic@lhuard.fr>
Date: Wed, 15 Apr 2015 23:17:30 +0200
Subject: Move `virsh pool-refresh`

The `pool-refresh` command is used to ask libvirt to rescan the content of a volume pool.
This is used to make `libvirt` take into account volumes that were created outside of libvirt control
i.e.: not with a `virsh` command.

`pool-refresh` is useless after a `pool-create` as the content is scanned at creation.
`pool-refresh` is mandatory after having created files inside an existing pool.
---
 .../openshift-cluster/tasks/configure_libvirt_storage_pool.yml        | 4 ----
 playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml        | 3 +++
 2 files changed, 3 insertions(+), 4 deletions(-)

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
index 817acb250..b49879c6a 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -21,7 +21,3 @@
 - name: Create the libvirt storage pool for openshift
   command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
   when: pool_info_result.rc == 1
-
-- name: Refresh the libvirt storage pool for openshift
-  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-  when: pool_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 96d440096..359d0b2f3 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -36,6 +36,9 @@
     creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
   with_items: instances
 
+- name: Refresh the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+
 - name: Create VMs drives
   command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
   with_items: instances
-- 
cgit v1.2.3


From b71037de41baf06889b7a875a0e8914f940ecc2a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= <lenaic@lhuard.fr>
Date: Wed, 15 Apr 2015 23:37:24 +0200
Subject: Make the error message checks locale proof
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On a computer which has a locale set, the error messages look like this:

```
$ virsh net-info foo
erreur :impossible de récupérer le réseau « foo »
erreur :Réseau non trouvé : no network with matching name 'foo'
```
```
$ virsh pool-info foo
erreur :impossible de récupérer le pool « foo »
erreur :Pool de stockage introuvable : no storage pool with matching name 'foo'
```

The classical way to make those tests locale proof is to force a given locale.
Like this:
```
$ LANG=POSIX virsh net-info foo
error: failed to get network 'foo'
error: Réseau non trouvé : no network with matching name 'foo'
```
```
$ LANG=POSIX virsh pool-info foo
error: failed to get pool 'foo'
error: Pool de stockage introuvable : no storage pool with matching name 'foo'
```

It looks like the "Network not found" or "Storage pool not found" parts of the message
are generated by the `libvirtd` daemon and are not subject to the locale of the `virsh`
client.

The clean fix consists in patching `libvirt` so that `virsh` sends its locale to the
`libvirtd` daemon.
But in the mean time, it is safer to have our playbook match the part of the message
which is not subject to the daemon locale.
---
 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 2 +-
 .../libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
index a320e681e..3117d9edc 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -3,7 +3,7 @@
   command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
   register: net_info_result
   changed_when: False
-  failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr"
+  failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
 
 - name: Create a temp directory for the template xml file
   command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
index 817acb250..10715f2b5 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -16,7 +16,7 @@
   command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
   register: pool_info_result
   changed_when: False
-  failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr"
+  failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
 
 - name: Create the libvirt storage pool for openshift
   command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
-- 
cgit v1.2.3


From aaee17b0fc8feddf31d4e5b46a1bfe2f8dabf16b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= <lenaic@lhuard.fr>
Date: Thu, 16 Apr 2015 00:26:45 +0200
Subject: Fix libvirt metadata used to store ansible tags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

According to https://libvirt.org/formatdomain.html#elementsMetadata , the `metadata` tag can contain only one top-level element per namespace.
Because of that, libvirt stored only the `deployment-type-{{ deployment_type }}` tag.
As a consequence, the dynamic inventory reported no `env-{{ cluster }}` group.
This is problematic for the `terminate.yml` playbook which iterates over `groups['tag-env-{{ cluster-id }}']`
The symptom is that `oo_hosts_to_terminate` was not defined.
In the end, as Ansible couldn’t iterate on the value of `groups['oo_hosts_to_terminate']`, it iterated on its letters:
```
TASK: [Destroy VMs] ***********************************************************
failed: [localhost] => (item=['g', 'destroy']) => {"failed": true, "item": ["g", "destroy"]}
msg: virtual machine g not found
failed: [localhost] => (item=['g', 'undefine']) => {"failed": true, "item": ["g", "undefine"]}
msg: virtual machine g not found
failed: [localhost] => (item=['r', 'destroy']) => {"failed": true, "item": ["r", "destroy"]}
msg: virtual machine r not found
failed: [localhost] => (item=['r', 'undefine']) => {"failed": true, "item": ["r", "undefine"]}
msg: virtual machine r not found
failed: [localhost] => (item=['o', 'destroy']) => {"failed": true, "item": ["o", "destroy"]}
msg: virtual machine o not found
failed: [localhost] => (item=['o', 'undefine']) => {"failed": true, "item": ["o", "undefine"]}
msg: virtual machine o not found
failed: [localhost] => (item=['u', 'destroy']) => {"failed": true, "item": ["u", "destroy"]}
msg: virtual machine u not found
failed: [localhost] => (item=['u', 'undefine']) => {"failed": true, "item": ["u", "undefine"]}
msg: virtual machine u not found
failed: [localhost] => (item=['p', 'destroy']) => {"failed": true, "item": ["p", "destroy"]}
msg: virtual machine p not found
failed: [localhost] => (item=['p', 'undefine']) => {"failed": true, "item": ["p", "undefine"]}
msg: virtual machine p not found
failed: [localhost] => (item=['s', 'destroy']) => {"failed": true, "item": ["s", "destroy"]}
msg: virtual machine s not found
failed: [localhost] => (item=['s', 'undefine']) => {"failed": true, "item": ["s", "undefine"]}
msg: virtual machine s not found
failed: [localhost] => (item=['[', 'destroy']) => {"failed": true, "item": ["[", "destroy"]}
msg: virtual machine [ not found
failed: [localhost] => (item=['[', 'undefine']) => {"failed": true, "item": ["[", "undefine"]}
msg: virtual machine [ not found
failed: [localhost] => (item=["'", 'destroy']) => {"failed": true, "item": ["'", "destroy"]}
msg: virtual machine ' not found
failed: [localhost] => (item=["'", 'undefine']) => {"failed": true, "item": ["'", "undefine"]}
msg: virtual machine ' not found
failed: [localhost] => (item=['o', 'destroy']) => {"failed": true, "item": ["o", "destroy"]}
msg: virtual machine o not found
failed: [localhost] => (item=['o', 'undefine']) => {"failed": true, "item": ["o", "undefine"]}
msg: virtual machine o not found
etc…
```
---
 playbooks/libvirt/openshift-cluster/templates/domain.xml | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index 8cb017367..7656249da 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -2,10 +2,12 @@
   <name>{{ item }}</name>
   <memory unit='GiB'>1</memory>
   <metadata xmlns:ansible="https://github.com/ansible/ansible">
-    <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag>
-    <ansible:tag>env-{{ cluster }}</ansible:tag>
-    <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
-    <ansible:tag>host-type-{{ type }}</ansible:tag>
+    <ansible:tags>
+      <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag>
+      <ansible:tag>env-{{ cluster }}</ansible:tag>
+      <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+      <ansible:tag>host-type-{{ type }}</ansible:tag>
+    </ansible:tags>
   </metadata>
   <currentMemory unit='GiB'>1</currentMemory>
   <vcpu placement='static'>2</vcpu>
-- 
cgit v1.2.3


From 0ecefd20d06e67823cb033d4ac7ec4b57e613af6 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Mon, 20 Apr 2015 23:45:15 -0400
Subject: Remove deployment-type tags

---
 playbooks/libvirt/openshift-cluster/templates/domain.xml | 1 -
 1 file changed, 1 deletion(-)

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index 7656249da..df200e374 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -3,7 +3,6 @@
   <memory unit='GiB'>1</memory>
   <metadata xmlns:ansible="https://github.com/ansible/ansible">
     <ansible:tags>
-      <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag>
       <ansible:tag>env-{{ cluster }}</ansible:tag>
       <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
       <ansible:tag>host-type-{{ type }}</ansible:tag>
-- 
cgit v1.2.3


From 378e8a8c9d7e7be7f52691e957f07096ee0b2c82 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Thu, 16 Apr 2015 01:49:29 -0400
Subject: lvm-direct support for aws

- Create a separate docker volume in aws openshift-cluster playbooks
  - default to using ephemeral storage, but allow to be overridden
  - allow root volume settings to be overridden as well

- add user-data cloud-config to bootstrap the installation/configuration of
  docker-storage-setup

- pylint cleanup for oo_filters.py

- remove left over traces to the deployment_type tags which were previously
  removed
  - oo_get_deployment_type_from_groups filter in oo_filters.py
  - cluster list playbooks references to oo_get_deployment_type_from_groups
    filter
---
 playbooks/libvirt/openshift-cluster/list.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'playbooks/libvirt/openshift-cluster')

diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 25a25f791..eaedc4d0d 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -20,4 +20,4 @@
   hosts: oo_list_hosts
   tasks:
   - debug:
-      msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}'
+      msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
-- 
cgit v1.2.3