---
- name: Gather and set facts for node hosts
  hosts: oo_nodes_to_config
  vars:
    t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
  pre_tasks:
  - set_fact:
      openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
    when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
  roles:
  - openshift_facts
  tasks:
  # Since the master generates the node certificates before the nodes are
  # configured, we need to set the node properties beforehand if we do not
  # want the defaults.
  - openshift_facts:
      role: node
      local_facts:
        labels: "{{ openshift_node_labels | default(None) }}"
        annotations: "{{ openshift_node_annotations | default(None) }}"
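        # NOTE: the misspelled 'openshift_scheduleable' below is kept as a
        # fallback, presumably for compatibility with an older variable name.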
        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"

  - name: Check status of node certificates
    stat:
      path: "{{ openshift.common.config_base }}/node/{{ item }}"
    with_items:
    - "system:node:{{ openshift.common.hostname }}.crt"
    - "system:node:{{ openshift.common.hostname }}.key"
    - "system:node:{{ openshift.common.hostname }}.kubeconfig"
    - ca.crt
    - server.key
    - server.crt
    register: stat_result

  - set_fact:
      certs_missing: "{{ stat_result.results | oo_collect(attribute='stat.exists')
                         | list | intersect([false]) }}"
      node_subdir: node-{{ openshift.common.hostname }}
      config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
      node_cert_dir: "{{ openshift.common.config_base }}/node"

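# The next three plays sync node certificates: a temp directory is created on
# localhost, the first master generates any missing certificates and creates a
# tarball of each node's generated config directory, and the tarballs are then
# fetched through localhost and unarchived on the matching nodes.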
- name: Create temp directory for syncing certs
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - name: Create local temp directory for syncing certs
    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
    register: mktemp
    changed_when: False

- name: Create node certificates
  hosts: oo_first_master
  vars:
    nodes_needing_certs: "{{ hostvars
                             | oo_select_keys(groups['oo_nodes_to_config']
                                              | default([]))
                             | oo_filter_list(filter_attr='certs_missing') }}"
    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
  roles:
  - openshift_node_certificates
  post_tasks:
  - name: Create a tarball of the node config directories
    command: >
      tar -czvf {{ item.config_dir }}.tgz
      --transform 's|system:{{ item.node_subdir }}|node|'
      -C {{ item.config_dir }} .
    args:
      creates: "{{ item.config_dir }}.tgz"
    with_items: "{{ nodes_needing_certs }}"

  - name: Retrieve the node config tarballs from the master
    fetch:
      src: "{{ item.config_dir }}.tgz"
      dest: "{{ sync_tmpdir }}/"
      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: "{{ nodes_needing_certs }}"

- name: Deploy node certificates
  hosts: oo_nodes_to_config
  vars:
    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
  tasks:
  - name: Ensure certificate directory exists
    file:
      path: "{{ node_cert_dir }}"
      state: directory

  # TODO: notify restart node
  # possibly test service started time against certificate/config file
  # timestamps in node to trigger notify
  - name: Unarchive the tarball on the node
    unarchive:
      src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
      dest: "{{ node_cert_dir }}"
    when: certs_missing

- name: Evaluate node groups
  hosts: localhost
  become: no
  connection: local
  tasks:
  - name: Evaluate oo_containerized_master_nodes
    add_host:
      name: "{{ item }}"
      groups: oo_containerized_master_nodes
      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
      ansible_sudo: "{{ g_sudo | default(omit) }}"
    with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
    when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)

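# Containerized nodes that are also masters are configured one at a time
# (serial: 1), presumably so that restarting docker and the node service does
# not take down the master API on every master host at once.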
- name: Configure containerized master node instances
  hosts: oo_containerized_master_nodes
  serial: 1
  vars:
    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
    # TODO: configure these based on
    # hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of
    # hardcoding
    openshift_docker_hosted_registry_insecure: True
    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
  roles:
  - openshift_node

- name: Configure node instances
  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
  vars:
    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
    # TODO: configure these based on
    # hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of
    # hardcoding
    openshift_docker_hosted_registry_insecure: True
    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
  roles:
  - openshift_node

- name: Gather and set facts for flannel certificates
  hosts: oo_nodes_to_config
  tasks:
  - name: Check status of flannel external etcd certificates
    stat:
      path: "{{ openshift.common.config_base }}/node/{{ item }}"
    with_items:
    - node.etcd-client.crt
    - node.etcd-ca.crt
    register: g_external_etcd_flannel_cert_stat_result
    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)

  - set_fact:
      etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
                                             | oo_collect(attribute='stat.exists')
                                             | list | intersect([false]) }}"
      etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
      etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
      etcd_cert_prefix: node.etcd-
    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)

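# The flannel etcd client certificates follow the same pattern as the node
# certificates above: missing certs are generated on the first etcd host,
# tarred up, fetched through the localhost temp directory, and then copied to
# the nodes.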
- name: Configure flannel etcd certificates
  hosts: oo_first_etcd
  vars:
    etcd_generated_certs_dir: /etc/etcd/generated_certs
    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
  pre_tasks:
  - set_fact:
      etcd_needing_client_certs: "{{ hostvars
                                     | oo_select_keys(groups['oo_nodes_to_config'])
                                     | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
  roles:
  - role: etcd_certificates
    when: openshift_use_flannel | default(false) | bool
  post_tasks:
  - name: Create a tarball of the etcd flannel certs
    command: >
      tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
      -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
    args:
      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
    with_items: "{{ etcd_needing_client_certs }}"
    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing

  - name: Retrieve the etcd cert tarballs
    fetch:
      src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
      dest: "{{ sync_tmpdir }}/"
      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: "{{ etcd_needing_client_certs }}"
    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing

- name: Copy the external etcd flannel certs to the nodes
  hosts: oo_nodes_to_config
  vars:
    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
  tasks:
  - name: Ensure certificate directory exists
    file:
      path: "{{ openshift.common.config_base }}/node"
      state: directory
    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing

  - name: Unarchive the tarball on the node
    unarchive:
      src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
      dest: "{{ etcd_cert_config_dir }}"
    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing

  - name: Restrict permissions on the flannel etcd certs
    file:
      path: "{{ etcd_cert_config_dir }}/{{ item }}"
      owner: root
      group: root
      mode: 0600
    with_items:
    - node.etcd-client.crt
    - node.etcd-client.key
    - node.etcd-ca.crt
    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing

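# Flannel and Nuage are applied conditionally based on the cluster facts
# (openshift.common.use_flannel / use_nuage); the group_by task below creates
# per-deployment-type groups that later plays (e.g. the online-deployment
# play) can target.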
- name: Additional node config
  hosts: oo_nodes_to_config
  vars:
    # TODO: Prefix flannel role variables.
    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
    etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
  roles:
  - role: flannel
    when: openshift.common.use_flannel | bool
  - role: nuage_node
    when: openshift.common.use_nuage | bool
  - role: nickhammond.logrotate
  tasks:
  - name: Create group for deployment type
    group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
    changed_when: False

- name: Delete temporary directory on localhost
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - file: name={{ mktemp.stdout }} state=absent
    changed_when: False

# Additional config for online type deployments
- name: Additional instance config
  hosts: oo_nodes_deployment_type_online
  gather_facts: no
  roles:
  - os_env_extras
  - os_env_extras_node

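# The final play runs on the first master: it waits for the master API to be
# ready (needed when the master is containerized and was just restarted) and
# then applies the openshift_manage_node role to set schedulability for the
# configured nodes.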
- name: Set schedulability
  hosts: oo_first_master
  vars:
    openshift_nodes: "{{ hostvars
                         | oo_select_keys(groups['oo_nodes_to_config'])
                         | oo_collect('openshift.common.hostname') }}"
    openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
  pre_tasks:
  # Necessary because when a node is also a master, the master is restarted
  # after the node restarts docker, and it can take up to 60 seconds for
  # systemd to start the master again.
  - name: Wait for master API to become available before proceeding
    # Using curl here since the uri module requires python-httplib2 and
    # wait_for port doesn't provide health information.
    command: >
      curl -k --silent {{ openshift.master.api_url }}/healthz/ready
    register: api_available_output
    until: api_available_output.stdout == 'ok'
    retries: 120
    delay: 1
    changed_when: false
    when: openshift.common.is_containerized | bool
  roles:
  - openshift_manage_node