author    Scott Dodson <sdodson@redhat.com>  2017-01-17 22:37:11 -0500
committer GitHub <noreply@github.com>        2017-01-17 22:37:11 -0500
commit    7b512bf5fc36ee9ad2df65d8e129aa52c939d98e (patch)
tree      0d31786e7a29af19b2bfbabbe49b2ed72c862a54 /roles/openshift_logging
parent    a2d9da8c95511c968a6b7d4c1247f017df14a5ce (diff)
parent    598b2652ac9bfe94622cbe6324d4f121bf996c70 (diff)
Merge pull request #2640 from ewolinetz/logging_deployer_tasks
Logging deployer tasks
Diffstat (limited to 'roles/openshift_logging')
-rw-r--r--  roles/openshift_logging/README.md | 88
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 85
-rw-r--r--  roles/openshift_logging/files/curator.yml | 18
-rw-r--r--  roles/openshift_logging/files/elasticsearch-logging.yml | 72
-rw-r--r--  roles/openshift_logging/files/es_migration.sh | 79
-rw-r--r--  roles/openshift_logging/files/fluent.conf | 34
-rw-r--r--  roles/openshift_logging/files/fluentd-throttle-config.yaml | 7
-rw-r--r--  roles/openshift_logging/files/generate-jks.sh | 168
-rw-r--r--  roles/openshift_logging/files/logging-deployer-sa.yaml | 6
-rw-r--r--  roles/openshift_logging/files/secure-forward.conf | 24
-rw-r--r--  roles/openshift_logging/files/server-tls.json | 5
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 38
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 340
-rw-r--r--  roles/openshift_logging/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 114
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 217
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterrolebindings.yaml | 13
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterroles.yaml | 11
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 117
-rw-r--r--  roles/openshift_logging/tasks/generate_deploymentconfigs.yaml | 65
-rw-r--r--  roles/openshift_logging/tasks/generate_pems.yaml | 36
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml | 49
-rw-r--r--  roles/openshift_logging/tasks/generate_rolebindings.yaml | 12
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 21
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 77
-rw-r--r--  roles/openshift_logging/tasks/generate_serviceaccounts.yaml | 14
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml | 87
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml | 51
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 107
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml | 54
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml | 58
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 49
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml | 54
-rw-r--r--  roles/openshift_logging/tasks/label_node.yaml | 29
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 40
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml | 29
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 52
-rw-r--r--  roles/openshift_logging/tasks/scale.yaml | 28
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 104
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 97
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml | 41
-rw-r--r--  roles/openshift_logging/templates/clusterrole.j2 | 21
-rw-r--r--  roles/openshift_logging/templates/clusterrolebinding.j2 | 24
-rw-r--r--  roles/openshift_logging/templates/curator.j2 | 97
-rw-r--r--  roles/openshift_logging/templates/elasticsearch.yml.j2 | 75
-rw-r--r--  roles/openshift_logging/templates/es.j2 | 105
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 149
-rw-r--r--  roles/openshift_logging/templates/jks_pod.j2 | 28
-rw-r--r--  roles/openshift_logging/templates/kibana.j2 | 110
-rw-r--r--  roles/openshift_logging/templates/oauth-client.j2 | 15
-rw-r--r--  roles/openshift_logging/templates/pvc.j2 | 27
-rw-r--r--  roles/openshift_logging/templates/rolebinding.j2 | 14
-rw-r--r--  roles/openshift_logging/templates/route_reencrypt.j2 | 25
-rw-r--r--  roles/openshift_logging/templates/secret.j2 | 9
-rw-r--r--  roles/openshift_logging/templates/service.j2 | 28
-rw-r--r--  roles/openshift_logging/templates/serviceaccount.j2 | 16
-rw-r--r--  roles/openshift_logging/templates/signing.conf.j2 | 103
-rw-r--r--  roles/openshift_logging/vars/main.yaml | 8
58 files changed, 3459 insertions, 0 deletions
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
new file mode 100644
index 000000000..2cc2c48ee
--- /dev/null
+++ b/roles/openshift_logging/README.md
@@ -0,0 +1,88 @@
+## openshift_logging Role
+
+### Please note this role is still a work in progress
+
+This role installs the Aggregated Logging stack. It should be run against
+a single host; it will create any missing certificates and API objects that the current
+[logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) creates.
+
+As part of the installation, it is recommended that you add the Fluentd node selector label
+to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
+
+### Required vars:
+
+- `openshift_logging_install_logging`: When `True`, the `openshift_logging` role will install Aggregated Logging.
+- `openshift_logging_upgrade_logging`: When `True`, the `openshift_logging` role will upgrade Aggregated Logging.
+
+When both `openshift_logging_install_logging` and `openshift_logging_upgrade_logging` are `False`, the `openshift_logging` role will uninstall Aggregated Logging.
+
+### Optional vars:
+
+- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
+- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
+- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
+- `master_url`: The URL for the Kubernetes master. This does not need to be public facing, but it should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.cluster.local'.
+- `public_master_url`: The public-facing URL for the Kubernetes master, used for authentication redirection. Defaults to 'https://localhost:8443'.
+- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.
+- `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.
+- `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'.
+- `openshift_logging_curator_run_minute`: The minute of the hour that Curator will run at. Defaults to '0'.
+- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'.
+- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
+- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
+- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
+- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
+
+- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
+- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'.
+- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
+
+- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
+- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
+- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
+- `openshift_logging_fluentd_use_journal`: Whether or not Fluentd should read log entries from Journal. Defaults to 'False'. NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver and may overwrite this value.
+- `openshift_logging_fluentd_journal_read_from_head`: Whether or not Fluentd will try to read from the head of Journal when first starting up; using this may cause a delay in ES receiving current log records. Defaults to 'False'.
+- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
+
+- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
+- `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'.
+- `openshift_logging_es_ca`: The location of the CA certificate Fluentd uses to communicate with its openshift_logging_es_host. Defaults to '/etc/fluent/keys/ca'.
+- `openshift_logging_es_client_cert`: The location of the client certificate Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/cert'.
+- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
+
+- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
+- `openshift_logging_es_cpu_limit`: The CPU limit for the ES cluster. Unset if not specified.
+- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '1024Mi'.
+- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
+- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
+- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs; when not provided, the role will not generate any PVCs. Defaults to '""'.
+- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
+- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
+- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
+
+When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
+same as above for their non-ops counterparts, but apply to the OPS cluster instance:
+- `openshift_logging_es_ops_host`: logging-es-ops
+- `openshift_logging_es_ops_port`: 9200
+- `openshift_logging_es_ops_ca`: /etc/fluent/keys/ca
+- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
+- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
+- `openshift_logging_es_ops_cluster_size`: 1
+- `openshift_logging_es_ops_cpu_limit`: The CPU limit for the ES ops cluster. Unset if not specified.
+- `openshift_logging_es_ops_memory_limit`: 1024Mi
+- `openshift_logging_es_ops_pvc_dynamic`: False
+- `openshift_logging_es_ops_pvc_size`: ""
+- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
+- `openshift_logging_es_ops_recover_after_time`: 5m
+- `openshift_logging_es_ops_storage_group`: 65534
+- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
+- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
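As a usage illustration (not part of this commit), a minimal playbook wiring the role together might look like the sketch below; the host pattern and all variable values are assumptions chosen for the example:

```yaml
# Hypothetical playbook sketch: run the role against a single host.
# Only the install/upgrade switches are required; the remaining vars
# override optional defaults purely for illustration.
- hosts: masters[0]
  roles:
  - role: openshift_logging
    openshift_logging_install_logging: True
    openshift_logging_upgrade_logging: False
    openshift_logging_namespace: logging
    openshift_logging_use_ops: False
    openshift_logging_kibana_hostname: kibana.example.com
    openshift_logging_es_cluster_size: 1
```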
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
new file mode 100644
index 000000000..919c53787
--- /dev/null
+++ b/roles/openshift_logging/defaults/main.yml
@@ -0,0 +1,85 @@
+---
+openshift_logging_image_prefix: docker.io/openshift/origin-
+openshift_logging_image_version: latest
+openshift_logging_use_ops: False
+master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+public_master_url: "https://{{openshift.common.public_hostname}}:8443"
+openshift_logging_namespace: logging
+openshift_logging_install_logging: True
+
+openshift_logging_curator_default_days: 30
+openshift_logging_curator_run_hour: 0
+openshift_logging_curator_run_minute: 0
+openshift_logging_curator_run_timezone: UTC
+openshift_logging_curator_script_log_level: INFO
+openshift_logging_curator_log_level: ERROR
+openshift_logging_curator_cpu_limit: 100m
+openshift_logging_curator_memory_limit: null
+
+openshift_logging_curator_ops_cpu_limit: 100m
+openshift_logging_curator_ops_memory_limit: null
+
+openshift_logging_kibana_hostname: "kibana.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: null
+openshift_logging_kibana_replica_count: 1
+
+openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_ops_cpu_limit: null
+openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_proxy_debug: false
+openshift_logging_kibana_ops_proxy_cpu_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: null
+openshift_logging_kibana_ops_replica_count: 1
+
+openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
+openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_es_copy: false
+openshift_logging_fluentd_use_journal: false
+openshift_logging_fluentd_journal_read_from_head: false
+openshift_logging_fluentd_hosts: ['--all']
+
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
+openshift_logging_es_cluster_size: 1
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_memory_limit: 1024Mi
+openshift_logging_es_pv_selector: null
+openshift_logging_es_pvc_dynamic: False
+openshift_logging_es_pvc_size: ""
+openshift_logging_es_pvc_prefix: logging-es
+openshift_logging_es_recover_after_time: 5m
+openshift_logging_es_storage_group: 65534
+
+# allow cluster-admin or cluster-reader to view operations index
+openshift_logging_es_ops_allow_cluster_reader: False
+
+openshift_logging_es_ops_host: logging-es-ops
+openshift_logging_es_ops_port: 9200
+openshift_logging_es_ops_ca: /etc/fluent/keys/ca
+openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_ops_client_key: /etc/fluent/keys/key
+openshift_logging_es_ops_cluster_size: 1
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 1024Mi
+openshift_logging_es_ops_pv_selector: null
+openshift_logging_es_ops_pvc_dynamic: False
+openshift_logging_es_ops_pvc_size: ""
+openshift_logging_es_ops_pvc_prefix: logging-es-ops
+openshift_logging_es_ops_recover_after_time: 5m
+openshift_logging_es_ops_storage_group: 65534
+
+# the following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may cause your cluster to not operate correctly
+#es_logging_contents:
+#es_config_contents:
+#curator_config_contents:
+#fluentd_config_contents:
+#fluentd_throttle_contents:
+#fluentd_secureforward_contents:
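The commented `*_contents` hooks above replace the bundled configmap files wholesale. A sketch of supplying one of them from a playbook or inventory var, assuming content modeled on the files/secure-forward.conf example shipped later in this commit (all values are placeholders):

```yaml
# Hypothetical override sketch: inline replacement for the default
# secure-forward.conf that the role would render into the Fluentd
# configmap instead of the bundled file.
fluentd_secureforward_contents: |
  @type secure_forward
  self_hostname ${HOSTNAME}
  shared_key your_shared_secret      # assumption: your own secret string
  secure yes
  <server>
    host server.fqdn.example.com     # assumption: your receiver's FQDN
    port 24284
  </server>
```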
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging/files/curator.yml
new file mode 100644
index 000000000..8d62d8e7d
--- /dev/null
+++ b/roles/openshift_logging/files/curator.yml
@@ -0,0 +1,18 @@
+# Logging example curator config file
+
+# uncomment and use this to override the defaults from env vars
+#.defaults:
+# delete:
+# days: 30
+# runhour: 0
+# runminute: 0
+
+# to keep ops logs for a different duration:
+#.operations:
+# delete:
+# weeks: 8
+
+# example for a normal project
+#myapp:
+# delete:
+# weeks: 1
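Read literally, the commented examples above decode to a config like the following (the retention values are only the ones suggested in the comments):

```yaml
# Uncommented version of the example: delete application logs older
# than 30 days, operations logs older than 8 weeks, and logs for the
# project "myapp" older than 1 week.
.defaults:
  delete:
    days: 30
.operations:
  delete:
    weeks: 8
myapp:
  delete:
    weeks: 1
```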
diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/files/elasticsearch-logging.yml
new file mode 100644
index 000000000..377abe21f
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch-logging.yml
@@ -0,0 +1,72 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+ # log action execution errors for easier debugging
+ action: WARN
+ # reduce the logging for aws, too much is logged under the default INFO
+ com.amazonaws: WARN
+ io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
+ io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+
+ # gateway
+ #gateway: DEBUG
+ #index.gateway: DEBUG
+
+ # peer shard recovery
+ #indices.recovery: DEBUG
+
+ # discovery
+ #discovery: TRACE
+
+ index.search.slowlog: TRACE, index_search_slow_log_file
+ index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+ # search-guard
+ com.floragunn.searchguard: WARN
+
+additivity:
+ index.search.slowlog: false
+ index.indexing.slowlog: false
+
+appender:
+ console:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_search_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_indexing_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh
new file mode 100644
index 000000000..339b5a1b2
--- /dev/null
+++ b/roles/openshift_logging/files/es_migration.sh
@@ -0,0 +1,79 @@
+CA=${1:-/etc/openshift/logging/ca.crt}
+KEY=${2:-/etc/openshift/logging/system.admin.key}
+CERT=${3:-/etc/openshift/logging/system.admin.crt}
+openshift_logging_es_host=${4:-logging-es}
+openshift_logging_es_port=${5:-9200}
+namespace=${6:-logging}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# skip indices that contain a uuid
+# get a list of unique projects
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_proj_uuid_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
+ echo "No Elasticsearch pods found running. Cannot update common data model."
+ exit 1
+fi
+
+count=$(get_list_of_indices | wc -l)
+if [ $count -eq 0 ]; then
+ echo No matching indices found - skipping update_for_uuid
+else
+ echo Creating aliases for $count index patterns . . .
+ {
+ echo '{"actions":['
+ get_list_of_indices | \
+ while IFS=. read proj ; do
+ # e.g. make test.uuid.* an alias of test.* so we can search for
+ # /test.uuid.*/_search and get both the test.uuid.* and
+ # the test.* indices
+ uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+ [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+ } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+ echo No matching indexes found - skipping update_for_common_data_model
+ exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+ echo '{"actions":['
+ get_list_of_proj_uuid_indices | \
+ while IFS=. read proj uuid ; do
+ # e.g. make project.test.uuid.* an alias of test.uuid.* so we can search for
+ # /project.test.uuid.*/_search and get both the test.uuid.* and
+ # the project.test.uuid.* indices
+ echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
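A sketch of how this script might be invoked from a task, passing its own defaults as explicit positional arguments (the task itself and the PROJ_PREFIX value are illustrative assumptions, not taken from this commit):

```yaml
- name: Run ES common-data-model migration (hypothetical invocation)
  command: >
    bash es_migration.sh
    /etc/openshift/logging/ca.crt
    /etc/openshift/logging/system.admin.key
    /etc/openshift/logging/system.admin.crt
    logging-es 9200 logging
  environment:
    PROJ_PREFIX: "project."   # assumption: the prefix used by the second alias pass
```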
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
new file mode 100644
index 000000000..aa843e983
--- /dev/null
+++ b/roles/openshift_logging/files/fluent.conf
@@ -0,0 +1,34 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+
+<label @INGRESS>
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+</label>
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging/files/fluentd-throttle-config.yaml
new file mode 100644
index 000000000..375621ff1
--- /dev/null
+++ b/roles/openshift_logging/files/fluentd-throttle-config.yaml
@@ -0,0 +1,7 @@
+# Logging example fluentd throttling config file
+
+#example-project:
+# read_lines_limit: 10
+#
+#.operations:
+# read_lines_limit: 100
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
new file mode 100644
index 000000000..995ec0b98
--- /dev/null
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+set -ex
+
+function generate_JKS_chain() {
+ dir=${SCRATCH_DIR:-_output}
+ ADD_OID=$1
+ NODE_NAME=$2
+ CERT_NAMES=${3:-$NODE_NAME}
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ rm -rf $NODE_NAME
+
+ extension_names=""
+ for name in ${CERT_NAMES//,/ }; do
+ extension_names="${extension_names},dns:${name}"
+ done
+
+ if [ "$ADD_OID" = true ]; then
+ extension_names="${extension_names},oid:1.2.3.4.5.5"
+ fi
+
+ echo Generating keystore and certificate for node $NODE_NAME
+
+ keytool -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ keytool -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -file $dir/$NODE_NAME.csr \
+ -keyalg rsa \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Sign certificate request with CA
+
+ openssl ca \
+ -in $dir/$NODE_NAME.csr \
+ -notext \
+ -out $dir/$NODE_NAME.crt \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ keytool \
+ -import \
+ -file $dir/$NODE_NAME.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function generate_JKS_client_cert() {
+ NODE_NAME="$1"
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets
+
+ echo Generating keystore and certificate for node ${NODE_NAME}
+
+ keytool -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ keytool -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -file $dir/$NODE_NAME.jks.csr \
+ -keyalg rsa \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Sign certificate request with CA
+ openssl ca \
+ -in "$dir/$NODE_NAME.jks.csr" \
+ -notext \
+ -out "$dir/$NODE_NAME.jks.crt" \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ keytool \
+ -import \
+ -file $dir/$NODE_NAME.jks.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function join { local IFS="$1"; shift; echo "$*"; }
+
+function createTruststore() {
+
+ echo "Import CA to truststore for validating client certs"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/truststore.jks \
+ -storepass $ts_pass \
+ -noprompt -alias sig-ca
+}
+
+dir="$CERT_DIR"
+SCRATCH_DIR=$dir
+
+if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_client_cert "system.admin"
+fi
+
+if [[ ! -f $dir/elasticsearch.jks || -z "$(keytool -list -keystore $dir/elasticsearch.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_chain true elasticsearch "$(join , logging-es{,-ops})"
+fi
+
+if [[ ! -f $dir/logging-es.jks || -z "$(keytool -list -keystore $dir/logging-es.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_chain false logging-es "$(join , logging-es{,-ops}{,-cluster}{,.${PROJECT}.svc.cluster.local})"
+fi
+
+[ ! -f $dir/truststore.jks ] && createTruststore
+
+# necessary so that the job knows it completed successfully
+exit 0
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
new file mode 100644
index 000000000..334c9402b
--- /dev/null
+++ b/roles/openshift_logging/files/logging-deployer-sa.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: logging-deployer
+secrets:
+- name: logging-deployer
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if ipaddress is used for host
+# hostlabel server.fqdn.example.com
+# </server>
diff --git a/roles/openshift_logging/files/server-tls.json b/roles/openshift_logging/files/server-tls.json
new file mode 100644
index 000000000..86deb23e3
--- /dev/null
+++ b/roles/openshift_logging/files/server-tls.json
@@ -0,0 +1,5 @@
+// See for available options: https://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener
+tls_options = {
+ ciphers: 'kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!DES:!EXP:!SEED:!IDEA:+3DES',
+ honorCipherOrder: true
+}
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
new file mode 100644
index 000000000..007be3ac0
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -0,0 +1,38 @@
+'''
+ Openshift Logging class that provides useful filters used in Logging
+'''
+
+import random
+
+
+def random_word(source_alpha, length):
+ ''' Returns a random word given the source of characters to pick from and resulting length '''
+ return ''.join(random.choice(source_alpha) for i in range(length))
+
+
+def entry_from_named_pair(register_pairs, key):
+ ''' Returns the entry in key given results provided by register_pairs '''
+ results = register_pairs.get("results")
+ if results is None:
+ raise RuntimeError("The dict argument does not have a 'results' entry. "
+ "Must not have been created using 'register' in a loop")
+ for result in results:
+ item = result.get("item")
+ if item is not None:
+ name = item.get("name")
+ if name == key:
+ return result["content"]
+ raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Logging Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'random_word': random_word,
+ 'entry_from_named_pair': entry_from_named_pair,
+ }
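For context, `random_word` is consumed later in this same commit (tasks/generate_certs.yaml) to mint proxy secrets; a representative task:

```yaml
# Usage sketch mirroring tasks/generate_certs.yaml: draw a 200-character
# session secret from the given alphabet via the random_word filter.
- set_fact:
    session_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(200) }}"
```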
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
new file mode 100644
index 000000000..8bbfdf7bf
--- /dev/null
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -0,0 +1,340 @@
+'''
+---
+module: openshift_logging_facts
+version_added: ""
+short_description: Gather facts about the OpenShift logging stack
+description:
+ - Determine the current facts about the OpenShift logging stack (e.g. cluster size)
+options:
+author: Red Hat, Inc
+'''
+
+import copy
+import json
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from subprocess import * # noqa: F402,F403
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+import yaml
+
+EXAMPLES = """
+- action: openshift_logging_facts
+"""
+
+RETURN = """
+"""
+
+DEFAULT_OC_OPTIONS = ["-o", "json"]
+
+# constants used for various labels and selectors
+COMPONENT_KEY = "component"
+LOGGING_INFRA_KEY = "logging-infra"
+
+# selectors for filtering resources
+DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
+LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
+ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
+COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
+
+
+class OCBaseCommand(object):
+ ''' The base class used to query openshift '''
+
+ def __init__(self, binary, kubeconfig, namespace):
+ ''' the init method of OCBaseCommand class '''
+ self.binary = binary
+ self.kubeconfig = kubeconfig
+ self.user = self.get_system_admin(self.kubeconfig)
+ self.namespace = namespace
+
+ # pylint: disable=no-self-use
+ def get_system_admin(self, kubeconfig):
+ ''' Retrieves the system admin '''
+ with open(kubeconfig, 'r') as kubeconfig_file:
+ config = yaml.safe_load(kubeconfig_file)
+ for user in config["users"]:
+ if user["name"].startswith("system:admin"):
+ return user["name"]
+ raise Exception("Unable to find system:admin in: " + kubeconfig)
+
+ # pylint: disable=too-many-arguments, dangerous-default-value
+ def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
+ ''' Wrapper method for the "oc" command '''
+ cmd = [self.binary, sub, kind]
+ if name is not None:
+ cmd = cmd + [name]
+ if namespace is not None:
+ cmd = cmd + ["-n", namespace]
+ if add_options is None:
+ add_options = []
+ cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
+ try:
+ process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405
+ out, err = process.communicate()
+ if len(err) > 0:
+ if 'not found' in err:
+ return {'items': []}
+ if 'No resources found' in err:
+ return {'items': []}
+ raise Exception(err)
+ except Exception as excp:
+ err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
+ raise Exception(err)
+
+ return json.loads(out)
+
+
+class OpenshiftLoggingFacts(OCBaseCommand):
+ ''' The class structure for holding the OpenshiftLogging Facts'''
+ name = "facts"
+
+ def __init__(self, logger, binary, kubeconfig, namespace):
+ ''' The init method for OpenshiftLoggingFacts '''
+ super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
+ self.logger = logger
+ self.facts = dict()
+
+ def default_keys_for(self, kind):
+ ''' Sets the default key values for kind '''
+ for comp in COMPONENTS:
+ self.add_facts_for(comp, kind)
+
+ def add_facts_for(self, comp, kind, name=None, facts=None):
+ ''' Add facts for the provided kind '''
+ if comp not in self.facts:
+ self.facts[comp] = dict()
+ if kind not in self.facts[comp]:
+ self.facts[comp][kind] = dict()
+ if name:
+ self.facts[comp][kind][name] = facts
+
+ def facts_for_routes(self, namespace):
+ ''' Gathers facts for Routes in logging namespace '''
+ self.default_keys_for("routes")
+ route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
+ if len(route_list["items"]) == 0:
+ return None
+ for route in route_list["items"]:
+ name = route["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
+ self.facts["agl_namespace"] = namespace
+
+ def facts_for_daemonsets(self, namespace):
+ ''' Gathers facts for Daemonsets in logging namespace '''
+ self.default_keys_for("daemonsets")
+ ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
+ add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
+ if len(ds_list["items"]) == 0:
+ return
+ for ds_item in ds_list["items"]:
+ name = ds_item["metadata"]["name"]
+ comp = self.comp(name)
+ spec = ds_item["spec"]["template"]["spec"]
+ container = spec["containers"][0]
+ result = dict(
+ selector=ds_item["spec"]["selector"],
+ image=container["image"],
+ resources=container["resources"],
+ nodeSelector=spec["nodeSelector"],
+ serviceAccount=spec["serviceAccount"],
+ terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
+ )
+ self.add_facts_for(comp, "daemonsets", name, result)
+
+ def facts_for_pvcs(self, namespace):
+ ''' Gathers facts for PVCS in logging namespace'''
+ self.default_keys_for("pvcs")
+ pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
+ if len(pvclist["items"]) == 0:
+ return
+ for pvc in pvclist["items"]:
+ name = pvc["metadata"]["name"]
+ comp = self.comp(name)
+ self.add_facts_for(comp, "pvcs", name, dict())
+
+ def facts_for_deploymentconfigs(self, namespace):
+ ''' Gathers facts for DeploymentConfigs in logging namespace '''
+ self.default_keys_for("deploymentconfigs")
+ dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
+ if len(dclist["items"]) == 0:
+ return
+ dcs = dclist["items"]
+ for dc_item in dcs:
+ name = dc_item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ spec = dc_item["spec"]["template"]["spec"]
+ facts = dict(
+ selector=dc_item["spec"]["selector"],
+ replicas=dc_item["spec"]["replicas"],
+ serviceAccount=spec["serviceAccount"],
+ containers=dict(),
+ volumes=dict()
+ )
+ if "volumes" in spec:
+ for vol in spec["volumes"]:
+ clone = copy.deepcopy(vol)
+ clone.pop("name", None)
+ facts["volumes"][vol["name"]] = clone
+ for container in spec["containers"]:
+ facts["containers"][container["name"]] = dict(
+ image=container["image"],
+ resources=container["resources"],
+ )
+ self.add_facts_for(comp, "deploymentconfigs", name, facts)
+
+ def facts_for_services(self, namespace):
+ ''' Gathers facts for services in logging namespace '''
+ self.default_keys_for("services")
+ servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ if len(servicelist["items"]) == 0:
+ return
+ for service in servicelist["items"]:
+ name = service["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ self.add_facts_for(comp, "services", name, dict())
+
+ def facts_for_configmaps(self, namespace):
+ ''' Gathers facts for configmaps in logging namespace '''
+ self.default_keys_for("configmaps")
+ a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ if len(a_list["items"]) == 0:
+ return
+ for item in a_list["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ self.add_facts_for(comp, "configmaps", name, item["data"])
+
+ def facts_for_oauthclients(self, namespace):
+ ''' Gathers facts for oauthclients used with logging '''
+ self.default_keys_for("oauthclients")
+ a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ if len(a_list["items"]) == 0:
+ return
+ for item in a_list["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ result = dict(
+ redirectURIs=item["redirectURIs"]
+ )
+ self.add_facts_for(comp, "oauthclients", name, result)
+
+ def facts_for_secrets(self, namespace):
+ ''' Gathers facts for secrets in the logging namespace '''
+ self.default_keys_for("secrets")
+ a_list = self.oc_command("get", "secrets", namespace=namespace)
+ if len(a_list["items"]) == 0:
+ return
+ for item in a_list["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None and item["type"] == "Opaque":
+ result = dict(
+ keys=item["data"].keys()
+ )
+ self.add_facts_for(comp, "secrets", name, result)
+
+ def facts_for_sccs(self):
+ ''' Gathers facts for SCCs used with logging '''
+ self.default_keys_for("sccs")
+ scc = self.oc_command("get", "scc", name="privileged")
+ if len(scc["users"]) == 0:
+ return
+ for item in scc["users"]:
+ comp = self.comp(item)
+ if comp is not None:
+ self.add_facts_for(comp, "sccs", "privileged", dict())
+
+ def facts_for_clusterrolebindings(self, namespace):
+ ''' Gathers ClusterRoleBindings used with logging '''
+ self.default_keys_for("clusterrolebindings")
+ role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
+ if "subjects" not in role or len(role["subjects"]) == 0:
+ return
+ for item in role["subjects"]:
+ comp = self.comp(item["name"])
+ if comp is not None and namespace == item["namespace"]:
+ self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
+
+# this needs to end up nested under the service account...
+ def facts_for_rolebindings(self, namespace):
+ ''' Gathers facts for RoleBindings used with logging '''
+ self.default_keys_for("rolebindings")
+ role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
+ if "subjects" not in role or len(role["subjects"]) == 0:
+ return
+ for item in role["subjects"]:
+ comp = self.comp(item["name"])
+ if comp is not None and namespace == item["namespace"]:
+ self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
+
+ # pylint: disable=no-self-use, too-many-return-statements
+ def comp(self, name):
+ ''' Does a comparison to evaluate the logging component '''
+ if name.startswith("logging-curator-ops"):
+ return "curator_ops"
+ elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
+ return "kibana_ops"
+ elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
+ return "elasticsearch_ops"
+ elif name.startswith("logging-curator"):
+ return "curator"
+ elif name.startswith("logging-kibana") or name.startswith("kibana"):
+ return "kibana"
+ elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
+ return "elasticsearch"
+ elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
+ return "fluentd"
+ else:
+ return None
+
+ def build_facts(self):
+ ''' Builds the logging facts and returns them '''
+ self.facts_for_routes(self.namespace)
+ self.facts_for_daemonsets(self.namespace)
+ self.facts_for_deploymentconfigs(self.namespace)
+ self.facts_for_services(self.namespace)
+ self.facts_for_configmaps(self.namespace)
+ self.facts_for_sccs()
+ self.facts_for_oauthclients(self.namespace)
+ self.facts_for_clusterrolebindings(self.namespace)
+ self.facts_for_rolebindings(self.namespace)
+ self.facts_for_secrets(self.namespace)
+ self.facts_for_pvcs(self.namespace)
+
+ return self.facts
+
+
+def main():
+ ''' The main method '''
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ admin_kubeconfig={"required": True, "type": "str"},
+ oc_bin={"required": True, "type": "str"},
+ openshift_logging_namespace={"required": True, "type": "str"}
+ ),
+ supports_check_mode=False
+ )
+ try:
+ cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
+ module.params['openshift_logging_namespace'])
+ module.exit_json(
+ ansible_facts={"openshift_logging_facts": cmd.build_facts()}
+ )
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception as error:
+ module.fail_json(msg=str(error))
+
+
+if __name__ == '__main__':
+ main()
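Derived from the module's argument_spec, an invocation sketch; the Jinja expressions are the ones this role uses elsewhere for the kubeconfig and client binary:

```yaml
# All three parameters are required; results land under the
# openshift_logging_facts ansible fact.
- openshift_logging_facts:
    admin_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
    oc_bin: "{{ openshift.common.client_binary }}"
    openshift_logging_namespace: "{{ openshift_logging_namespace }}"
```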
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
new file mode 100644
index 000000000..7050e51db
--- /dev/null
+++ b/roles/openshift_logging/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
new file mode 100644
index 000000000..908f3ee88
--- /dev/null
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -0,0 +1,114 @@
+---
+- name: stop logging
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete logging api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - dc
+ - rc
+ - svc
+ - routes
+ - templates
+ - daemonset
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+
+# delete the oauthclient
+- name: delete oauthclient kibana-proxy
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete any image streams that we may have created
+- name: delete logging is
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our old secrets
+- name: delete logging secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-fluentd
+ - logging-elasticsearch
+ - logging-kibana
+ - logging-kibana-proxy
+ - logging-curator
+ ignore_errors: yes
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete role bindings
+- name: delete rolebindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-elasticsearch-view-role
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster roles
+- name: delete cluster roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our service accounts
+- name: delete service accounts
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete serviceaccount {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - aggregated-logging-elasticsearch
+ - aggregated-logging-kibana
+ - aggregated-logging-curator
+ - aggregated-logging-fluentd
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our roles
+- name: delete roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - daemonset-admin
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our configmaps
+- name: delete configmaps
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-curator
+ - logging-elasticsearch
+ - logging-fluentd
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
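Per the README, these delete tasks run when both the install and upgrade switches are off; a sketch of triggering the uninstall path (host pattern assumed):

```yaml
# Hypothetical uninstall invocation: with both switches False the role
# takes the delete_logging.yaml path shown above.
- hosts: masters[0]
  roles:
  - role: openshift_logging
    openshift_logging_install_logging: False
    openshift_logging_upgrade_logging: False
```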
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
new file mode 100644
index 000000000..e16071e46
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -0,0 +1,217 @@
+---
+# we will ensure our secrets and configmaps are set up here first
+- name: Checking for ca.key
+ stat: path="{{generated_certs_dir}}/ca.key"
+ register: ca_key_file
+ check_mode: no
+
+- name: Checking for ca.crt
+ stat: path="{{generated_certs_dir}}/ca.crt"
+ register: ca_cert_file
+ check_mode: no
+
+- name: Checking for ca.serial.txt
+ stat: path="{{generated_certs_dir}}/ca.serial.txt"
+ register: ca_serial_file
+ check_mode: no
+
+- name: Generate certificates
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ check_mode: no
+ when:
+ - not ca_key_file.stat.exists
+ - not ca_cert_file.stat.exists
+ - not ca_serial_file.stat.exists
+
+- name: Checking for signing.conf
+ stat: path="{{generated_certs_dir}}/signing.conf"
+ register: signing_conf_file
+ check_mode: no
+
+- template: src=signing.conf.j2 dest={{generated_certs_dir}}/signing.conf
+ vars:
+ - top_dir: '{{generated_certs_dir}}'
+ when: not signing_conf_file.stat.exists
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: kibana
+ - procure_component: kibana-ops
+ - procure_component: kibana-internal
+ hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+- name: Copy proxy TLS configuration file
+ copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is undefined
+ check_mode: no
+
+- name: Copy proxy TLS configuration file
+ copy: content="{{server_tls_json}}" dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is defined
+ check_mode: no
+
+- name: Checking for ca.db
+ stat: path="{{generated_certs_dir}}/ca.db"
+ register: ca_db_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.db
+ check_mode: no
+ when:
+ - not ca_db_file.stat.exists
+
+- name: Checking for ca.crt.srl
+ stat: path="{{generated_certs_dir}}/ca.crt.srl"
+ register: ca_cert_srl_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.crt.srl
+ check_mode: no
+ when:
+ - not ca_cert_srl_file.stat.exists
+
+- name: Generate PEM certs
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.fluentd
+ - system.logging.kibana
+ - system.logging.curator
+ - system.admin
+ loop_control:
+ loop_var: node_name
+
+- name: Check for jks-generator service account
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}}
+ register: serviceaccount_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Create jks-generator service account
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount jks-generator -n {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
+
+- name: Check for hostmount-anyuid scc entry
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}'
+ register: scc_result
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Add to hostmount-anyuid scc
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}}
+ when:
+ - not ansible_check_mode
+ - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1
+
+- name: Copy JKS generation script
+ copy:
+ src: generate-jks.sh
+ dest: "{{generated_certs_dir}}/generate-jks.sh"
+ check_mode: no
+
+- name: Generate JKS pod template
+ template:
+ src: jks_pod.j2
+ dest: "{{mktemp.stdout}}/jks_pod.yaml"
+ check_mode: no
+ changed_when: no
+
+# check if pod generated files exist -- if they all do don't run the pod
+- name: Checking for elasticsearch.jks
+ stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+ register: elasticsearch_jks
+ check_mode: no
+
+- name: Checking for logging-es.jks
+ stat: path="{{generated_certs_dir}}/logging-es.jks"
+ register: logging_es_jks
+ check_mode: no
+
+- name: Checking for system.admin.jks
+ stat: path="{{generated_certs_dir}}/system.admin.jks"
+ register: system_admin_jks
+ check_mode: no
+
+- name: Checking for truststore.jks
+ stat: path="{{generated_certs_dir}}/truststore.jks"
+ register: truststore_jks
+ check_mode: no
+
+- name: create JKS generation pod
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
+ register: podoutput
+ check_mode: no
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}}
+ register: result
+ until: result.stdout.find("Succeeded") != -1
+ retries: 5
+ delay: 10
+ changed_when: no
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+# check for secret/logging-kibana-proxy
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}'
+ register: kibana_secret_oauth_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}'
+ register: kibana_secret_session_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+# check for oauthclient secret
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}'
+ register: oauth_secret_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+# set or generate as needed
+- name: Generate proxy session
+ set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
+ check_mode: no
+ when:
+ - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == ''
+
+- name: Set proxy session from existing secret
+ set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }}
+ check_mode: no
+ when:
+ - kibana_secret_session_check.stdout is defined
+ - kibana_secret_session_check.stdout != ''
+
+- name: Generate oauth client secret
+ set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
+ check_mode: no
+ when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == ''
+ or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == ''
+ or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout
+
+- name: Set oauth client secret from existing secret
+ set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}}
+ check_mode: no
+ when:
+    - kibana_secret_oauth_check.stdout is defined
+ - kibana_secret_oauth_check.stdout != ''
+ - oauth_secret_check.stdout is defined
+ - oauth_secret_check.stdout != ''
+ - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout
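+# Note: random_word is a custom filter shipped in
+# roles/openshift_logging/filter_plugins/openshift_logging.py; the assumed
+# behavior is that it returns the given number of characters drawn at random
+# from the supplied alphabet, e.g.:
+#   'abc...89' | random_word(64)  ->  a 64-character oauth client secret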
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..56f590717
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,13 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: aggregated-logging-elasticsearch
+ obj_name: rolebinding-reader
+ crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_logging_namespace}}"
+ check_mode: no
+ changed_when: no
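+# A sketch of the rendered object, assuming openshift_logging_namespace=logging
+# (rendered via templates/clusterrolebinding.j2, shown later in this diff):
+#   apiVersion: v1
+#   kind: ClusterRoleBinding
+#   metadata:
+#     name: rolebinding-reader
+#   userNames:
+#     - system:serviceaccount:logging:aggregated-logging-elasticsearch
+#   subjects:
+#     - kind: ServiceAccount
+#       name: aggregated-logging-elasticsearch
+#       namespace: logging
+#   roleRef:
+#     name: rolebinding-reader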
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
new file mode 100644
index 000000000..0b8b1014c
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate rolebinding-reader ClusterRole
+ template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
+ vars:
+ obj_name: rolebinding-reader
+ rules:
+ - resources: [clusterrolebindings]
+ verbs:
+ - get
+ check_mode: no
+ changed_when: no
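+# Rendered via templates/clusterrole.j2 (shown later in this diff) this yields
+# roughly the following; the bare apiGroups: key is what the template emits
+# when no api_groups are passed:
+#   apiVersion: v1
+#   kind: ClusterRole
+#   metadata:
+#     name: rolebinding-reader
+#   rules:
+#   - resources:
+#     - clusterrolebindings
+#     apiGroups:
+#     verbs:
+#     - get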
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
new file mode 100644
index 000000000..b24a7c342
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -0,0 +1,117 @@
+---
+- block:
+ - copy:
+ src: elasticsearch-logging.yml
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+ - template:
+ src: elasticsearch.yml.j2
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ vars:
+ - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{es_logging_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{es_config_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
+ --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
+ register: es_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{es_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
+ when: es_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+
+- block:
+ - copy:
+ src: curator.yml
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{curator_config_contents}}"
+ dest: "{{mktemp.stdout}}/curator.yml"
+    when: curator_config_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
+ --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
+ register: curator_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{curator_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
+ when: curator_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+    when: fluentd_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_throttle_contents}}"
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
+ register: fluentd_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
+ when: fluentd_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
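+# The *_contents variables above let an inventory inline its own configuration
+# in place of the shipped defaults; a minimal sketch (file paths assumed):
+#   curator_config_contents: "{{ lookup('file', 'files/my-curator.yml') }}"
+#   fluentd_config_contents: "{{ lookup('file', 'files/my-fluent.conf') }}"
+#   fluentd_secureforward_contents: "{{ lookup('file', 'files/my-secure-forward.conf') }}"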
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
new file mode 100644
index 000000000..8aea4e81f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
@@ -0,0 +1,65 @@
+---
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
+ vars:
+ component: es
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
+ vars:
+ component: es-ops
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
+ vars:
+ component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ openshift_logging_es_host: logging-es-ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
new file mode 100644
index 000000000..289b72ea6
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -0,0 +1,36 @@
+---
+- name: Checking for {{component}}.key
+ stat: path="{{generated_certs_dir}}/{{component}}.key"
+ register: key_file
+ check_mode: no
+
+- name: Checking for {{component}}.crt
+ stat: path="{{generated_certs_dir}}/{{component}}.crt"
+ register: cert_file
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is defined
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is undefined
+ check_mode: no
+
+- name: Sign cert request with CA for {{component}}
+ command: >
+ openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt
+ -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext
+ when:
+ - not cert_file.stat.exists
+ check_mode: no
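+# Example render for component=system.logging.fluentd without a cert_ext
+# (paths abbreviated; the CA config comes from signing.conf in the same dir):
+#   openssl req -out .../system.logging.fluentd.csr -new -newkey rsa:2048 \
+#     -keyout .../system.logging.fluentd.key \
+#     -subj "/CN=system.logging.fluentd/OU=OpenShift/O=Logging" -days 712 -nodes
+#   openssl ca -in .../system.logging.fluentd.csr -notext \
+#     -out .../system.logging.fluentd.crt -config .../signing.conf \
+#     -extensions v3_req -batch -extensions server_ext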
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
new file mode 100644
index 000000000..601ec9e83
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -0,0 +1,49 @@
+---
+- name: Init pool of PersistentVolumeClaim names
+ set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
+ vars:
+ pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"
+ start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}"
+ with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}}
+ when:
+ - openshift_logging_es_pvc_size | search('^\d.*')
+ - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+- name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool | default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - not openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+ changed_when: no
+
+- name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool|default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+ changed_when: no
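+# Worked example: with openshift_logging_es_pvc_prefix=logging-es, a cluster
+# size of 3, and no matching claims yet, the pool initializes to
+# [logging-es-0, logging-es-1, logging-es-2]; existing claims matching the
+# prefix raise `start` so only the missing names are generated.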
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
new file mode 100644
index 000000000..7dc9530df
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generate RoleBindings
+ template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
new file mode 100644
index 000000000..25877ebff
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -0,0 +1,21 @@
+---
+- name: Generating logging routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+ tags: routes
+ vars:
+ obj_name: "{{route_info.name}}"
+ route_host: "{{route_info.host}}"
+ service_name: "{{route_info.name}}"
+ tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ with_items:
+ - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
+ - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
+ loop_control:
+ loop_var: route_info
+ when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana'
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..1829acaee
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -0,0 +1,77 @@
+---
+- name: Retrieving the certs, keys, and server TLS config used to generate secrets for the logging components
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_key", file: "system.logging.kibana.key"}
+ - { name: "kibana_cert", file: "system.logging.kibana.crt"}
+ - { name: "curator_key", file: "system.logging.curator.key"}
+ - { name: "curator_cert", file: "system.logging.curator.crt"}
+ - { name: "fluentd_key", file: "system.logging.fluentd.key"}
+ - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+
+- name: Generating secrets for logging components
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-{{component}}
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ secret_keys: ["ca", "cert", "key"]
+ with_items:
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: component
+ when: secret_name not in openshift_logging_facts.{{component}}.secrets or
+ secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- name: Generating secrets for kibana proxy
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-kibana-proxy
+ secrets:
+ - {key: oauth-secret, value: "{{oauth_secret}}"}
+ - {key: session-secret, value: "{{session_secret}}"}
+ - {key: server-key, value: "{{kibana_key_file}}"}
+ - {key: server-cert, value: "{{kibana_cert_file}}"}
+ - {key: server-tls, value: "{{server_tls_file}}"}
+ secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
+ kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
+ kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
+ server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
+ when: secret_name not in openshift_logging_facts.kibana.secrets or
+ secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- name: Generating secrets for elasticsearch
+ command: >
+ {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
+ key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
+ searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
+ admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
+ admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
+ vars:
+ secret_name: logging-elasticsearch
+ secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+ register: logging_es_secret
+ when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
+ secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
+ when: logging_es_secret.stdout is defined
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..21bcdfecb
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,14 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
+ vars:
+ obj_name: aggregated-logging-{{component}}
+ with_items:
+ - elasticsearch
+ - kibana
+ - fluentd
+ - curator
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
new file mode 100644
index 000000000..8eaac76c4
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -0,0 +1,87 @@
+---
+- name: Generating logging-es service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
+ vars:
+ obj_name: logging-es
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-kibana service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
+ vars:
+ obj_name: logging-kibana
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
+ vars:
+ obj_name: logging-es-ops
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-ops-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-ops-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-kibana-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
+ vars:
+ obj_name: logging-kibana-ops
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
new file mode 100644
index 000000000..8f2825552
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -0,0 +1,51 @@
+---
+- name: Check Curator current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: curator_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Check Curator ops current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: curator_ops_replica_count
+ when:
+ - not ansible_check_mode
+ - openshift_logging_use_ops
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
+ vars:
+ component: curator
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
+ replicas: "{{curator_replica_count.stdout | default (0)}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
+ replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
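+# Note: replicas falls back to 0 when the dc does not yet exist (the replica
+# check above errors out or is skipped in check mode), so new curator dcs are
+# created scaled down and later brought up by start_cluster.yaml.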
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
new file mode 100644
index 000000000..fbba46a35
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -0,0 +1,107 @@
+---
+- name: Generate PersistentVolumeClaims
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+- name: Init pool of DeploymentConfig names for Elasticsearch
+ set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+ vars:
+ component: es
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_memory_limit}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ with_indexed_items:
+ - "{{es_dc_pool | default([])}}"
+ check_mode: no
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ changed_when: no
+
+# --------- Tasks for Ops clusters ---------
+
+- name: Validate Elasticsearch cluster size for Ops
+ fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
+ vars:
+ es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}"
+ check_mode: no
+
+- name: Generate PersistentVolumeClaims for Ops
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
+ openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
+ openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
+ openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"
+ openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
+ set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+ vars:
+ component: es-ops
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Generate Elasticsearch DeploymentConfig for Ops
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es-ops
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
+ es_node_quorum: "{{es_ops_node_quorum}}"
+ es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
+ es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
+ openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+ with_indexed_items:
+ - "{{es_dc_pool_ops | default([])}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
new file mode 100644
index 000000000..4c510c6e7
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -0,0 +1,54 @@
+---
+- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Generating Fluentd daemonset
+ template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ ops_host: "{{ fluentd_ops_host }}"
+ ops_port: "{{ fluentd_ops_port }}"
+ fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check fluentd privileged permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/privileged -o jsonpath='{.users}'
+ register: fluentd_privileged
+ check_mode: no
+ changed_when: no
+
+- name: "Set privileged permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ check_mode: no
+ when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check fluentd cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: fluentd_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ check_mode: no
+ when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
new file mode 100644
index 000000000..de4b018dd
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -0,0 +1,58 @@
+---
+- name: Check Kibana current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: kibana_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Check Kibana ops current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: kibana_ops_replica_count
+ when:
+ - not ansible_check_mode
+ - openshift_logging_use_ops
+ ignore_errors: yes
+ changed_when: no
+
+
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
+ replicas: "{{kibana_replica_count.stdout | default (0)}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
+ replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
new file mode 100644
index 000000000..af03e9371
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -0,0 +1,49 @@
+---
+- name: Gather OpenShift Logging Facts
+ openshift_logging_facts:
+ oc_bin: "{{openshift.common.client_binary}}"
+ admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ openshift_logging_namespace: "{{openshift_logging_namespace}}"
+ tags: logging_facts
+ check_mode: no
+
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
+ when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}"
+
+- name: Install logging
+ include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
+ when: openshift_hosted_logging_install | default(true) | bool
+ with_items:
+ - support
+ - elasticsearch
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: install_component
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_logging_namespace }}"
+ - file_name: "{{ file }}"
+ - file_content: "{{ lookup('file', file) | from_yaml }}"
+ with_fileglob:
+ - "{{ mktemp.stdout }}/templates/*.yaml"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg="{{lookup('file', file)|quote}}"
+ with_fileglob:
+ - "{{mktemp.stdout}}/templates/*.yaml"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
new file mode 100644
index 000000000..da0bbb627
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -0,0 +1,54 @@
+---
+# This is the base configuration for installing the other components
+- name: Check whether the logging project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
+ register: logging_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in logging_project_result.stderr
+
+- name: Create logging cert directory
+ file: path={{openshift.common.config_base}}/logging state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_certs.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include: generate_configmaps.yaml
+
+- include: generate_services.yaml
+
+- name: Generate kibana-proxy oauth client
+ template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
+ vars:
+ secret: "{{oauth_secret}}"
+ when: oauth_secret is defined
+ check_mode: no
+ changed_when: no
+
+- include: generate_clusterroles.yaml
+
+- include: generate_rolebindings.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
+
+- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
new file mode 100644
index 000000000..aecb5d81b
--- /dev/null
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -0,0 +1,29 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
+ -o jsonpath='{.metadata.labels.{{ label }}}'
+ register: label_value
+ failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - value is defined
+ - label_value.stdout is defined
+ - label_value.stdout != value
+ - unlabel is not defined or not unlabel
+ - not ansible_check_mode
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - unlabel is defined
+ - unlabel
+ - not ansible_check_mode
+ - label_value.stdout != ""
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
new file mode 100644
index 000000000..c4ec1b255
--- /dev/null
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -0,0 +1,40 @@
+---
+- fail:
+    msg: Only one Fluentd nodeselector key-value pair should be provided
+ when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- debug: msg="Created temp dir {{mktemp.stdout}}"
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- include: "{{ role_path }}/tasks/install_logging.yaml"
+ when: openshift_logging_install_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
+ when: openshift_logging_upgrade_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/delete_logging.yaml"
+ when:
+ - not openshift_logging_install_logging | default(false) | bool
+ - not openshift_logging_upgrade_logging | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: logging_cleanup
+ changed_when: False
+ check_mode: no
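+# Flow sketch: install_logging.yaml runs when openshift_logging_install_logging
+# is true, upgrade_logging.yaml when openshift_logging_upgrade_logging is true,
+# and delete_logging.yaml when neither is set; the temp workdir is removed in
+# every case.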
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
new file mode 100644
index 000000000..c362b7fca
--- /dev/null
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -0,0 +1,29 @@
+---
+- name: Checking resourceVersion of {{file_content.kind}} {{file_content.metadata.name}}
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}} || echo 0
+ register: generation_init
+ changed_when: no
+
+- name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+ shell: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}} || echo 0
+ register: generation_changed
+ failed_when: "'error' in generation_changed.stderr"
+ changed_when: generation_changed.stdout | int > generation_init.stdout | int
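+# Change detection sketch: for a missing object generation_init echoes 0, so
+# any post-apply resourceVersion marks the task changed; a no-op apply leaves
+# the resourceVersion untouched and the task reports ok.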
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
new file mode 100644
index 000000000..44dd5e894
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -0,0 +1,52 @@
+---
+- name: Checking for {{ cert_info.procure_component }}.crt
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.crt"
+ register: component_cert_file
+ check_mode: no
+
+- name: Checking for {{ cert_info.procure_component }}.key
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.key"
+ register: component_key_file
+ check_mode: no
+
+- name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
+ set_fact: procure_component_crt={{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}
+ when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ check_mode: no
+
+- name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
+ set_fact: procure_component_key={{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}
+ when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ check_mode: no
+
+- name: Creating signed server cert and key for {{ cert_info.procure_component }}
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ check_mode: no
+ when:
+ - cert_info.hostnames is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - "{{ cert_info.procure_component }}_crt is defined"
+ - "{{ cert_info.procure_component }}_key is defined"
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying Server cert for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - "{{ cert_info.procure_component }}_crt is defined"
+ - "{{ cert_info.procure_component }}_key is defined"
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
new file mode 100644
index 000000000..125d3b8af
--- /dev/null
+++ b/roles/openshift_logging/tasks/scale.yaml
@@ -0,0 +1,28 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: replica_count
+ failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
+ --replicas={{desired}} -n {{openshift_logging_namespace}}
+ register: scale_result
+ failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
+ when:
+ - not ansible_check_mode
+ - replica_count.stdout|int != desired
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} -n {{openshift_logging_namespace}} -o jsonpath='{.status.replicas}'
+ register: replica_counts
+ until: replica_counts.stdout|int == desired
+ retries: 30
+ delay: 10
+ when:
+ - not ansible_check_mode
+ - replica_count.stdout|int != desired
+ changed_when: no
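+# Usage sketch (matches start_cluster.yaml and stop_cluster.yaml below):
+#   - include: scale.yaml
+#     vars:
+#       desired: 1
+#     with_items: "{{es_dc.stdout_lines}}"
+#     loop_control:
+#       loop_var: object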
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
new file mode 100644
index 000000000..a96ad3f3a
--- /dev/null
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -0,0 +1,104 @@
+---
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ check_mode: no
+ changed_when: no
+
+- name: start fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+ changed_when: no
+
+- name: start kibana
+ include: scale.yaml
+ vars:
+ desired: "{{ openshift_logging_kibana_replica_count | default (1) }}"
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+ changed_when: no
+
+- name: start curator
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+ changed_when: no
+
+- name: start kibana-ops
+ include: scale.yaml
+ vars:
+ desired: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+ changed_when: no
+
+- name: start curator-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..e44493e4d
--- /dev/null
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -0,0 +1,97 @@
+---
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ changed_when: no
+
+- name: stop fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ unlabel: True
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ changed_when: no
+
+- name: stop elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ changed_when: no
+
+- name: stop kibana
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ changed_when: no
+
+- name: stop curator
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ changed_when: no
+
+- name: stop elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ changed_when: no
+
+- name: stop kibana-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ changed_when: no
+
+- name: stop curator-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
new file mode 100644
index 000000000..a93463239
--- /dev/null
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -0,0 +1,41 @@
+---
+- name: Stop the Cluster
+ include: stop_cluster.yaml
+
+- name: Upgrade logging
+ include: install_logging.yaml
+ vars:
+ start_cluster: False
+
+# start ES so that we can run the migration script
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}'
+ register: running_pod
+ until: running_pod.stdout != ''
+ retries: 30
+ delay: 10
+ changed_when: no
+ check_mode: no
+
+- name: Run upgrade script
+ script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
+ register: script_output
+ changed_when:
+ - script_output.rc == 0
+ - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1
+
+- name: Start up rest of cluster
+ include: start_cluster.yaml
diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2
new file mode 100644
index 000000000..0d28db48e
--- /dev/null
+++ b/roles/openshift_logging/templates/clusterrole.j2
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: {{obj_name}}
+rules:
+{% for rule in rules %}
+- resources:
+{% for kind in rule.resources %}
+ - {{ kind }}
+{% endfor %}
+ apiGroups:
+{% if rule.api_groups is defined %}
+{% for group in rule.api_groups %}
+ - {{ group }}
+{% endfor %}
+{% endif %}
+ verbs:
+{% for verb in rule.verbs %}
+ - {{ verb }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2
new file mode 100644
index 000000000..2d25ff1fb
--- /dev/null
+++ b/roles/openshift_logging/templates/clusterrolebinding.j2
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{obj_name}}
+{% if crb_usernames is defined %}
+userNames:
+{% for name in crb_usernames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+{% if crb_groupnames is defined %}
+groupNames:
+{% for name in crb_groupnames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+ namespace: {{sub.namespace}}
+{% endfor %}
+roleRef:
+ name: {{obj_name}}
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
new file mode 100644
index 000000000..d3b5d33a2
--- /dev/null
+++ b/roles/openshift_logging/templates/curator.j2
@@ -0,0 +1,97 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-curator
+ containers:
+ -
+ name: "curator"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: "{{curator_cpu_limit}}"
+{% if curator_memory_limit is defined and curator_memory_limit is not none %}
+ memory: "{{curator_memory_limit}}"
+{% endif %}
+ env:
+ -
+ name: "K8S_HOST_URL"
+ value: "{{master_url}}"
+ -
+ name: "ES_HOST"
+ value: "{{es_host}}"
+ -
+ name: "ES_PORT"
+ value: "{{es_port}}"
+ -
+ name: "ES_CLIENT_CERT"
+ value: "/etc/curator/keys/cert"
+ -
+ name: "ES_CLIENT_KEY"
+ value: "/etc/curator/keys/key"
+ -
+ name: "ES_CA"
+ value: "/etc/curator/keys/ca"
+ -
+ name: "CURATOR_DEFAULT_DAYS"
+ value: "{{openshift_logging_curator_default_days}}"
+ -
+ name: "CURATOR_RUN_HOUR"
+ value: "{{openshift_logging_curator_run_hour}}"
+ -
+ name: "CURATOR_RUN_MINUTE"
+ value: "{{openshift_logging_curator_run_minute}}"
+ -
+ name: "CURATOR_RUN_TIMEZONE"
+ value: "{{openshift_logging_curator_run_timezone}}"
+ -
+ name: "CURATOR_SCRIPT_LOG_LEVEL"
+ value: "{{openshift_logging_curator_script_log_level}}"
+ -
+ name: "CURATOR_LOG_LEVEL"
+ value: "{{openshift_logging_curator_log_level}}"
+ volumeMounts:
+ - name: certs
+ mountPath: /etc/curator/keys
+ readOnly: true
+ - name: config
+ mountPath: /usr/curator/settings
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ readOnly: true
+ volumes:
+ - name: certs
+ secret:
+ secretName: logging-curator
+ - name: config
+ configMap:
+ name: logging-curator
+ - name: elasticsearch-storage
+ emptyDir: {}
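curator.j2 expects its deploy-time facts (deploy_name, component, image, es_host, limits, and so on) from the calling task. One plausible shape for that call, shown only to document the contract; the values and dest path are illustrative, not lifted from install_curator.yaml:

    - name: Generate curator deploymentconfig
      template:
        src: curator.j2
        dest: "{{mktemp.stdout}}/templates/logging-curator-dc.yaml"
      vars:
        component: curator
        logging_component: curator
        deploy_name: logging-curator
        image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
        es_host: logging-es
        es_port: "{{openshift_logging_es_port}}"
        curator_cpu_limit: 100m
        curator_memory_limit: "{{openshift_logging_curator_memory_limit}}"
      check_mode: no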
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
new file mode 100644
index 000000000..dad78b844
--- /dev/null
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -0,0 +1,75 @@
+cluster:
+ name: ${CLUSTER_NAME}
+
+script:
+ inline: on
+ indexed: on
+
+index:
+ number_of_shards: 1
+ number_of_replicas: 0
+ auto_expand_replicas: 0-3
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+node:
+ master: true
+ data: true
+
+network:
+ host: 0.0.0.0
+
+cloud:
+ kubernetes:
+ service: ${SERVICE_DNS}
+ namespace: ${NAMESPACE}
+
+discovery:
+ type: kubernetes
+ zen.ping.multicast.enabled: false
+
+gateway:
+ expected_master_nodes: ${NODE_QUORUM}
+ recover_after_nodes: ${RECOVER_AFTER_NODES}
+ expected_nodes: ${RECOVER_EXPECTED_NODES}
+ recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+
+openshift.searchguard:
+ keystore.path: /etc/elasticsearch/secret/admin.jks
+ truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default('false')}}
+
+path:
+ data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+ logs: /elasticsearch/${CLUSTER_NAME}/logs
+ work: /elasticsearch/${CLUSTER_NAME}/work
+ scripts: /elasticsearch/${CLUSTER_NAME}/scripts
+
+searchguard:
+ authcz.admin_dn:
+ - CN=system.admin,OU=OpenShift,O=Logging
+ config_index_name: ".searchguard.${HOSTNAME}"
+ ssl:
+ transport:
+ enabled: true
+ enforce_hostname_verification: false
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+ keystore_password: kspass
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+ truststore_password: tspass
+ http:
+ enabled: true
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/key
+ keystore_password: kspass
+ clientauth_mode: OPTIONAL
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/truststore
+ truststore_password: tspass
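The ${...} placeholders here are resolved by Elasticsearch from the container environment that es.j2 sets, not by Jinja. For a three-node cluster (assuming openshift_logging_es_cluster_size=3; the 5m recovery window is likewise illustrative), the gateway block comes out as:

    gateway:
      expected_master_nodes: 2   # NODE_QUORUM = 3/2 + 1, truncated to int
      recover_after_nodes: 2     # RECOVER_AFTER_NODES = 3 - 1
      expected_nodes: 3          # RECOVER_EXPECTED_NODES
      recover_after_time: 5m     # RECOVER_AFTER_TIME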
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
new file mode 100644
index 000000000..291589690
--- /dev/null
+++ b/roles/openshift_logging/templates/es.j2
@@ -0,0 +1,105 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ spec:
+ terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-elasticsearch
+ securityContext:
+ supplementalGroups:
+ - {{openshift_logging_es_storage_group}}
+ containers:
+ -
+ name: "elasticsearch"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: "{{es_memory_limit}}"
+{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+ cpu: "{{es_cpu_limit}}"
+{% endif %}
+ requests:
+ memory: "512Mi"
+ ports:
+ -
+ containerPort: 9200
+ name: "restapi"
+ -
+ containerPort: 9300
+ name: "cluster"
+ env:
+ -
+ name: "NAMESPACE"
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ -
+ name: "KUBERNETES_TRUST_CERT"
+ value: "true"
+ -
+ name: "SERVICE_DNS"
+ value: "logging-{{es_cluster_name}}-cluster"
+ -
+ name: "CLUSTER_NAME"
+ value: "logging-{{es_cluster_name}}"
+ -
+ name: "INSTANCE_RAM"
+ value: "{{openshift_logging_es_memory_limit}}"
+ -
+ name: "NODE_QUORUM"
+ value: "{{es_node_quorum | int}}"
+ -
+ name: "RECOVER_AFTER_NODES"
+ value: "{{es_recover_after_nodes}}"
+ -
+ name: "RECOVER_EXPECTED_NODES"
+ value: "{{es_recover_expected_nodes}}"
+ -
+ name: "RECOVER_AFTER_TIME"
+ value: "{{openshift_logging_es_recover_after_time}}"
+ volumeMounts:
+ - name: elasticsearch
+ mountPath: /etc/elasticsearch/secret
+ readOnly: true
+ - name: elasticsearch-config
+ mountPath: /usr/share/java/elasticsearch/config
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ volumes:
+ - name: elasticsearch
+ secret:
+ secretName: logging-elasticsearch
+ - name: elasticsearch-config
+ configMap:
+ name: logging-elasticsearch
+ - name: elasticsearch-storage
+{% if pvc_claim is defined and pvc_claim | trim | length > 0 %}
+ persistentVolumeClaim:
+ claimName: {{pvc_claim}}
+{% else %}
+ emptyDir: {}
+{% endif %}
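The storage volume switches on pvc_claim: a non-empty claim name mounts a PersistentVolumeClaim, anything else falls back to ephemeral storage. The two possible renderings (claim name illustrative):

    # pvc_claim set to "logging-es-0"
    - name: elasticsearch-storage
      persistentVolumeClaim:
        claimName: logging-es-0

    # pvc_claim undefined or empty
    - name: elasticsearch-storage
      emptyDir: {}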
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
new file mode 100644
index 000000000..b6c91f8ed
--- /dev/null
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -0,0 +1,149 @@
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+ name: "{{daemonset_name}}"
+ labels:
+ provider: openshift
+ component: "{{daemonset_component}}"
+ logging-infra: "{{daemonset_component}}"
+spec:
+ selector:
+ matchLabels:
+ provider: openshift
+ component: "{{daemonset_component}}"
+ updateStrategy:
+ type: RollingUpdate
+ # minReadySeconds belongs to the DaemonSet spec, not to rollingUpdate
+ minReadySeconds: 600
+ template:
+ metadata:
+ name: "{{daemonset_container_name}}"
+ labels:
+ logging-infra: "{{daemonset_component}}"
+ provider: openshift
+ component: "{{daemonset_component}}"
+ spec:
+ serviceAccountName: "{{daemonset_serviceAccount}}"
+ nodeSelector:
+ {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}"
+ containers:
+ - name: "{{daemonset_container_name}}"
+ image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}"
+ imagePullPolicy: Always
+ securityContext:
+ privileged: true
+ resources:
+ limits:
+ cpu: {{openshift_logging_fluentd_cpu_limit}}
+ memory: {{openshift_logging_fluentd_memory_limit}}
+ volumeMounts:
+ - name: runlogjournal
+ mountPath: /run/log/journal
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: dockercfg
+ mountPath: /etc/sysconfig/docker
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "ES_COPY"
+ value: "{{openshift_logging_fluentd_es_copy|lower}}"
+ - name: "ES_COPY_HOST"
+ value: "{{es_copy_host | default('')}}"
+ - name: "ES_COPY_PORT"
+ value: "{{es_copy_port | default('')}}"
+ - name: "ES_COPY_SCHEME"
+ value: "{{es_copy_scheme | default('https')}}"
+ - name: "ES_COPY_CLIENT_CERT"
+ value: "{{es_copy_client_cert | default('')}}"
+ - name: "ES_COPY_CLIENT_KEY"
+ value: "{{es_copy_client_key | default('')}}"
+ - name: "ES_COPY_CA"
+ value: "{{es_copy_ca | default('')}}"
+ - name: "ES_COPY_USERNAME"
+ value: "{{es_copy_username | default('')}}"
+ - name: "ES_COPY_PASSWORD"
+ value: "{{es_copy_password | default('')}}"
+ - name: "OPS_COPY_HOST"
+ value: "{{ops_copy_host | default('')}}"
+ - name: "OPS_COPY_PORT"
+ value: "{{ops_copy_port | default('')}}"
+ - name: "OPS_COPY_SCHEME"
+ value: "{{ops_copy_scheme | default('https')}}"
+ - name: "OPS_COPY_CLIENT_CERT"
+ value: "{{ops_copy_client_cert | default('')}}"
+ - name: "OPS_COPY_CLIENT_KEY"
+ value: "{{ops_copy_client_key | default('')}}"
+ - name: "OPS_COPY_CA"
+ value: "{{ops_copy_ca | default('')}}"
+ - name: "OPS_COPY_USERNAME"
+ value: "{{ops_copy_username | default('')}}"
+ - name: "OPS_COPY_PASSWORD"
+ value: "{{ops_copy_password | default('')}}"
+ - name: "USE_JOURNAL"
+ value: "{{openshift_logging_fluentd_use_journal|lower}}"
+ - name: "JOURNAL_SOURCE"
+ value: "{{fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ volumes:
+ - name: runlogjournal
+ hostPath:
+ path: /run/log/journal
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: config
+ configMap:
+ name: logging-fluentd
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: dockercfg
+ hostPath:
+ path: /etc/sysconfig/docker
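Since this is a DaemonSet, the nodeSelector is the effective scaling knob: fluentd runs only on nodes carrying the label, so deploying fluentd means labeling nodes rather than setting replicas. Assuming the conventional key/value pair (illustrative), the selector renders as:

    nodeSelector:
      logging-infra-fluentd: "true"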
diff --git a/roles/openshift_logging/templates/jks_pod.j2 b/roles/openshift_logging/templates/jks_pod.j2
new file mode 100644
index 000000000..8b1c74211
--- /dev/null
+++ b/roles/openshift_logging/templates/jks_pod.j2
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ logging-infra: support
+ generateName: jks-cert-gen-
+spec:
+ containers:
+ - name: jks-cert-gen
+ image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}}
+ imagePullPolicy: Always
+ command: ["sh", "{{generated_certs_dir}}/generate-jks.sh"]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: {{generated_certs_dir}}
+ name: certmount
+ env:
+ - name: PROJECT
+ value: {{openshift_logging_namespace}}
+ - name: CERT_DIR
+ value: {{generated_certs_dir}}
+ restartPolicy: Never
+ serviceAccount: jks-generator
+ volumes:
+ - hostPath:
+ path: "{{generated_certs_dir}}"
+ name: certmount
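Using metadata.generateName instead of name makes the API server append a random suffix, so repeated certificate runs never collide with an old completed pod. A sketch of creating the pod and capturing the generated name (task shape illustrative):

    - name: Run the JKS generation pod
      command: >
        {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
        create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
      register: jks_pod   # e.g. pod/jks-cert-gen-x7k2p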
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
new file mode 100644
index 000000000..1ec97701a
--- /dev/null
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -0,0 +1,110 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-kibana
+ containers:
+ -
+ name: "kibana"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if kibana_cpu_limit is defined and kibana_cpu_limit is not none %}
+ cpu: "{{kibana_cpu_limit}}"
+{% endif %}
+{% if kibana_memory_limit is defined and kibana_memory_limit is not none %}
+ memory: "{{kibana_memory_limit}}"
+{% endif %}
+{% endif %}
+ env:
+ - name: "ES_HOST"
+ value: "{{es_host}}"
+ - name: "ES_PORT"
+ value: "{{es_port}}"
+ volumeMounts:
+ - name: kibana
+ mountPath: /etc/kibana/keys
+ readOnly: true
+ -
+ name: "kibana-proxy"
+ image: {{proxy_image}}
+ imagePullPolicy: Always
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none %}
+ cpu: "{{kibana_proxy_cpu_limit}}"
+{% endif %}
+{% if kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none %}
+ memory: "{{kibana_proxy_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ -
+ name: "oaproxy"
+ containerPort: 3000
+ env:
+ -
+ name: "OAP_BACKEND_URL"
+ value: "http://localhost:5601"
+ -
+ name: "OAP_AUTH_MODE"
+ value: "oauth2"
+ -
+ name: "OAP_TRANSFORM"
+ value: "user_header,token_header"
+ -
+ name: "OAP_OAUTH_ID"
+ value: kibana-proxy
+ -
+ name: "OAP_MASTER_URL"
+ value: {{master_url}}
+ -
+ name: "OAP_PUBLIC_MASTER_URL"
+ value: {{public_master_url}}
+ -
+ name: "OAP_LOGOUT_REDIRECT"
+ value: {{public_master_url}}/console/logout
+ -
+ name: "OAP_MASTER_CA_FILE"
+ value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ -
+ name: "OAP_DEBUG"
+ value: "{{openshift_logging_kibana_proxy_debug}}"
+ volumeMounts:
+ - name: kibana-proxy
+ mountPath: /secret
+ readOnly: true
+ volumes:
+ - name: kibana
+ secret:
+ secretName: logging-kibana
+ - name: kibana-proxy
+ secret:
+ secretName: logging-kibana-proxy
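The guarded resources blocks render a normal limits stanza when at least one limit is set and disappear entirely when none is, so no empty limits: {} key is ever emitted. With both kibana limits set (values illustrative):

    resources:
      limits:
        cpu: "200m"
        memory: "736Mi"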
diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging/templates/oauth-client.j2
new file mode 100644
index 000000000..41d3123cb
--- /dev/null
+++ b/roles/openshift_logging/templates/oauth-client.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: OAuthClient
+metadata:
+ name: kibana-proxy
+ labels:
+ logging-infra: support
+secret: {{secret}}
+redirectURIs:
+- https://{{openshift_logging_kibana_hostname}}
+- https://{{openshift_logging_kibana_ops_hostname}}
+scopeRestrictions:
+- literals:
+ - user:info
+ - user:check-access
+ - user:list-projects
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging/templates/pvc.j2
new file mode 100644
index 000000000..f19a3a750
--- /dev/null
+++ b/roles/openshift_logging/templates/pvc.j2
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+ labels:
+ logging-infra: support
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
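Callers drive this template down one of two paths: dynamic provisioning via the (alpha, in this Kubernetes era) storage-class annotation, or static binding via pv_selector. An illustrative dynamic request and its rendering:

    # input (illustrative)
    obj_name: logging-es-0
    size: 10Gi
    access_modes: [ReadWriteOnce]
    annotations:
      volume.alpha.kubernetes.io/storage-class: dynamic

    # rendered output
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: logging-es-0
      labels:
        logging-infra: support
      annotations:
        volume.alpha.kubernetes.io/storage-class: dynamic
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi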
diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging/templates/rolebinding.j2
new file mode 100644
index 000000000..fcd4e87cc
--- /dev/null
+++ b/roles/openshift_logging/templates/rolebinding.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: RoleBinding
+metadata:
+ name: {{obj_name}}
+roleRef:
+{% if roleRef.kind is defined %}
+ kind: {{ roleRef.kind }}
+{% endif %}
+ name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging/templates/route_reencrypt.j2
new file mode 100644
index 000000000..8be30a2c4
--- /dev/null
+++ b/roles/openshift_logging/templates/route_reencrypt.j2
@@ -0,0 +1,25 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined %}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ host: {{ route_host }}
+ tls:
+ caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ termination: reencrypt
+ to:
+ kind: Service
+ name: {{ service_name }}
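The split('\n') loops exist solely to re-indent each line of the PEM payload so the whole certificate sits inside the YAML block scalar. The rendered result looks like (certificate bodies elided):

    tls:
      caCertificate: |
        -----BEGIN CERTIFICATE-----
        ...
        -----END CERTIFICATE-----
      destinationCACertificate: |
        ...
      termination: reencrypt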
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
new file mode 100644
index 000000000..d73bae9c4
--- /dev/null
+++ b/roles/openshift_logging/templates/secret.j2
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{secret_name}}
+type: Opaque
+data:
+{% for s in secrets %}
+ {{s.key}}: {{s.value | b64encode}}
+{% endfor %}
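Each entry's value is base64-encoded at render time, as the Secret data field requires. For example (illustrative entry):

    # input
    secrets:
    - key: admin-username
      value: admin

    # rendered
    data:
      admin-username: YWRtaW4=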
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
new file mode 100644
index 000000000..6c4ec0c76
--- /dev/null
+++ b/roles/openshift_logging/templates/service.j2
@@ -0,0 +1,28 @@
+apiVersion: "v1"
+kind: "Service"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined %}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ ports:
+{% for port in ports %}
+ -
+{% for key, value in port.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% if port.targetPort is undefined %}
+ clusterIP: "None"
+{% endif %}
+{% endfor %}
+{% if service_targetPort is defined %}
+ targetPort: {{service_targetPort}}
+{% endif %}
+ selector:
+ {% for key, value in selector.iteritems() %}
+ {{key}}: {{value}}
+ {% endfor %}
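Two quirks to be aware of: clusterIP: "None" (a headless service) is emitted from inside the ports loop whenever a port omits targetPort, even though clusterIP is a spec-level field, so that path only yields valid YAML for a service declaring a single such port; and the trailing spec-level targetPort is not a Service field, so in practice targetPort belongs inside each ports entry. A typical rendering (values illustrative):

    apiVersion: "v1"
    kind: "Service"
    metadata:
      name: "logging-es"
    spec:
      ports:
      -
        port: 9200
        targetPort: restapi
      selector:
        component: es
        provider: openshift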
diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2
new file mode 100644
index 000000000..b22acc594
--- /dev/null
+++ b/roles/openshift_logging/templates/serviceaccount.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{obj_name}}
+{% if labels is defined %}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_logging/templates/signing.conf.j2 b/roles/openshift_logging/templates/signing.conf.j2
new file mode 100644
index 000000000..727cde4c9
--- /dev/null
+++ b/roles/openshift_logging/templates/signing.conf.j2
@@ -0,0 +1,103 @@
+# Simple Signing CA
+
+# The [default] section contains global constants that can be referred to from
+# the entire configuration file. It may also hold settings pertaining to more
+# than one openssl command.
+
+[ default ]
+dir = {{top_dir}} # Top dir
+
+# The next part of the configuration file is used by the openssl req command.
+# It defines the CA's key pair, its DN, and the desired extensions for the CA
+# certificate.
+
+[ req ]
+default_bits = 2048 # RSA key size
+encrypt_key = yes # Protect private key
+default_md = sha1 # MD to use
+utf8 = yes # Input is UTF-8
+string_mask = utf8only # Emit UTF-8 strings
+prompt = no # Don't prompt for DN
+distinguished_name = ca_dn # DN section
+req_extensions = ca_reqext # Desired extensions
+
+[ ca_dn ]
+0.domainComponent = "io"
+1.domainComponent = "openshift"
+organizationName = "OpenShift Origin"
+organizationalUnitName = "Logging Signing CA"
+commonName = "Logging Signing CA"
+
+[ ca_reqext ]
+keyUsage = critical,keyCertSign,cRLSign
+basicConstraints = critical,CA:true,pathlen:0
+subjectKeyIdentifier = hash
+
+# The remainder of the configuration file is used by the openssl ca command.
+# The CA section defines the locations of CA assets, as well as the policies
+# applying to the CA.
+
+[ ca ]
+default_ca = signing_ca # The default CA section
+
+[ signing_ca ]
+certificate = $dir/ca.crt # The CA cert
+private_key = $dir/ca.key # CA private key
+new_certs_dir = $dir/ # Certificate archive
+serial = $dir/ca.serial.txt # Serial number file
+crlnumber = $dir/ca.crl.srl # CRL number file
+database = $dir/ca.db # Index file
+unique_subject = no # Require unique subject
+default_days = 730 # How long to certify for
+default_md = sha1 # MD to use
+policy = any_pol # Default naming policy
+email_in_dn = no # Add email to cert DN
+preserve = no # Keep passed DN ordering
+name_opt = ca_default # Subject DN display options
+cert_opt = ca_default # Certificate display options
+copy_extensions = copy # Copy extensions from CSR
+x509_extensions = client_ext # Default cert extensions
+default_crl_days = 7 # How long before next CRL
+crl_extensions = crl_ext # CRL extensions
+
+# Naming policies control which parts of a DN end up in the certificate and
+# under what circumstances certification should be denied.
+
+[ match_pol ]
+domainComponent = match # Must match the CA's domainComponent
+organizationName = match # Must match 'OpenShift Origin'
+organizationalUnitName = optional # Included if present
+commonName = supplied # Must be present
+
+[ any_pol ]
+domainComponent = optional
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = optional
+emailAddress = optional
+
+# Certificate extensions define what types of certificates the CA is able to
+# create.
+
+[ client_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+[ server_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = serverAuth,clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+# CRL extensions exist solely to point to the CA certificate that has issued
+# the CRL.
+
+[ crl_ext ]
+authorityKeyIdentifier = keyid
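This config is consumed by the openssl ca command, which also expects the database and serial files named in [signing_ca] (ca.db, ca.serial.txt) to exist under the rendered top_dir. A sketch of signing a server CSR with it, in the role's own task style (paths illustrative, and assuming top_dir was rendered to the same certs directory):

    - name: Sign a server cert request with the logging signing CA
      command: >
        openssl ca -config {{generated_certs_dir}}/signing.conf -extensions server_ext
        -batch -in {{generated_certs_dir}}/server.csr -out {{generated_certs_dir}}/server.crt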
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
new file mode 100644
index 000000000..11662c446
--- /dev/null
+++ b/roles/openshift_logging/vars/main.yaml
@@ -0,0 +1,8 @@
+---
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+es_node_quorum: "{{openshift_logging_es_cluster_size | int / 2 + 1}}"
+es_recover_after_nodes: "{{openshift_logging_es_cluster_size | int - 1}}"
+es_recover_expected_nodes: "{{openshift_logging_es_cluster_size}}"
+es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size | int / 2 + 1}}"
+es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size | int - 1}}"
+es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size}}"
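A worked example for a three-node cluster, which is what the quorum arithmetic protects:

    # openshift_logging_es_cluster_size: 3
    #   es_node_quorum            = 3 / 2 + 1  -> 2   (majority of masters)
    #   es_recover_after_nodes    = 3 - 1      -> 2
    #   es_recover_expected_nodes = 3
    # es.j2 exports NODE_QUORUM with "| int", so a fractional 2.5 truncates to 2.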