From b579a4acfa64f85119ffbcbb8f6701972ef0dbb6 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Wed, 28 Sep 2016 10:52:07 -0500 Subject: Creating openshift_logging role for deploying Aggregated Logging without a deployer image --- roles/openshift_logging/README.md | 86 ++++++ roles/openshift_logging/defaults/main.yml | 83 ++++++ roles/openshift_logging/files/curator.yml | 18 ++ .../files/elasticsearch-logging.yml | 72 +++++ roles/openshift_logging/files/elasticsearch.yml | 74 +++++ roles/openshift_logging/files/es_migration.sh | 81 ++++++ roles/openshift_logging/files/fluent.conf | 34 +++ .../files/fluentd-throttle-config.yaml | 7 + roles/openshift_logging/files/generate-jks.sh | 71 +++++ .../files/logging-deployer-sa.yaml | 6 + roles/openshift_logging/files/secure-forward.conf | 24 ++ roles/openshift_logging/files/server-tls.json | 5 + roles/openshift_logging/files/signing.conf | 103 +++++++ roles/openshift_logging/files/util.sh | 192 +++++++++++++ roles/openshift_logging/filter_plugins/__init__.py | 0 .../filter_plugins/openshift_logging.py | 29 ++ roles/openshift_logging/library/__init.py__ | 0 .../library/openshift_logging_facts.py | 303 +++++++++++++++++++++ roles/openshift_logging/meta/main.yaml | 3 + roles/openshift_logging/tasks/delete_logging.yaml | 93 +++++++ roles/openshift_logging/tasks/generate_certs.yaml | 168 ++++++++++++ .../tasks/generate_clusterrolebindings.yaml | 12 + .../tasks/generate_clusterroles.yaml | 10 + .../tasks/generate_configmaps.yaml | 103 +++++++ .../tasks/generate_deploymentconfigs.yaml | 59 ++++ .../tasks/generate_jks_chain.yaml | 60 ++++ roles/openshift_logging/tasks/generate_pems.yaml | 36 +++ roles/openshift_logging/tasks/generate_pkcs12.yaml | 24 ++ roles/openshift_logging/tasks/generate_pvcs.yaml | 47 ++++ .../tasks/generate_rolebindings.yaml | 11 + roles/openshift_logging/tasks/generate_routes.yaml | 20 ++ .../openshift_logging/tasks/generate_secrets.yaml | 73 +++++ .../tasks/generate_serviceaccounts.yaml | 13 + .../openshift_logging/tasks/generate_services.yaml | 81 ++++++ roles/openshift_logging/tasks/install_curator.yaml | 27 ++ .../tasks/install_elasticsearch.yaml | 105 +++++++ roles/openshift_logging/tasks/install_fluentd.yaml | 38 +++ roles/openshift_logging/tasks/install_kibana.yaml | 33 +++ roles/openshift_logging/tasks/install_logging.yaml | 49 ++++ roles/openshift_logging/tasks/install_support.yaml | 52 ++++ roles/openshift_logging/tasks/label_node.yaml | 27 ++ roles/openshift_logging/tasks/main.yaml | 35 +++ .../tasks/procure_server_certs.yaml | 54 ++++ roles/openshift_logging/tasks/scale.yaml | 26 ++ roles/openshift_logging/tasks/start_cluster.yaml | 107 ++++++++ roles/openshift_logging/tasks/stop_cluster.yaml | 98 +++++++ roles/openshift_logging/tasks/upgrade_logging.yaml | 33 +++ roles/openshift_logging/templates/clusterrole.j2 | 21 ++ .../templates/clusterrolebinding.j2 | 24 ++ roles/openshift_logging/templates/curator.j2 | 97 +++++++ roles/openshift_logging/templates/es.j2 | 105 +++++++ roles/openshift_logging/templates/fluentd.j2 | 149 ++++++++++ roles/openshift_logging/templates/job.j2 | 26 ++ roles/openshift_logging/templates/kibana.j2 | 110 ++++++++ roles/openshift_logging/templates/oauth-client.j2 | 15 + roles/openshift_logging/templates/pvc.j2 | 27 ++ roles/openshift_logging/templates/rolebinding.j2 | 14 + .../openshift_logging/templates/route_reencrypt.j2 | 25 ++ roles/openshift_logging/templates/secret.j2 | 9 + roles/openshift_logging/templates/service.j2 | 28 ++ .../openshift_logging/templates/serviceaccount.j2 | 16 ++ 
roles/openshift_logging/vars/main.yaml | 40 +++ 62 files changed, 3391 insertions(+) create mode 100644 roles/openshift_logging/README.md create mode 100644 roles/openshift_logging/defaults/main.yml create mode 100644 roles/openshift_logging/files/curator.yml create mode 100644 roles/openshift_logging/files/elasticsearch-logging.yml create mode 100644 roles/openshift_logging/files/elasticsearch.yml create mode 100644 roles/openshift_logging/files/es_migration.sh create mode 100644 roles/openshift_logging/files/fluent.conf create mode 100644 roles/openshift_logging/files/fluentd-throttle-config.yaml create mode 100644 roles/openshift_logging/files/generate-jks.sh create mode 100644 roles/openshift_logging/files/logging-deployer-sa.yaml create mode 100644 roles/openshift_logging/files/secure-forward.conf create mode 100644 roles/openshift_logging/files/server-tls.json create mode 100644 roles/openshift_logging/files/signing.conf create mode 100644 roles/openshift_logging/files/util.sh create mode 100644 roles/openshift_logging/filter_plugins/__init__.py create mode 100644 roles/openshift_logging/filter_plugins/openshift_logging.py create mode 100644 roles/openshift_logging/library/__init.py__ create mode 100644 roles/openshift_logging/library/openshift_logging_facts.py create mode 100644 roles/openshift_logging/meta/main.yaml create mode 100644 roles/openshift_logging/tasks/delete_logging.yaml create mode 100644 roles/openshift_logging/tasks/generate_certs.yaml create mode 100644 roles/openshift_logging/tasks/generate_clusterrolebindings.yaml create mode 100644 roles/openshift_logging/tasks/generate_clusterroles.yaml create mode 100644 roles/openshift_logging/tasks/generate_configmaps.yaml create mode 100644 roles/openshift_logging/tasks/generate_deploymentconfigs.yaml create mode 100644 roles/openshift_logging/tasks/generate_jks_chain.yaml create mode 100644 roles/openshift_logging/tasks/generate_pems.yaml create mode 100644 roles/openshift_logging/tasks/generate_pkcs12.yaml create mode 100644 roles/openshift_logging/tasks/generate_pvcs.yaml create mode 100644 roles/openshift_logging/tasks/generate_rolebindings.yaml create mode 100644 roles/openshift_logging/tasks/generate_routes.yaml create mode 100644 roles/openshift_logging/tasks/generate_secrets.yaml create mode 100644 roles/openshift_logging/tasks/generate_serviceaccounts.yaml create mode 100644 roles/openshift_logging/tasks/generate_services.yaml create mode 100644 roles/openshift_logging/tasks/install_curator.yaml create mode 100644 roles/openshift_logging/tasks/install_elasticsearch.yaml create mode 100644 roles/openshift_logging/tasks/install_fluentd.yaml create mode 100644 roles/openshift_logging/tasks/install_kibana.yaml create mode 100644 roles/openshift_logging/tasks/install_logging.yaml create mode 100644 roles/openshift_logging/tasks/install_support.yaml create mode 100644 roles/openshift_logging/tasks/label_node.yaml create mode 100644 roles/openshift_logging/tasks/main.yaml create mode 100644 roles/openshift_logging/tasks/procure_server_certs.yaml create mode 100644 roles/openshift_logging/tasks/scale.yaml create mode 100644 roles/openshift_logging/tasks/start_cluster.yaml create mode 100644 roles/openshift_logging/tasks/stop_cluster.yaml create mode 100644 roles/openshift_logging/tasks/upgrade_logging.yaml create mode 100644 roles/openshift_logging/templates/clusterrole.j2 create mode 100644 roles/openshift_logging/templates/clusterrolebinding.j2 create mode 100644 roles/openshift_logging/templates/curator.j2 create mode 
100644 roles/openshift_logging/templates/es.j2 create mode 100644 roles/openshift_logging/templates/fluentd.j2 create mode 100644 roles/openshift_logging/templates/job.j2 create mode 100644 roles/openshift_logging/templates/kibana.j2 create mode 100644 roles/openshift_logging/templates/oauth-client.j2 create mode 100644 roles/openshift_logging/templates/pvc.j2 create mode 100644 roles/openshift_logging/templates/rolebinding.j2 create mode 100644 roles/openshift_logging/templates/route_reencrypt.j2 create mode 100644 roles/openshift_logging/templates/secret.j2 create mode 100644 roles/openshift_logging/templates/service.j2 create mode 100644 roles/openshift_logging/templates/serviceaccount.j2 create mode 100644 roles/openshift_logging/vars/main.yaml diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md new file mode 100644 index 000000000..9836fc217 --- /dev/null +++ b/roles/openshift_logging/README.md @@ -0,0 +1,86 @@ +## openshift_logging Role + +### Please note this role is still a work in progress + +This role is used for installing the Aggregated Logging stack. It should be run against +a single host; it will create any missing certificates and API objects that the current +[logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) creates. + +As part of the installation, it is recommended that you add the Fluentd node selector label +to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels). + +### Required vars: + +- `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging. +- `openshift_logging_upgrade_logging`: When `True` the `openshift_logging` role will upgrade Aggregated Logging. + +When both `openshift_logging_install_logging` and `openshift_logging_upgrade_logging` are `False` the `openshift_logging` role will uninstall Aggregated Logging. An example set of host variables is sketched below. + +### Optional vars: + +- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'. +- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'. +- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'. +- `master_url`: The URL for the Kubernetes master; this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.cluster.local'. +- `public_master_url`: The public facing URL for the Kubernetes master; this is used for authentication redirection. Defaults to 'https://localhost:8443'. +- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'. +- `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'. +- `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'. +- `openshift_logging_curator_run_minute`: The minute of the hour that Curator will run at. Defaults to '0'. +- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'. +- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'. +- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
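
The example host variables mentioned above might look like the following minimal sketch. The variable names come from this README; the values and the group_vars placement are illustrative only, not a recommended configuration:

```yaml
# group_vars for the host the role is run against (illustrative placement)
openshift_logging_install_logging: true                     # install rather than upgrade/uninstall
openshift_logging_namespace: logging                        # project the stack is installed into
openshift_logging_use_ops: false                            # single ES/Kibana cluster, no ops copy
openshift_logging_image_prefix: docker.io/openshift/origin-
openshift_logging_image_version: latest
openshift_logging_curator_default_days: 30                  # prune application logs after 30 days
```
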
+- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'. +- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified. + +- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'. +- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. +- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. +- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'. +- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. +- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. + +- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy. Defaults to '"logging-infra-fluentd": "true"'. +- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'. +- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'. +- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'. +- `openshift_logging_fluentd_use_journal`: Whether or not Fluentd should read log entries from Journal. Defaults to 'False'. NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver and may overwrite this value. +- `openshift_logging_fluentd_journal_read_from_head`: Whether or not Fluentd will try to read from the head of Journal when first starting up; using this may cause a delay in ES receiving current log records. Defaults to 'False'. +- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. + +- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'. +- `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'. +- `openshift_logging_es_ca`: The location of the CA Fluentd uses to communicate with its openshift_logging_es_host. Defaults to '/etc/fluent/keys/ca'. +- `openshift_logging_es_client_cert`: The location of the client certificate Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/cert'. +- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'. + +- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'. +- `openshift_logging_es_cpu_limit`: The CPU limit for the ES cluster. Unset if not specified. +- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '1024Mi'. +- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'. +- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'. +- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs; when not provided, the role will not generate any PVCs. Defaults to '""'. +- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
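
To make the PVC-related variables above concrete, here is a rough sketch of the kind of claim the role requests per ES cluster member when `openshift_logging_es_pvc_size` is set. This is illustrative only and is not the role's actual `templates/pvc.j2` output, which is not reproduced in this README:

```yaml
# Illustrative only -- the real object is rendered from templates/pvc.j2
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: logging-es-1                  # name built from openshift_logging_es_pvc_prefix
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                   # from openshift_logging_es_pvc_size
  selector:
    matchLabels:                      # from openshift_logging_es_pv_selector, if provided
      storage: logging
  # openshift_logging_es_pvc_dynamic=True would additionally add a
  # dynamic-provisioning annotation (not shown here)
```
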
+- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'. +- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'. + +When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the +same as above for their non-ops counterparts, but apply to the OPS cluster instance: +- `openshift_logging_es_ops_host`: logging-es-ops +- `openshift_logging_es_ops_port`: 9200 +- `openshift_logging_es_ops_ca`: /etc/fluent/keys/ca +- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert +- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key +- `openshift_logging_es_ops_cluster_size`: 1 +- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set +- `openshift_logging_es_ops_memory_limit`: 1024Mi +- `openshift_logging_es_ops_pvc_dynamic`: False +- `openshift_logging_es_ops_pvc_size`: "" +- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops +- `openshift_logging_es_ops_recover_after_time`: 5m +- `openshift_logging_es_ops_storage_group`: 65534 +- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'. +- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. +- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. +- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. +- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml new file mode 100644 index 000000000..a441f10b9 --- /dev/null +++ b/roles/openshift_logging/defaults/main.yml @@ -0,0 +1,83 @@ +--- +openshift_logging_image_prefix: docker.io/openshift/origin- +openshift_logging_image_version: latest +openshift_logging_use_ops: False +master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +public_master_url: "https://{{openshift.common.public_hostname}}:8443" +openshift_logging_namespace: logging +openshift_logging_install_logging: True + +openshift_logging_curator_default_days: 30 +openshift_logging_curator_run_hour: 0 +openshift_logging_curator_run_minute: 0 +openshift_logging_curator_run_timezone: UTC +openshift_logging_curator_script_log_level: INFO +openshift_logging_curator_log_level: ERROR +openshift_logging_curator_cpu_limit: 100m +openshift_logging_curator_memory_limit: null + +openshift_logging_curator_ops_cpu_limit: 100m +openshift_logging_curator_ops_memory_limit: null + +openshift_logging_kibana_hostname: "kibana.{{openshift.common.dns_domain}}" +openshift_logging_kibana_cpu_limit: null +openshift_logging_kibana_memory_limit: null +openshift_logging_kibana_proxy_debug: false +openshift_logging_kibana_proxy_cpu_limit: null +openshift_logging_kibana_proxy_memory_limit: null + +openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}" +openshift_logging_kibana_ops_cpu_limit: null +openshift_logging_kibana_ops_memory_limit: null +openshift_logging_kibana_ops_proxy_debug: false +openshift_logging_kibana_ops_proxy_cpu_limit: null +openshift_logging_kibana_ops_proxy_memory_limit: null + +openshift_logging_fluentd_nodeselector: '"logging-infra-fluentd": "true"' +openshift_logging_fluentd_cpu_limit: 100m 
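# Note on the Fluentd node selector above: the daemonset only schedules onto nodes
# that carry the logging-infra-fluentd=true label, and the role labels the nodes
# listed in openshift_logging_fluentd_hosts for you (see tasks/label_node.yaml).
# That labeling is roughly equivalent to running, per node (illustrative command):
#   oc label node <node-name> logging-infra-fluentd=true --overwrite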
+openshift_logging_fluentd_memory_limit: 512Mi +openshift_logging_fluentd_es_copy: false +openshift_logging_fluentd_use_journal: false +openshift_logging_fluentd_journal_read_from_head: false +openshift_logging_fluentd_hosts: ['--all'] + +openshift_logging_es_host: logging-es +openshift_logging_es_port: 9200 +openshift_logging_es_ca: /etc/fluent/keys/ca +openshift_logging_es_client_cert: /etc/fluent/keys/cert +openshift_logging_es_client_key: /etc/fluent/keys/key +openshift_logging_es_cluster_size: 1 +openshift_logging_es_cpu_limit: null +openshift_logging_es_memory_limit: 1024Mi +openshift_logging_es_pv_selector: null +openshift_logging_es_pvc_dynamic: False +openshift_logging_es_pvc_size: "" +openshift_logging_es_pvc_prefix: logging-es +openshift_logging_es_recover_after_time: 5m +openshift_logging_es_storage_group: 65534 + +# allow cluster-admin or cluster-reader to view operations index +openshift_logging_es_ops_allow_cluster_reader: False + +openshift_logging_es_ops_host: logging-es-ops +openshift_logging_es_ops_port: 9200 +openshift_logging_es_ops_ca: /etc/fluent/keys/ca +openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert +openshift_logging_es_ops_client_key: /etc/fluent/keys/key +openshift_logging_es_ops_cluster_size: 1 +openshift_logging_es_ops_cpu_limit: null +openshift_logging_es_ops_memory_limit: 1024Mi +openshift_logging_es_ops_pv_selector: null +openshift_logging_es_ops_pvc_dynamic: False +openshift_logging_es_ops_pvc_size: "" +openshift_logging_es_ops_pvc_prefix: logging-es-ops +openshift_logging_es_ops_recover_after_time: 5m +openshift_logging_es_ops_storage_group: 65534 + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#es_logging_contents: +#es_config_contents: +#curator_config_contents: +#fluentd_config_contents: +#fluentd_throttle_contents: +#fluentd_secureforward_contents: diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging/files/curator.yml new file mode 100644 index 000000000..8d62d8e7d --- /dev/null +++ b/roles/openshift_logging/files/curator.yml @@ -0,0 +1,18 @@ +# Logging example curator config file + +# uncomment and use this to override the defaults from env vars +#.defaults: +# delete: +# days: 30 +# runhour: 0 +# runminute: 0 + +# to keep ops logs for a different duration: +#.operations: +# delete: +# weeks: 8 + +# example for a normal project +#myapp: +# delete: +# weeks: 1 diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/files/elasticsearch-logging.yml new file mode 100644 index 000000000..377abe21f --- /dev/null +++ b/roles/openshift_logging/files/elasticsearch-logging.yml @@ -0,0 +1,72 @@ +# you can override this by setting a system property, for example -Des.logger.level=DEBUG +es.logger.level: INFO +rootLogger: ${es.logger.level}, console, file +logger: + # log action execution errors for easier debugging + action: WARN + # reduce the logging for aws, too much is logged under the default INFO + com.amazonaws: WARN + io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL} + io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL} + + # gateway + #gateway: DEBUG + #index.gateway: DEBUG + + # peer shard recovery + #indices.recovery: DEBUG + + # discovery + #discovery: TRACE + + index.search.slowlog: TRACE, index_search_slow_log_file + index.indexing.slowlog: TRACE, index_indexing_slow_log_file + + # search-guard + com.floragunn.searchguard: WARN + +additivity: + index.search.slowlog:
false + index.indexing.slowlog: false + +appender: + console: + type: console + layout: + type: consolePattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. + # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html + #file: + #type: extrasRollingFile + #file: ${path.logs}/${cluster.name}.log + #rollingPolicy: timeBased + #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz + #layout: + #type: pattern + #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + index_search_slow_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_index_search_slowlog.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + index_indexing_slow_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/roles/openshift_logging/files/elasticsearch.yml b/roles/openshift_logging/files/elasticsearch.yml new file mode 100644 index 000000000..4eff30e61 --- /dev/null +++ b/roles/openshift_logging/files/elasticsearch.yml @@ -0,0 +1,74 @@ +cluster: + name: ${CLUSTER_NAME} + +script: + inline: on + indexed: on + +index: + number_of_shards: 1 + number_of_replicas: 0 + auto_expand_replicas: 0-3 + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + +node: + master: true + data: true + +network: + host: 0.0.0.0 + +cloud: + kubernetes: + service: ${SERVICE_DNS} + namespace: ${NAMESPACE} + +discovery: + type: kubernetes + zen.ping.multicast.enabled: false + +gateway: + expected_master_nodes: ${NODE_QUORUM} + recover_after_nodes: ${RECOVER_AFTER_NODES} + expected_nodes: ${RECOVER_EXPECTED_NODES} + recover_after_time: ${RECOVER_AFTER_TIME} + +io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] + +openshift.searchguard: + keystore.path: /etc/elasticsearch/secret/admin.jks + truststore.path: /etc/elasticsearch/secret/searchguard.truststore + + +path: + data: /elasticsearch/persistent/${CLUSTER_NAME}/data + logs: /elasticsearch/${CLUSTER_NAME}/logs + work: /elasticsearch/${CLUSTER_NAME}/work + scripts: /elasticsearch/${CLUSTER_NAME}/scripts + +searchguard: + authcz.admin_dn: + - CN=system.admin,OU=OpenShift,O=Logging + config_index_name: ".searchguard.${HOSTNAME}" + ssl: + transport: + enabled: true + enforce_hostname_verification: false + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/searchguard.key + keystore_password: kspass + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore + truststore_password: tspass + http: + enabled: true + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/key + keystore_password: kspass + clientauth_mode: OPTIONAL + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/truststore + truststore_password: tspass diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh new file mode 
100644 index 000000000..cca283bae --- /dev/null +++ b/roles/openshift_logging/files/es_migration.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +CA=${1:-/etc/openshift/logging/ca.crt} +KEY=${2:-/etc/openshift/logging/system.admin.key} +CERT=${3:-/etc/openshift/logging/system.admin.crt} +openshift_logging_es_host=${4:-logging-es} +openshift_logging_es_port=${5:-9200} +namespace=${6:-logging} + +# for each index in _cat/indices +# skip indices that begin with . - .kibana, .operations, etc. +# skip indices that contain a uuid +# get a list of unique projects +# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices +# we are interested in - the awk will strip that part off +function get_list_of_indices() { + curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ + awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ + '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ + sort -u +} + +# for each index in _cat/indices +# skip indices that begin with . - .kibana, .operations, etc. +# get a list of unique project.uuid +# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices +# we are interested in - the awk will strip that part off +function get_list_of_proj_uuid_indices() { + curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ + awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ + '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ + sort -u +} + +if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then + echo "No Elasticsearch pods found running. Cannot update common data model." + exit 1 +fi + +count=$(get_list_of_indices | wc -l) +if [ $count -eq 0 ]; then + echo No matching indices found - skipping update_for_uuid +else + echo Creating aliases for $count index patterns . . . + { + echo '{"actions":[' + get_list_of_indices | \ + while IFS=. read proj ; do + # e.g. make test.uuid.* an alias of test.* so we can search for + # /test.uuid.*/_search and get both the test.uuid.* and + # the test.* indices + uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null) + [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uid.*\"}}" + done + echo ']}' + } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" +fi + +count=$(get_list_of_proj_uuid_indices | wc -l) +if [ $count -eq 0 ] ; then + echo No matching indices found - skipping update_for_common_data_model + exit 0 +fi + +echo Creating aliases for $count index patterns . . . +# for each index in _cat/indices +# skip indices that begin with . - .kibana, .operations, etc. +# get a list of unique project.uuid +# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices +# we are interested in - the awk will strip that part off +{ + echo '{"actions":[' + get_list_of_proj_uuid_indices | \ + while IFS=. read proj uuid ; do + # e.g.
make project.test.uuid.* an alias of test.uuid.* so we can search for + # /project.test.uuid.*/_search and get both the test.uuid.* and + # the project.test.uuid.* indices + echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}" + done + echo ']}' +} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf new file mode 100644 index 000000000..aa843e983 --- /dev/null +++ b/roles/openshift_logging/files/fluent.conf @@ -0,0 +1,34 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +## ordered so that syslog always runs last... +@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## + + diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging/files/fluentd-throttle-config.yaml new file mode 100644 index 000000000..375621ff1 --- /dev/null +++ b/roles/openshift_logging/files/fluentd-throttle-config.yaml @@ -0,0 +1,7 @@ +# Logging example fluentd throttling config file + +#example-project: +# read_lines_limit: 10 +# +#.operations: +# read_lines_limit: 100 diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh new file mode 100644 index 000000000..8760f37fe --- /dev/null +++ b/roles/openshift_logging/files/generate-jks.sh @@ -0,0 +1,71 @@ +#! /bin/sh +set -ex + +function importPKCS() { + dir=${SCRATCH_DIR:-_output} + NODE_NAME=$1 + ks_pass=${KS_PASS:-kspass} + ts_pass=${TS_PASS:-tspass} + rm -rf $NODE_NAME + + keytool \ + -importkeystore \ + -srckeystore $NODE_NAME.pkcs12 \ + -srcstoretype PKCS12 \ + -srcstorepass pass \ + -deststorepass $ks_pass \ + -destkeypass $ks_pass \ + -destkeystore $dir/keystore.jks \ + -alias 1 \ + -destalias $NODE_NAME + + echo "Import back to keystore (including CA chain)" + + keytool \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/keystore.jks \ + -storepass $ks_pass \ + -noprompt -alias sig-ca + + echo All done for $NODE_NAME +} + +function createTruststore() { + + echo "Import CA to truststore for validating client certs" + + keytool \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/truststore.jks \ + -storepass $ts_pass \ + -noprompt -alias sig-ca +} + +dir="/opt/deploy/" +SCRATCH_DIR=$dir + +admin_user='system.admin' + +if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then + importPKCS "system.admin" + mv $dir/keystore.jks $dir/system.admin.jks +fi + +if [[ ! -f $dir/searchguard_node_key || -z "$(keytool -list -keystore $dir/searchguard_node_key -storepass kspass | grep sig-ca)" ]]; then + importPKCS "elasticsearch" + mv $dir/keystore.jks $dir/searchguard_node_key +fi + + +if [[ ! -f $dir/keystore.jks || -z "$(keytool -list -keystore $dir/keystore.jks -storepass kspass | grep sig-ca)" ]]; then + importPKCS "logging-es" +fi + +[ ! -f $dir/truststore.jks ] && createTruststore + +[ !
-f $dir/searchguard_node_truststore ] && cp $dir/truststore.jks $dir/searchguard_node_truststore + +# necessary so that the job knows it completed successfully +exit 0 diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml new file mode 100644 index 000000000..334c9402b --- /dev/null +++ b/roles/openshift_logging/files/logging-deployer-sa.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: logging-deployer +secrets: +- name: logging-deployer diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging/files/secure-forward.conf new file mode 100644 index 000000000..f4483df79 --- /dev/null +++ b/roles/openshift_logging/files/secure-forward.conf @@ -0,0 +1,24 @@ +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# + # or IP +# host server.fqdn.example.com +# port 24284 +# +# + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# diff --git a/roles/openshift_logging/files/server-tls.json b/roles/openshift_logging/files/server-tls.json new file mode 100644 index 000000000..86deb23e3 --- /dev/null +++ b/roles/openshift_logging/files/server-tls.json @@ -0,0 +1,5 @@ +// See for available options: https://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener +tls_options = { + ciphers: 'kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!DES:!EXP:!SEED:!IDEA:+3DES', + honorCipherOrder: true +} diff --git a/roles/openshift_logging/files/signing.conf b/roles/openshift_logging/files/signing.conf new file mode 100644 index 000000000..810a057d9 --- /dev/null +++ b/roles/openshift_logging/files/signing.conf @@ -0,0 +1,103 @@ +# Simple Signing CA + +# The [default] section contains global constants that can be referred to from +# the entire configuration file. It may also hold settings pertaining to more +# than one openssl command. + +[ default ] +#dir = _output # Top dir + +# The next part of the configuration file is used by the openssl req command. +# It defines the CA's key pair, its DN, and the desired extensions for the CA +# certificate. + +[ req ] +default_bits = 2048 # RSA key size +encrypt_key = yes # Protect private key +default_md = sha1 # MD to use +utf8 = yes # Input is UTF-8 +string_mask = utf8only # Emit UTF-8 strings +prompt = no # Don't prompt for DN +distinguished_name = ca_dn # DN section +req_extensions = ca_reqext # Desired extensions + +[ ca_dn ] +0.domainComponent = "io" +1.domainComponent = "openshift" +organizationName = "OpenShift Origin" +organizationalUnitName = "Logging Signing CA" +commonName = "Logging Signing CA" + +[ ca_reqext ] +keyUsage = critical,keyCertSign,cRLSign +basicConstraints = critical,CA:true,pathlen:0 +subjectKeyIdentifier = hash + +# The remainder of the configuration file is used by the openssl ca command. +# The CA section defines the locations of CA assets, as well as the policies +# applying to the CA. 
+ +[ ca ] +default_ca = signing_ca # The default CA section + +[ signing_ca ] +certificate = $dir/ca.crt # The CA cert +private_key = $dir/ca.key # CA private key +new_certs_dir = $dir/ # Certificate archive +serial = $dir/ca.serial.txt # Serial number file +crlnumber = $dir/ca.crl.srl # CRL number file +database = $dir/ca.db # Index file +unique_subject = no # Require unique subject +default_days = 730 # How long to certify for +default_md = sha1 # MD to use +policy = any_pol # Default naming policy +email_in_dn = no # Add email to cert DN +preserve = no # Keep passed DN ordering +name_opt = ca_default # Subject DN display options +cert_opt = ca_default # Certificate display options +copy_extensions = copy # Copy extensions from CSR +x509_extensions = client_ext # Default cert extensions +default_crl_days = 7 # How long before next CRL +crl_extensions = crl_ext # CRL extensions + +# Naming policies control which parts of a DN end up in the certificate and +# under what circumstances certification should be denied. + +[ match_pol ] +domainComponent = match # Must match 'simple.org' +organizationName = match # Must match 'Simple Inc' +organizationalUnitName = optional # Included if present +commonName = supplied # Must be present + +[ any_pol ] +domainComponent = optional +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = optional +emailAddress = optional + +# Certificate extensions define what types of certificates the CA is able to +# create. + +[ client_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +[ server_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +# CRL extensions exist solely to point to the CA certificate that has issued +# the CRL. 
+ +[ crl_ext ] +authorityKeyIdentifier = keyid diff --git a/roles/openshift_logging/files/util.sh b/roles/openshift_logging/files/util.sh new file mode 100644 index 000000000..5752a0fcd --- /dev/null +++ b/roles/openshift_logging/files/util.sh @@ -0,0 +1,192 @@ +#!/bin/bash + +function generate_JKS_chain() { + dir=${SCRATCH_DIR:-_output} + ADD_OID=$1 + NODE_NAME=$2 + CERT_NAMES=${3:-$NODE_NAME} + ks_pass=${KS_PASS:-kspass} + ts_pass=${TS_PASS:-tspass} + rm -rf $NODE_NAME + + extension_names="" + for name in ${CERT_NAMES//,/ }; do + extension_names="${extension_names},dns:${name}" + done + + if [ "$ADD_OID" = true ]; then + extension_names="${extension_names},oid:1.2.3.4.5.5" + fi + + echo Generating keystore and certificate for node $NODE_NAME + + "$keytool" -genkey \ + -alias $NODE_NAME \ + -keystore $dir/keystore.jks \ + -keypass $ks_pass \ + -storepass $ks_pass \ + -keyalg RSA \ + -keysize 2048 \ + -validity 712 \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \ + -ext san=dns:localhost,ip:127.0.0.1"${extension_names}" + + echo Generating certificate signing request for node $NODE_NAME + + "$keytool" -certreq \ + -alias $NODE_NAME \ + -keystore $dir/keystore.jks \ + -storepass $ks_pass \ + -file $dir/$NODE_NAME.csr \ + -keyalg rsa \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \ + -ext san=dns:localhost,ip:127.0.0.1"${extension_names}" + + echo Sign certificate request with CA + + openssl ca \ + -in $dir/$NODE_NAME.csr \ + -notext \ + -out $dir/$NODE_NAME.crt \ + -config $dir/signing.conf \ + -extensions v3_req \ + -batch \ + -extensions server_ext + + echo "Import back to keystore (including CA chain)" + + "$keytool" \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/keystore.jks \ + -storepass $ks_pass \ + -noprompt -alias sig-ca + + "$keytool" \ + -import \ + -file $dir/$NODE_NAME.crt \ + -keystore $dir/keystore.jks \ + -storepass $ks_pass \ + -noprompt \ + -alias $NODE_NAME + + echo "Import CA to truststore for validating client certs" + + "$keytool" \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/truststore.jks \ + -storepass $ts_pass \ + -noprompt -alias sig-ca + + echo All done for $NODE_NAME +} + +function generate_PEM_cert() { + NODE_NAME="$1" + dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets + + echo Generating keystore and certificate for node ${NODE_NAME} + + openssl req -out "$dir/$NODE_NAME.csr" -new -newkey rsa:2048 -keyout "$dir/$NODE_NAME.key" -subj "/CN=$NODE_NAME/OU=OpenShift/O=Logging" -days 712 -nodes + + echo Sign certificate request with CA + openssl ca \ + -in "$dir/$NODE_NAME.csr" \ + -notext \ + -out "$dir/$NODE_NAME.crt" \ + -config $dir/signing.conf \ + -extensions v3_req \ + -batch \ + -extensions server_ext +} + +function generate_JKS_client_cert() { + NODE_NAME="$1" + ks_pass=${KS_PASS:-kspass} + ts_pass=${TS_PASS:-tspass} + dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets + + echo Generating keystore and certificate for node ${NODE_NAME} + + "$keytool" -genkey \ + -alias $NODE_NAME \ + -keystore $dir/$NODE_NAME.jks \ + -keyalg RSA \ + -keysize 2048 \ + -validity 712 \ + -keypass $ks_pass \ + -storepass $ks_pass \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" + + echo Generating certificate signing request for node $NODE_NAME + + "$keytool" -certreq \ + -alias $NODE_NAME \ + -keystore $dir/$NODE_NAME.jks \ + -file $dir/$NODE_NAME.csr \ + -keyalg rsa \ + -keypass $ks_pass \ + -storepass $ks_pass \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" + + echo Sign certificate request with CA + 
openssl ca \ + -in "$dir/$NODE_NAME.csr" \ + -notext \ + -out "$dir/$NODE_NAME.crt" \ + -config $dir/signing.conf \ + -extensions v3_req \ + -batch \ + -extensions server_ext + + echo "Import back to keystore (including CA chain)" + + "$keytool" \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/$NODE_NAME.jks \ + -storepass $ks_pass \ + -noprompt -alias sig-ca + + "$keytool" \ + -import \ + -file $dir/$NODE_NAME.crt \ + -keystore $dir/$NODE_NAME.jks \ + -storepass $ks_pass \ + -noprompt \ + -alias $NODE_NAME + + echo All done for $NODE_NAME +} + +function join { local IFS="$1"; shift; echo "$*"; } + +function get_es_dcs() { + oc get dc --selector logging-infra=elasticsearch -o name +} + +function get_curator_dcs() { + oc get dc --selector logging-infra=curator -o name +} + +function extract_nodeselector() { + local inputstring="${1//\"/}" # remove any errant double quotes in the inputs + local selectors=() + + for keyvalstr in ${inputstring//\,/ }; do + + keyval=( ${keyvalstr//=/ } ) + + if [[ -n "${keyval[0]}" && -n "${keyval[1]}" ]]; then + selectors+=( "\"${keyval[0]}\": \"${keyval[1]}\"") + else + echo "Could not make a node selector label from '${keyval[*]}'" + exit 255 + fi + done + + if [[ "${#selectors[*]}" -gt 0 ]]; then + echo nodeSelector: "{" $(join , "${selectors[@]}") "}" + fi +} diff --git a/roles/openshift_logging/filter_plugins/__init__.py b/roles/openshift_logging/filter_plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py new file mode 100644 index 000000000..b42d5da5f --- /dev/null +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -0,0 +1,29 @@ +import random, string +import shutil +import sys +import StringIO + +def random_word(source_alpha,length): + return ''.join(random.choice(source_alpha) for i in range(length)) + +def entry_from_named_pair(register_pairs, key): + from ansible.utils.display import Display + results = register_pairs.get("results") + if results == None: + raise RuntimeError("The dict argument does not have a 'results' entry. Must not have been created using 'register' in a loop") + for result in results: + item = result.get("item") + if item != None: + name = item.get("name") + if name == key: + return result["content"] + raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key)) + +class FilterModule(object): + ''' OpenShift Logging Filters ''' + + def filters(self): + return { + 'random_word': random_word, + 'entry_from_named_pair': entry_from_named_pair, + } diff --git a/roles/openshift_logging/library/__init.py__ b/roles/openshift_logging/library/__init.py__ new file mode 100644 index 000000000..e69de29bb diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py new file mode 100644 index 000000000..1f0c25a84 --- /dev/null +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -0,0 +1,303 @@ + +DOCUMENTATION = """ +--- +module: openshift_logging_facts +version_added: "" +short_description: Gather facts about the OpenShift logging stack +description: + - Determine the current facts about the OpenShift logging stack (e.g. 
cluster size) +options: +author: Red Hat, Inc +""" + +EXAMPLES = """ +- action: opneshift_logging_facts +""" + +RETURN = """ +""" + +import copy +import json +import exceptions +import yaml +from subprocess import * + +default_oc_options = ["-o","json"] + +#constants used for various labels and selectors +COMPONENT_KEY="component" +LOGGING_INFRA_KEY="logging-infra" + +#selectors for filtering resources +DS_FLUENTD_SELECTOR=LOGGING_INFRA_KEY + "=" + "fluentd" +LOGGING_SELECTOR=LOGGING_INFRA_KEY + "=" + "support" +ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift" +COMPONENTS = ["kibana","curator","elasticsearch","fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"] + +class OCBaseCommand(object): + def __init__(self, binary, kubeconfig, namespace): + self.binary = binary + self.kubeconfig = kubeconfig + self.user = self.getSystemAdmin(self.kubeconfig) + self.namespace = namespace + + def getSystemAdmin(self,kubeconfig): + with open(kubeconfig,'r') as f: + config = yaml.load(f) + for user in config["users"]: + if user["name"].startswith("system:admin"): + return user["name"] + raise Exception("Unable to find system:admin in: " + kubeconfig) + + def oc(self, sub, kind, namespace=None, name=None,addOptions=[]): + cmd = [self.binary, sub, kind] + if name != None: + cmd = cmd + [name] + if namespace != None: + cmd = cmd + ["-n", namespace] + cmd = cmd + ["--user="+self.user,"--config="+self.kubeconfig] + default_oc_options + addOptions + try: + process = Popen(cmd, stdout=PIPE, stderr=PIPE) + out, err = process.communicate(cmd) + if len(err) > 0: + if 'not found' in err: + return {'items':[]} + if 'No resources found' in err: + return {'items':[]} + raise Exception(err) + except Exception as e: + err = "There was an exception trying to run the command '"+ " ".join(cmd) +"' " + str(e) + raise Exception(err) + + return json.loads(out) + +class OpenshiftLoggingFacts(OCBaseCommand): + + name = "facts" + + def __init__(self, logger, binary, kubeconfig, namespace): + super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace) + self.logger = logger + self.facts = dict() + + def defaultKeysFor(self, kind): + for comp in COMPONENTS: + self.addFactsFor(comp, kind) + + def addFactsFor(self, comp, kind, name=None, facts=None): + if self.facts.has_key(comp) == False: + self.facts[comp] = dict() + if self.facts[comp].has_key(kind) == False: + self.facts[comp][kind] = dict() + if name: + self.facts[comp][kind][name] = facts + + def factsForRoutes(self, namespace): + self.defaultKeysFor("routes") + routeList = self.oc("get","routes", namespace=namespace, addOptions=["-l",ROUTE_SELECTOR]) + if len(routeList["items"]) == 0: + return None + for route in routeList["items"]: + name = route["metadata"]["name"] + comp = self.comp(name) + if comp != None: + self.addFactsFor(comp, "routes", name, dict(host=route["spec"]["host"])) + self.facts["agl_namespace"] = namespace + + + def factsForDaemonsets(self, namespace): + self.defaultKeysFor("daemonsets") + dsList = self.oc("get", "daemonsets", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY+"=fluentd"]) + if len(dsList["items"]) == 0: + return + for ds in dsList["items"]: + name = ds["metadata"]["name"] + comp = self.comp(name) + spec = ds["spec"]["template"]["spec"] + container = spec["containers"][0] + result = dict( + selector = ds["spec"]["selector"], + image = container["image"], + resources = container["resources"], + nodeSelector = spec["nodeSelector"], + serviceAccount = spec["serviceAccount"], + 
terminationGracePeriodSeconds = spec["terminationGracePeriodSeconds"] + ) + self.addFactsFor(comp, "daemonsets", name, result) + + def factsForPvcs(self, namespace): + self.defaultKeysFor("pvcs") + pvclist = self.oc("get", "pvc", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY]) + if len(pvclist["items"]) == 0: + return + pvcs = [] + for pvc in pvclist["items"]: + name = pvc["metadata"]["name"] + comp = self.comp(name) + self.addFactsFor(comp,"pvcs",name,dict()) + + def factsForDeploymentConfigs(self, namespace): + self.defaultKeysFor("deploymentconfigs") + dclist = self.oc("get", "deploymentconfigs", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY]) + if len(dclist["items"]) == 0: + return + dcs = dclist["items"] + for dc in dcs: + name = dc["metadata"]["name"] + comp = self.comp(name) + if comp != None: + spec = dc["spec"]["template"]["spec"] + facts = dict( + selector = dc["spec"]["selector"], + replicas = dc["spec"]["replicas"], + serviceAccount = spec["serviceAccount"], + containers = dict(), + volumes = dict() + ) + if spec.has_key("volumes"): + for vol in spec["volumes"]: + clone = copy.deepcopy(vol) + clone.pop("name", None) + facts["volumes"][vol["name"]] = clone + for container in spec["containers"]: + facts["containers"][container["name"]] = dict( + image = container["image"], + resources = container["resources"], + ) + self.addFactsFor(comp,"deploymentconfigs",name,facts) + + def factsForServices(self, namespace): + self.defaultKeysFor("services") + servicelist = self.oc("get", "services", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR]) + if len(servicelist["items"]) == 0: + return + for service in servicelist["items"]: + name = service["metadata"]["name"] + comp = self.comp(name) + if comp != None: + self.addFactsFor(comp, "services", name, dict()) + + def factsForConfigMaps(self, namespace): + self.defaultKeysFor("configmaps") + aList = self.oc("get", "configmaps", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR]) + if len(aList["items"]) == 0: + return + for item in aList["items"]: + name = item["metadata"]["name"] + comp = self.comp(name) + if comp != None: + self.addFactsFor(comp, "configmaps", name, item["data"]) + + def factsForOAuthClients(self, namespace): + self.defaultKeysFor("oauthclients") + aList = self.oc("get", "oauthclients", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR]) + if len(aList["items"]) == 0: + return + for item in aList["items"]: + name = item["metadata"]["name"] + comp = self.comp(name) + if comp != None: + result = dict( + redirectURIs = item["redirectURIs"] + ) + self.addFactsFor(comp, "oauthclients", name, result) + + def factsForSecrets(self, namespace): + self.defaultKeysFor("secrets") + aList = self.oc("get", "secrets", namespace=namespace) + if len(aList["items"]) == 0: + return + for item in aList["items"]: + name = item["metadata"]["name"] + comp = self.comp(name) + if comp != None and item["type"] == "Opaque": + result = dict( + keys = item["data"].keys() + ) + self.addFactsFor(comp, "secrets", name, result) + + def factsForSCCs(self, namespace): + self.defaultKeysFor("sccs") + scc = self.oc("get", "scc", name="privileged") + if len(scc["users"]) == 0: + return + for item in scc["users"]: + comp = self.comp(item) + if comp != None: + self.addFactsFor(comp, "sccs", "privileged", dict()) + + def factsForClusterRoleBindings(self, namespace): + self.defaultKeysFor("clusterrolebindings") + role = self.oc("get", "clusterrolebindings", name="cluster-readers") + if "subjects" not in role or 
len(role["subjects"]) == 0: + return + for item in role["subjects"]: + comp = self.comp(item["name"]) + if comp != None and namespace == item["namespace"]: + self.addFactsFor(comp, "clusterrolebindings", "cluster-readers", dict()) + +# this needs to end up nested under the service account... + def factsForRoleBindings(self, namespace): + self.defaultKeysFor("rolebindings") + role = self.oc("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role") + if "subjects" not in role or len(role["subjects"]) == 0: + return + for item in role["subjects"]: + comp = self.comp(item["name"]) + if comp != None and namespace == item["namespace"]: + self.addFactsFor(comp, "rolebindings", "logging-elasticsearch-view-role", dict()) + + def comp(self, name): + if name.startswith("logging-curator-ops"): + return "curator_ops" + elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"): + return "kibana_ops" + elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"): + return "elasticsearch_ops" + elif name.startswith("logging-curator"): + return "curator" + elif name.startswith("logging-kibana") or name.startswith("kibana"): + return "kibana" + elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"): + return "elasticsearch" + elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"): + return "fluentd" + else: + return None + + def do(self): + self.factsForRoutes(self.namespace) + self.factsForDaemonsets(self.namespace) + self.factsForDeploymentConfigs(self.namespace) + self.factsForServices(self.namespace) + self.factsForConfigMaps(self.namespace) + self.factsForSCCs(self.namespace) + self.factsForOAuthClients(self.namespace) + self.factsForClusterRoleBindings(self.namespace) + self.factsForRoleBindings(self.namespace) + self.factsForSecrets(self.namespace) + self.factsForPvcs(self.namespace) + + return self.facts + +def main(): + module = AnsibleModule( + argument_spec=dict( + admin_kubeconfig = {"required": True, "type": "str"}, + oc_bin = {"required": True, "type": "str"}, + openshift_logging_namespace = {"required": True, "type": "str"} + ), + supports_check_mode = False + ) + try: + cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],module.params['openshift_logging_namespace']) + module.exit_json( + ansible_facts = {"openshift_logging_facts": cmd.do() } + ) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml new file mode 100644 index 000000000..8bff6cfb7 --- /dev/null +++ b/roles/openshift_logging/meta/main.yaml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: openshift_facts } diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml new file mode 100644 index 000000000..6e8fc29d0 --- /dev/null +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -0,0 +1,93 @@ +--- +- name: stop logging + include: stop_cluster.yaml + +# delete the deployment objects that we had created +- name: delete logging api objects + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - dc + - rc + - svc + - routes + - templates + - daemonset + +# delete the 
oauthclient +- name: delete oauthclient kibana-proxy + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true + +# delete any image streams that we may have created +- name: delete logging is + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true + +# delete our old secrets +- name: delete logging secrets + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - logging-fluentd + - logging-elasticsearch + - logging-kibana + - logging-kibana-proxy + - logging-curator + ignore_errors: yes + +# delete role bindings +- name: delete rolebindings + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - logging-elasticsearch-view-role + +# delete cluster role bindings +- name: delete cluster role bindings + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - rolebinding-reader + +# delete cluster roles +- name: delete cluster roles + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - rolebinding-reader + +# delete our service accounts +- name: delete service accounts + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete serviceaccount {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - aggregated-logging-elasticsearch + - aggregated-logging-kibana + - aggregated-logging-curator + - aggregated-logging-fluentd + +# delete our roles +- name: delete roles + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - daemonset-admin + +# delete our configmaps +- name: delete configmaps + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true + with_items: + - logging-curator + - logging-elasticsearch + - logging-fluentd diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml new file mode 100644 index 000000000..161d51055 --- /dev/null +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -0,0 +1,168 @@ +--- +# we will ensure our secrets and configmaps are set up here first +- name: Checking for ca.key + stat: path="{{generated_certs_dir}}/ca.key" + register: ca_key_file + check_mode: no + +- name: Checking for ca.crt + stat: path="{{generated_certs_dir}}/ca.crt" + register: ca_cert_file + check_mode: no + +- name: Checking for ca.serial.txt + stat: path="{{generated_certs_dir}}/ca.serial.txt" + register: ca_serial_file + check_mode: no + +- name: Generate certificates + command: > + {{ openshift.common.admin_binary }} --config={{ 
mktemp.stdout }}/admin.kubeconfig ca create-signer-cert + --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt + --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test + check_mode: no + when: + - not ca_key_file.stat.exists + - not ca_cert_file.stat.exists + - not ca_serial_file.stat.exists + +- name: Checking for signing.conf + stat: path="{{generated_certs_dir}}/signing.conf" + register: signing_conf_file + check_mode: no + +- block: + - copy: src=signing.conf dest={{generated_certs_dir}}/signing.conf + check_mode: no + + - lineinfile: "dest={{generated_certs_dir}}/signing.conf regexp='# Top dir$' line='dir = {{generated_certs_dir}} # Top dir'" + check_mode: no + when: + - not signing_conf_file.stat.exists + +- include: procure_server_certs.yaml + loop_control: + loop_var: cert_info + with_items: + - procure_component: kibana + - procure_component: kibana-ops + - procure_component: kibana-internal + hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" + +# - include: procure_server_certs.yaml +# vars: +# - procure_component: kibana + +# - include: procure_server_certs.yaml +# vars: +# - procure_component: kibana-ops + +# - include: procure_server_certs.yaml +# vars: +# - procure_component: kibana-internal +# - hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" + +- name: Copy proxy TLS configuration file + copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json + when: server_tls_json is undefined + check_mode: no + +- name: Copy proxy TLS configuration file + copy: content="{{server_tls_json}}" dest={{generated_certs_dir}}/server-tls.json + when: server_tls_json is defined + check_mode: no + +- name: Checking for ca.db + stat: path="{{generated_certs_dir}}/ca.db" + register: ca_db_file + check_mode: no + +- copy: content="" dest={{generated_certs_dir}}/ca.db + check_mode: no + when: + - not ca_db_file.stat.exists + +- name: Checking for ca.crt.srl + stat: path="{{generated_certs_dir}}/ca.crt.srl" + register: ca_cert_srl_file + check_mode: no + +- copy: content="" dest={{generated_certs_dir}}/ca.crt.srl + check_mode: no + when: + - not ca_cert_srl_file.stat.exists + +- name: Generate PEM certs + include: generate_pems.yaml component={{node_name}} + with_items: + - system.logging.fluentd + - system.logging.kibana + - system.logging.curator + - system.admin + loop_control: + loop_var: node_name + +- shell: certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,dns:$cert; done; echo $certs + register: elasticsearch_certs + check_mode: no + +- shell: certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,dns:$cert; done; echo $certs + register: logging_es_certs + check_mode: no + +#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs +# register: elasticsearch_certs +# check_mode: no + +#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs +# register: logging_es_certs +# check_mode: no + +- name: Generate PKCS12 chains +# include: generate_pkcs12.yaml component='system.admin' + include: generate_jks_chain.yaml component='system.admin' + +- name: Generate PKCS12 chains +# include: generate_pkcs12.yaml component={{node.name}} oid={{node.oid | 
default(False)}} chain_certs={{node.certs}} + include: generate_jks_chain.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}} + with_items: + - {name: 'elasticsearch', oid: True, certs: '{{elasticsearch_certs.stdout}}'} + - {name: 'logging-es', certs: '{{logging_es_certs.stdout}}'} + loop_control: + loop_var: node +# This should be handled within the ES image instead... --- +#- name: Copy jks script +# copy: +# src: generate-jks.sh +# dest: "{{etcd_generated_certs_dir}}/logging" + +#- name: Generate JKS chains +# template: +# src: job.j2 +# dest: "{{mktemp.stdout}}/jks_job.yaml" + +#- name: kick off job +# shell: > +# {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_job.yaml -n {{logging_namespace}} +# register: podoutput + +#- shell: > +# echo {{podoutput.stdout}} | awk -v podname='\\\".*\\\"' '{print $2}' +# register: podname + +#- action: shell > +# {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig oc get pod/{{podname.stdout}} -o go-template='{{ '{{' }}index .status "phase"{{ '}}' }}' -n {{logging_namespace}} +# register: result +# until: result.stdout.find("Succeeded") != -1 +# retries: 5 +# delay: 10 +# --- This should be handled within the ES image instead... +- name: Generate proxy session + shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 200 + register: session_secret + check_mode: no + +- name: Generate oauth client secret + shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 64 + register: oauth_secret + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml new file mode 100644 index 000000000..ffd5f1e00 --- /dev/null +++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml @@ -0,0 +1,12 @@ +--- +- name: Generate ClusterRoleBindings + template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml + vars: + acct_name: aggregated-logging-elasticsearch + obj_name: rolebinding-reader + crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"] + subjects: + - kind: ServiceAccount + name: "{{acct_name}}" + namespace: "{{openshift_logging_namespace}}" + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml new file mode 100644 index 000000000..8b0ef377a --- /dev/null +++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml @@ -0,0 +1,10 @@ +--- +- name: Generate ClusterRole for cluster-reader + template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml + vars: + obj_name: rolebinding-reader + rules: + - resources: [clusterrolebindings] + verbs: + - get + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml new file mode 100644 index 000000000..86882a5da --- /dev/null +++ b/roles/openshift_logging/tasks/generate_configmaps.yaml @@ -0,0 +1,103 @@ +--- +- block: + - copy: + src: elasticsearch-logging.yml + dest: "{{mktemp.stdout}}/elasticsearch-logging.yml" + when: es_logging_contents is undefined + + - copy: + src: elasticsearch.yml + dest: "{{mktemp.stdout}}/elasticsearch.yml" + when: es_config_contents is undefined + + - lineinfile: + dest: "{{mktemp.stdout}}/elasticsearch.yml" + regexp: 
'^openshift\.operations\.allow_cluster_reader(.)*$' + line: "\nopenshift.operations.allow_cluster_reader: {{openshift_logging_es_ops_allow_cluster_reader | lower}}" + when: es_config_contents is undefined + + - copy: + content: "{{es_logging_contents}}" + dest: "{{mktemp.stdout}}/elasticsearch-logging.yml" + when: es_logging_contents is defined + + - copy: + content: "{{es_config_contents}}" + dest: "{{mktemp.stdout}}/elasticsearch.yml" + when: es_config_contents is defined + + - shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch + --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run + register: es_configmap + + - copy: + content: "{{es_configmap.stdout}}" + dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml" + when: es_configmap.stdout is defined + check_mode: no + +- block: + - copy: + src: curator.yml + dest: "{{mktemp.stdout}}/curator.yml" + when: curator_config_contents is undefined + + - copy: + content: "{{curator_config_contents}}" + dest: "{{mktemp.stdout}}/curator.yml" + when: curator_config_contents is defined + + - shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator + --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run + register: curator_configmap + + - copy: + content: "{{curator_configmap.stdout}}" + dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml" + when: curator_configmap.stdout is defined + check_mode: no + +- block: + - copy: + src: fluent.conf + dest: "{{mktemp.stdout}}/fluent.conf" + when: fluentd_config_contents is undefined + + - copy: + src: fluentd-throttle-config.yaml + dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml" + when: fluentd_throttle_contents is undefined + + - copy: + src: secure-forward.conf + dest: "{{mktemp.stdout}}/secure-forward.conf" + when: fluentd_secureforward_contents is undefined + + - copy: + content: "{{fluentd_config_contents}}" + dest: "{{mktemp.stdout}}/fluent.conf" + when: fluentd_config_contents is defined + + - copy: + content: "{{fluentd_throttle_contents}}" + dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml" + when: fluentd_throttle_contents is defined + + - copy: + content: "{{fluentd_secureforward_contents}}" + dest: "{{mktemp.stdout}}/secure-forward.conf" + when: fluentd_secureforward_contents is defined + + - shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd + --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml + --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run + register: fluentd_configmap + + - copy: + content: "{{fluentd_configmap.stdout}}" + dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml" + when: fluentd_configmap.stdout is defined + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml new file mode 100644 index 000000000..151cafd9d --- /dev/null +++ b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml @@ -0,0 +1,59 @@ +--- +- name: Generate kibana deploymentconfig + template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml + vars: + component: kibana + deploy_name: 
"logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" + proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" + es_host: logging-es + es_port: "{{openshift_logging_es_port}}" + check_mode: no + +- name: Generate OPS kibana deploymentconfig + template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml + vars: + component: kibana-ops + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" + proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" + es_host: logging-es-ops + es_port: "{{openshift_logging_es_ops_port}}" + check_mode: no + +- name: Generate elasticsearch deploymentconfig + template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml + vars: + component: es + deploy_name_prefix: "logging-{{component}}" + deploy_name: "{{deploy_name_prefix}}-abc123" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + es_cluster_name: "{{component}}" + check_mode: no + +- name: Generate OPS elasticsearch deploymentconfig + template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml + vars: + component: es-ops + deploy_name_prefix: "logging-{{component}}" + deploy_name: "{{deploy_name_prefix}}-abc123" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + es_cluster_name: "{{component}}" + check_mode: no + +- name: Generate curator deploymentconfig + template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml + vars: + component: curator + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" + check_mode: no + +- name: Generate OPS curator deploymentconfig + template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml + vars: + component: curator-ops + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" + openshift_logging_es_host: logging-es-ops + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_jks_chain.yaml b/roles/openshift_logging/tasks/generate_jks_chain.yaml new file mode 100644 index 000000000..14ffdc51f --- /dev/null +++ b/roles/openshift_logging/tasks/generate_jks_chain.yaml @@ -0,0 +1,60 @@ +--- +- debug: msg="certs are {{chain_certs}} and oid is {{oid}}" + when: chain_certs is defined and oid is defined + +- debug: msg="certs are {{chain_certs}}" + when: chain_certs is defined and oid is undefined + +- name: Build extensions with certs + shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid:1.2.3.4.5.5','') }}" + register: cert_ext + when: chain_certs is defined and oid is defined + check_mode: no + +- debug: msg="extensions are {{cert_ext.stdout}}" + when: cert_ext.stdout is defined + +- shell: > + echo {{ (cert_ext.stdout is defined) | ternary( '-ext san=dns:localhost,ip:127.0.0.1','') }}{{ (cert_ext.stdout is defined) | ternary( cert_ext.stdout, '') }} + register: extensions + check_mode: no + +- name: Checking for {{component}}.jks ... + stat: path="{{generated_certs_dir}}/{{component}}.jks" + register: jks_file + check_mode: no + +- name: Checking for truststore... 
+ stat: path="{{generated_certs_dir}}/truststore.jks" + register: jks_truststore + check_mode: no + +- block: + - shell: > + keytool -genkey -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -keypass kspass -storepass kspass + -keyalg RSA -keysize 2048 -validity 712 -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}} + + - shell: > + keytool -certreq -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -storepass kspass + -file {{generated_certs_dir}}/{{component}}-jks.csr -keyalg RSA -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}} + + - shell: > + openssl ca -in {{generated_certs_dir}}/{{component}}-jks.csr -notext -out {{generated_certs_dir}}/{{component}}-jks.crt + -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext + + - shell: > + keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/{{component}}.jks + -storepass kspass -noprompt -alias sig-ca + + - shell: > + keytool -import -file {{generated_certs_dir}}/{{component}}-jks.crt -keystore {{generated_certs_dir}}/{{component}}.jks + -storepass kspass -noprompt -alias {{component}} + + when: not jks_file.stat.exists + check_mode: no + +- block: + - shell: > + keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/truststore.jks -storepass tspass -noprompt -alias sig-ca + when: not jks_truststore.stat.exists + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml new file mode 100644 index 000000000..289b72ea6 --- /dev/null +++ b/roles/openshift_logging/tasks/generate_pems.yaml @@ -0,0 +1,36 @@ +--- +- name: Checking for {{component}}.key + stat: path="{{generated_certs_dir}}/{{component}}.key" + register: key_file + check_mode: no + +- name: Checking for {{component}}.crt + stat: path="{{generated_certs_dir}}/{{component}}.crt" + register: cert_file + check_mode: no + +- name: Creating cert req for {{component}} + command: > + openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key + -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes + when: + - not key_file.stat.exists + - cert_ext.stdout is defined + check_mode: no + +- name: Creating cert req for {{component}} + command: > + openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key + -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes + when: + - not key_file.stat.exists + - cert_ext.stdout is undefined + check_mode: no + +- name: Sign cert request with CA for {{component}} + command: > + openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt + -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext + when: + - not cert_file.stat.exists + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_pkcs12.yaml b/roles/openshift_logging/tasks/generate_pkcs12.yaml new file mode 100644 index 000000000..dde65746f --- /dev/null +++ b/roles/openshift_logging/tasks/generate_pkcs12.yaml @@ -0,0 +1,24 @@ +--- +- debug: msg="certs are {{chain_certs}} and oid is {{oid}}" + when: chain_certs is defined and oid is defined + +- debug: msg="certs are {{chain_certs}}" + when: chain_certs is defined and oid is 
undefined + +- name: Build extensions with certs + shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid=1.2.3.4.5.5','') }}" + register: cert_ext + when: chain_certs is defined and oid is defined + +- debug: msg="extensions are {{cert_ext.stdout}}" + when: cert_ext.stdout is defined + +- include: generate_pems.yaml + +- local_action: stat path="{{mktemp.stdout}}/{{component}}.pkcs12" + register: pkcs_file + become: no + +- name: Generating pkcs12 chain for {{component}} + command: openssl pkcs12 -export -out {{generated_certs_dir}}/{{component}}.pkcs12 -inkey {{generated_certs_dir}}/{{component}}.key -in {{generated_certs_dir}}/{{component}}.crt -password pass:pass + when: not pkcs_file.stat.exists diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml new file mode 100644 index 000000000..ee4416bbd --- /dev/null +++ b/roles/openshift_logging/tasks/generate_pvcs.yaml @@ -0,0 +1,47 @@ +--- +- name: Init pool of PersistentVolumeClaim names + set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}} + vars: + pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}" + start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}" + with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}} + when: + - openshift_logging_es_pvc_size | search('^\d.*') + - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}" + check_mode: no + +- name: Generating PersistentVolumeClaims + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml + vars: + obj_name: "{{claim_name}}" + size: "{{openshift_logging_es_pvc_size}}" + access_modes: + - ReadWriteOnce + pv_selector: "{{openshift_logging_es_pv_selector}}" + with_items: + - "{{es_pvc_pool | default([])}}" + loop_control: + loop_var: claim_name + when: + - not openshift_logging_es_pvc_dynamic + - es_pvc_pool is defined + check_mode: no + +- name: Generating PersistentVolumeClaims - Dynamic + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml + vars: + obj_name: "{{claim_name}}" + annotations: + volume.alpha.kubernetes.io/storage-class: "dynamic" + size: "{{openshift_logging_es_pvc_size}}" + access_modes: + - ReadWriteOnce + pv_selector: "{{openshift_logging_es_pv_selector}}" + with_items: + - "{{es_pvc_pool|default([])}}" + loop_control: + loop_var: claim_name + when: + - openshift_logging_es_pvc_dynamic + - es_pvc_pool is defined + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml new file mode 100644 index 000000000..02f81368d --- /dev/null +++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml @@ -0,0 +1,11 @@ +--- +- name: Generate RoleBindings + template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml + vars: + obj_name: logging-elasticsearch-view-role + roleRef: + name: view + subjects: + - kind: ServiceAccount + name: aggregated-logging-elasticsearch + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml new file mode 100644 index 000000000..d280ac04c --- /dev/null +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -0,0 +1,20 @@ +--- +- name: Generating logging routes + template: src=route_reencrypt.j2 
dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml + tags: routes + vars: + obj_name: "{{route_info.name}}" + route_host: "{{route_info.host}}" + service_name: "{{route_info.name}}" + tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" + tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" + labels: + component: support + logging-infra: support + provider: openshift + with_items: + - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"} + - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"} + loop_control: + loop_var: route_info + when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana' diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml new file mode 100644 index 000000000..e20b88c0f --- /dev/null +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -0,0 +1,73 @@ +--- +- name: Retrieving the cert to use when generating secrets for the logging components + slurp: src="{{generated_certs_dir}}/{{item.file}}" + register: key_pairs + with_items: + - { name: "ca_file", file: "ca.crt" } + - { name: "kibana_key", file: "system.logging.kibana.key"} + - { name: "kibana_cert", file: "system.logging.kibana.crt"} + - { name: "curator_key", file: "system.logging.curator.key"} + - { name: "curator_cert", file: "system.logging.curator.crt"} + - { name: "fluentd_key", file: "system.logging.fluentd.key"} + - { name: "fluentd_cert", file: "system.logging.fluentd.crt"} + - { name: "kibana_internal_key", file: "kibana-internal.key"} + - { name: "kibana_internal_cert", file: "kibana-internal.crt"} + - { name: "server_tls", file: "server-tls.json"} + +- name: Generating secrets for logging components + template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml + vars: + secret_name: logging-{{component}} + secret_key_file: "{{component}}_key" + secret_cert_file: "{{component}}_cert" + secrets: + - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} + - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} + - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} + secret_keys: ["ca", "cert", "key"] + with_items: + - kibana + - curator + - fluentd + loop_control: + loop_var: component + when: secret_name not in openshift_logging_facts.{{component}}.secrets or + secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0 + check_mode: no + +- name: Generating secrets for kibana proxy + template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml + vars: + secret_name: logging-kibana-proxy + secrets: + - {key: oauth-secret, value: "{{oauth_secret.stdout}}"} + - {key: session-secret, value: "{{session_secret.stdout}}"} + - {key: server-key, value: "{{kibana_key_file}}"} + - {key: server-cert, value: "{{kibana_cert_file}}"} + - {key: server-tls, value: "{{server_tls_file}}"} + secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"] + kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}" + kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}" + server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}" + when: secret_name not in 
openshift_logging_facts.kibana.secrets or + secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0 + check_mode: no + +- name: Generating secrets for elasticsearch + command: > + {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}} + key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks + searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks + admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt + admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml + vars: + secret_name: logging-elasticsearch + secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"] + register: logging_es_secret + when: secret_name not in openshift_logging_facts.elasticsearch.secrets or + secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0 + check_mode: no + +- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml + when: logging_es_secret.stdout is defined + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml new file mode 100644 index 000000000..7b956e2e0 --- /dev/null +++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml @@ -0,0 +1,13 @@ +--- +- name: Generating serviceaccounts + template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml + vars: + obj_name: aggregated-logging-{{component}} + with_items: + - elasticsearch + - kibana + - fluentd + - curator + loop_control: + loop_var: component + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml new file mode 100644 index 000000000..95f113577 --- /dev/null +++ b/roles/openshift_logging/tasks/generate_services.yaml @@ -0,0 +1,81 @@ +--- +- name: Generating logging-es service + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml + vars: + obj_name: logging-es + ports: + - {port: 9200, targetPort: restapi} + labels: + logging-infra: support + selector: + provider: openshift + component: es + check_mode: no + +- name: Generating logging-es-cluster service + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml + vars: + obj_name: logging-es-cluster + ports: + - {port: 9300} + labels: + logging-infra: support + selector: + provider: openshift + component: es + check_mode: no + +- name: Generating logging-kibana service + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml + vars: + obj_name: logging-kibana + ports: + - {port: 443, targetPort: oaproxy} + labels: + logging-infra: support + selector: + provider: openshift + component: kibana + check_mode: no + +- name: Generating logging-es-ops service + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml + vars: + obj_name: logging-es-ops + ports: + - {port: 9200, targetPort: restapi} + labels: + logging-infra: support + selector: + provider: openshift + component: es-ops + when: openshift_logging_use_ops + check_mode: no + +- name: Generating logging-es-ops-cluster service + template: src=service.j2 
dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml + vars: + obj_name: logging-es-ops-cluster + ports: + - {port: 9300} + labels: + logging-infra: support + selector: + provider: openshift + component: es-ops + when: openshift_logging_use_ops + check_mode: no + +- name: Generating logging-kibana-ops service + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml + vars: + obj_name: logging-kibana-ops + ports: + - {port: 443, targetPort: oaproxy} + labels: + logging-infra: support + selector: + provider: openshift + component: kibana-ops + when: openshift_logging_use_ops + check_mode: no diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml new file mode 100644 index 000000000..165a9d14e --- /dev/null +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -0,0 +1,27 @@ +--- +- name: Generate curator deploymentconfig + template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml + vars: + component: curator + logging_component: curator + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" + es_host: logging-es + es_port: "{{openshift_logging_es_port}}" + curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}" + curator_memory_limit: "{{openshift_logging_curator_memory_limit }}" + check_mode: no + +- name: Generate OPS curator deploymentconfig + template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml + vars: + component: curator-ops + logging_component: curator + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" + es_host: logging-es-ops + es_port: "{{openshift_logging_es_ops_port}}" + curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}" + curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}" + when: openshift_logging_use_ops + check_mode: no diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml new file mode 100644 index 000000000..c5d8d3537 --- /dev/null +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -0,0 +1,105 @@ +--- +- name: Generate PersistentVolumeClaims + include: "{{ role_path}}/tasks/generate_pvcs.yaml" + vars: + es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}" + es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}" + when: + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" + +- name: Init pool of DeploymentConfig names for Elasticsearch + set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}} + vars: + component: es + es_cluster_name: "{{component}}" + deploy_name_prefix: "logging-{{component}}" + deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" + with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}} + when: + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" + check_mode: no + + +- name: Generate Elasticsearch DeploymentConfig + template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml + vars: + component: es + logging_component: elasticsearch + deploy_name_prefix: 
"logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + es_cluster_name: "{{component}}" + es_cpu_limit: "{{openshift_logging_es_cpu_limit }}" + es_memory_limit: "{{openshift_logging_es_memory_limit}}" + volume_names: "{{es_pvc_pool | default([])}}" + pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}" + deploy_name: "{{item.1}}" + with_indexed_items: + - "{{es_dc_pool | default([])}}" + check_mode: no + when: + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" + +# --------- Tasks for Operation clusters --------- + +- name: Validate Elasticsearch cluster size for Ops + fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed" + vars: + es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}" + cluster_size: "{{openshift_logging_es_ops_cluster_size}}" + when: + - openshift_logging_use_ops + - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}" + check_mode: no + +- name: Generate PersistentVolumeClaims for Ops + include: "{{ role_path}}/tasks/generate_pvcs.yaml" + vars: + es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}" + es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}" + openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}" + openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}" + openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}" + openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}" + openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}" + when: + - openshift_logging_use_ops + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}" + check_mode: no + +- name: Init pool of DeploymentConfig names for Elasticsearch for Ops + set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}} + vars: + component: es-ops + es_cluster_name: "{{component}}" + deploy_name_prefix: "logging-{{component}}" + deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" + cluster_size: "{{openshift_logging_es_ops_cluster_size}}" + with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}} + when: + - openshift_logging_use_ops + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}" + check_mode: no + +- name: Generate Elasticsearch DeploymentConfig for Ops + template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml + vars: + component: es-ops + logging_component: elasticsearch + deploy_name_prefix: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + volume_names: "{{es_pvc_pool | default([])}}" + pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}" + deploy_name: "{{item.1}}" + es_cluster_name: "{{component}}" + es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}" + es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}" + es_node_quorum: "{{es_ops_node_quorum}}" + es_recover_after_nodes: "{{es_ops_recover_after_nodes}}" + 
es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}" + openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}" + with_indexed_items: + - "{{es_dc_pool_ops | default([])}}" + when: + - openshift_logging_use_ops + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}" + check_mode: no diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml new file mode 100644 index 000000000..35bd452ed --- /dev/null +++ b/roles/openshift_logging/tasks/install_fluentd.yaml @@ -0,0 +1,38 @@ +--- +- shell: > + echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}" + register: fluentd_ops_host + check_mode: no + +- shell: > + echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}" + register: fluentd_ops_port + check_mode: no + + +- name: Generating Fluentd daemonset + template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml + vars: + daemonset_name: logging-fluentd + daemonset_component: fluentd + daemonset_container_name: fluentd-elasticsearch + daemonset_serviceAccount: aggregated-logging-fluentd + ops_host: "{{ fluentd_ops_host.stdout }}" + ops_port: "{{ fluentd_ops_port.stdout }}" + check_mode: no + +- name: "Set permissions for fluentd" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy + add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd + register: fluentd_output + failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" + check_mode: no + +- name: "Set additional permissions for fluentd" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy + add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd + register: fluentd2_output + failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + check_mode: no diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml new file mode 100644 index 000000000..382ab2522 --- /dev/null +++ b/roles/openshift_logging/tasks/install_kibana.yaml @@ -0,0 +1,33 @@ +--- +- name: Generate kibana deploymentconfig + template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml + vars: + component: kibana + logging_component: kibana + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" + proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" + es_host: logging-es + es_port: "{{openshift_logging_es_port}}" + kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}" + kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}" + kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}" + kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}" + check_mode: no + +- name: Generate OPS kibana deploymentconfig + template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml + vars: + component: kibana-ops + logging_component: kibana + deploy_name: "logging-{{component}}" + image: 
"{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" + proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" + es_host: logging-es-ops + es_port: "{{openshift_logging_es_ops_port}}" + kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}" + kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}" + kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}" + kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}" + when: openshift_logging_use_ops + check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml new file mode 100644 index 000000000..591f11476 --- /dev/null +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -0,0 +1,49 @@ +--- +- name: Gather OpenShift Logging Facts + openshift_logging_facts: + oc_bin: "{{openshift.common.client_binary}}" + admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig" + openshift_logging_namespace: "{{openshift_logging_namespace}}" + tags: logging_facts + check_mode: no + +- name: Validate Elasticsearch cluster size + fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed" + when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}" + +- name: Install logging + include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml" + when: openshift_hosted_logging_install | default(true) | bool + with_items: + - support + - elasticsearch + - kibana + - curator + - fluentd + loop_control: + loop_var: install_component + +- name: Register API objects from generated templates + shell: ls -d -1 {{mktemp.stdout}}/templates/* | sort + register: logging_objects + check_mode: no + +- name: Creating API objects from generated templates + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig apply -f {{file}} -n {{openshift_logging_namespace}} + with_items: "{{logging_objects.stdout_lines}}" + loop_control: + loop_var: file + when: not ansible_check_mode + +- name: Printing out objects to create + debug: msg="{{lookup('file', file)|quote}}" + with_fileglob: + - "{{mktemp.stdout}}/templates/*.yaml" + loop_control: + loop_var: file + when: ansible_check_mode + +- name: Scaling up cluster + include: start_cluster.yaml + when: start_cluster | default(true) | bool diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml new file mode 100644 index 000000000..71979a7d8 --- /dev/null +++ b/roles/openshift_logging/tasks/install_support.yaml @@ -0,0 +1,52 @@ +--- +# This is the base configuration for installing the other components +- name: Check for logging project already exists + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers + register: logging_project_result + ignore_errors: yes + when: not ansible_check_mode + +- name: "Create logging project" + command: > + {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}} + when: not ansible_check_mode and "not found" in logging_project_result.stderr + +- name: Create logging cert directory + file: path={{openshift.common.config_base}}/logging state=directory mode=0755 + 
changed_when: False + check_mode: no + +- include: generate_certs.yaml + vars: + generated_certs_dir: "{{openshift.common.config_base}}/logging" + +- name: Create temp directory for all our templates + file: path={{mktemp.stdout}}/templates state=directory mode=0755 + changed_when: False + check_mode: no + +- include: generate_secrets.yaml + vars: + generated_certs_dir: "{{openshift.common.config_base}}/logging" + +- include: generate_configmaps.yaml + +- include: generate_services.yaml + +- name: Generate kibana-proxy oauth client + template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml + vars: + secret: "{{oauth_secret.stdout}}" + when: oauth_secret.stdout is defined + check_mode: no + +- include: generate_clusterroles.yaml + +- include: generate_rolebindings.yaml + +- include: generate_clusterrolebindings.yaml + +- include: generate_serviceaccounts.yaml + +- include: generate_routes.yaml diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml new file mode 100644 index 000000000..55cfea38c --- /dev/null +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -0,0 +1,27 @@ +--- +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} + --template='{{ '{{index .metadata.labels "' }}{{label}}{{ '"}}' }}' + register: label_value + failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr + when: not ansible_check_mode + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite + register: label_result + failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr + when: + - value is defined + - label_value.stdout is defined + - label_value.stdout != value + - unlabel is not defined or not unlabel + - not ansible_check_mode + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}- + register: label_result + failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr + when: + - unlabel is defined + - unlabel + - not ansible_check_mode diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml new file mode 100644 index 000000000..b64c24ade --- /dev/null +++ b/roles/openshift_logging/tasks/main.yaml @@ -0,0 +1,35 @@ +--- +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX + register: mktemp + changed_when: False + check_mode: no + tags: logging_init + +- debug: msg="Created temp dir {{mktemp.stdout}}" + +- name: Copy the admin client config(s) + command: > + cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: False + check_mode: no + tags: logging_init + +- include: "{{ role_path }}/tasks/install_logging.yaml" + when: openshift_logging_install_logging | default(false) | bool + +- include: "{{ role_path }}/tasks/upgrade_logging.yaml" + when: openshift_logging_upgrade_logging | default(false) | bool + +- include: "{{ role_path }}/tasks/delete_logging.yaml" + when: + - not openshift_logging_install_logging | default(false) | bool + - not openshift_logging_upgrade_logging | default(false) | bool + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + tags: logging_cleanup + changed_when: False + check_mode: no diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml 
b/roles/openshift_logging/tasks/procure_server_certs.yaml new file mode 100644 index 000000000..2c046d6e6 --- /dev/null +++ b/roles/openshift_logging/tasks/procure_server_certs.yaml @@ -0,0 +1,54 @@ +--- +- name: Checking for {{ cert_info.procure_component }}.crt + stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.crt" + register: component_cert_file + check_mode: no + +- name: Checking for {{ cert_info.procure_component }}.key + stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.key" + register: component_key_file + check_mode: no + +- name: Trying to discover server cert variable name for {{ cert_info.procure_component }} + command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}" + register: procure_component_crt + when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined + check_mode: no + +- name: Trying to discover the server key variable name for {{ cert_info.procure_component }} + command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}" + register: procure_component_key + when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined + check_mode: no + +- name: Creating signed server cert and key for {{ cert_info.procure_component }} + command: > + {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert + --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt + --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key + --signer-serial={{generated_certs_dir}}/ca.serial.txt + check_mode: no + when: + - cert_info.hostnames is defined + - not component_key_file.stat.exists + - not component_cert_file.stat.exists + +- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory + copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key + check_mode: no + when: + - cert_info.hostnames is undefined + - "{{ cert_info.procure_component }}_crt is defined" + - "{{ cert_info.procure_component }}_key is defined" + - not component_key_file.stat.exists + - not component_cert_file.stat.exists + +- name: Copying Server cert for {{ cert_info.procure_component }} to generated certs directory + copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt + check_mode: no + when: + - cert_info.hostnames is undefined + - "{{ cert_info.procure_component }}_crt is defined" + - "{{ cert_info.procure_component }}_key is defined" + - not component_key_file.stat.exists + - not component_cert_file.stat.exists diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml new file mode 100644 index 000000000..3d86ea171 --- /dev/null +++ b/roles/openshift_logging/tasks/scale.yaml @@ -0,0 +1,26 @@ +--- +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} + --template='{{ '{{.spec.replicas}}' }}' -n {{openshift_logging_namespace}} + register: replica_count + failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr + when: not ansible_check_mode + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout 
}}/admin.kubeconfig scale {{object}} + --replicas={{desired}} -n {{openshift_logging_namespace}} + register: scale_result + failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr + when: + - replica_count.stdout != desired + - not ansible_check_mode + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig describe {{object}} -n {{openshift_logging_namespace}} | awk -v statusrx='Pods Status:' '$0 ~ statusrx {print $3}' + register: replica_counts + until: replica_counts.stdout.find("{{desired}}") != -1 + retries: 30 + delay: 10 + when: + - replica_count.stdout != desired + - not ansible_check_mode diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml new file mode 100644 index 000000000..cdfc5f2d3 --- /dev/null +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -0,0 +1,107 @@ +--- +- shell: > + echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1 + register: openshift_logging_fluentd_nodeselector_key + check_mode: no + +- shell: > + echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2 + register: openshift_logging_fluentd_nodeselector_value + check_mode: no + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g" + register: fluentd_hosts + when: "'--all' in openshift_logging_fluentd_hosts" + check_mode: no + +- name: start fluentd + include: label_node.yaml + vars: + host: "{{fluentd_host}}" + label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}" + value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}" + with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}" + loop_control: + loop_var: fluentd_host + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} + register: es_dc + check_mode: no + +- name: start elasticsearch + include: scale.yaml + vars: + desired: 1 + with_items: "{{es_dc.stdout_lines}}" + loop_control: + loop_var: object + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}} + register: kibana_dc + check_mode: no + +- name: start kibana + include: scale.yaml + vars: + desired: 1 + with_items: "{{kibana_dc.stdout_lines}}" + loop_control: + loop_var: object + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}} + register: curator_dc + check_mode: no + +- name: start curator + include: scale.yaml + vars: + desired: 1 + with_items: "{{curator_dc.stdout_lines}}" + loop_control: + loop_var: object + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}} + register: es_dc + check_mode: no + +- name: start elasticsearch-ops + include: scale.yaml + vars: + desired: 1 + with_items: "{{es_dc.stdout_lines}}" + loop_control: + loop_var: object + when: openshift_logging_use_ops + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} + register: kibana_dc + check_mode: no + +- name: start kibana-ops + include: scale.yaml + vars: + desired: 1 
+ with_items: "{{kibana_dc.stdout_lines}}" + loop_control: + loop_var: object + when: openshift_logging_use_ops + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} + register: curator_dc + check_mode: no + +- name: start curator-ops + include: scale.yaml + vars: + desired: 1 + with_items: "{{curator_dc.stdout_lines}}" + loop_control: + loop_var: object + when: openshift_logging_use_ops diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml new file mode 100644 index 000000000..e018d0618 --- /dev/null +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -0,0 +1,98 @@ +--- +- shell: > + echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1 + register: openshift_logging_fluentd_nodeselector_key + +- shell: > + echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2 + register: openshift_logging_fluentd_nodeselector_value + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g" + register: fluentd_hosts + when: "'--all' in openshift_logging_fluentd_hosts" + +- name: stop fluentd + include: label_node.yaml + vars: + host: "{{fluentd_host}}" + label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}" + unlabel: True + with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}" + loop_control: + loop_var: fluentd_host + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} + register: es_dc + +- name: stop elasticsearch + include: scale.yaml + vars: + desired: 0 + with_items: "{{es_dc.stdout_lines}}" + loop_control: + loop_var: object + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}} + register: kibana_dc + +- name: stop kibana + include: scale.yaml + vars: + desired: 0 + with_items: "{{kibana_dc.stdout_lines}}" + loop_control: + loop_var: object + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}} + register: curator_dc + +- name: stop curator + include: scale.yaml + vars: + desired: 0 + with_items: "{{curator_dc.stdout_lines}}" + loop_control: + loop_var: object + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}} + register: es_dc + +- name: stop elasticsearch-ops + include: scale.yaml + vars: + desired: 0 + with_items: "{{es_dc.stdout_lines}}" + loop_control: + loop_var: object + when: openshift_logging_use_ops + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} + register: kibana_dc + +- name: stop kibana-ops + include: scale.yaml + vars: + desired: 0 + with_items: "{{kibana_dc.stdout_lines}}" + loop_control: + loop_var: object + when: openshift_logging_use_ops + +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} + register: curator_dc + +- name: stop curator-ops + include: 
scale.yaml + vars: + desired: 0 + with_items: "{{curator_dc.stdout_lines}}" + loop_control: + loop_var: object + when: openshift_logging_use_ops diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml new file mode 100644 index 000000000..b2c8022d5 --- /dev/null +++ b/roles/openshift_logging/tasks/upgrade_logging.yaml @@ -0,0 +1,33 @@ +--- +- name: Stop the Cluster + include: stop_cluster.yaml + +- name: Upgrade logging + include: install_logging.yaml + vars: + start_cluster: False + +# ensure that ES is running +- shell: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} + register: es_dc + check_mode: no + +- name: start elasticsearch + include: scale.yaml + vars: + desired: 1 + with_items: "{{es_dc.stdout_lines}}" + loop_control: + loop_var: object + +- copy: + src: es_migration.sh + dest: "{{mktemp.stdout}}/es_migration.sh" + +- name: Run upgrade scripts + shell: > + sh {{mktemp.stdout}}/es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}} + +- name: Start up rest of cluster + include: start_cluster.yaml diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2 new file mode 100644 index 000000000..0d28db48e --- /dev/null +++ b/roles/openshift_logging/templates/clusterrole.j2 @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: ClusterRole +metadata: + name: {{obj_name}} +rules: +{% for rule in rules %} +- resources: +{% for kind in rule.resources %} + - {{ kind }} +{% endfor %} + apiGroups: +{% if rule.api_groups is defined %} +{% for group in rule.api_groups %} + - {{ group }} +{% endfor %} +{% endif %} + verbs: +{% for verb in rule.verbs %} + - {{ verb }} +{% endfor %} +{% endfor %} diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2 new file mode 100644 index 000000000..2d25ff1fb --- /dev/null +++ b/roles/openshift_logging/templates/clusterrolebinding.j2 @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ClusterRoleBinding +metadata: + name: {{obj_name}} +{% if crb_usernames is defined %} +userNames: +{% for name in crb_usernames %} + - {{ name }} +{% endfor %} +{% endif %} +{% if crb_groupnames is defined %} +groupNames: +{% for name in crb_groupnames %} + - {{ name }} +{% endfor %} +{% endif %} +subjects: +{% for sub in subjects %} + - kind: {{ sub.kind }} + name: {{ sub.name }} + namespace: {{sub.namespace}} +{% endfor %} +roleRef: + name: {{obj_name}} diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 new file mode 100644 index 000000000..3ffb48bfb --- /dev/null +++ b/roles/openshift_logging/templates/curator.j2 @@ -0,0 +1,97 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: 0 + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Recreate + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + 
provider: openshift + component: "{{component}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-curator + containers: + - + name: "curator" + image: {{image}} + imagePullPolicy: Always + resources: + limits: + cpu: "{{curator_cpu_limit}}" +{% if curator_memory_limit is defined and curator_memory_limit is not none %} + memory: "{{curator_memory_limit}}" +{% endif %} + env: + - + name: "K8S_HOST_URL" + value: "{{master_url}}" + - + name: "ES_HOST" + value: "{{es_host}}" + - + name: "ES_PORT" + value: "{{es_port}}" + - + name: "ES_CLIENT_CERT" + value: "/etc/curator/keys/cert" + - + name: "ES_CLIENT_KEY" + value: "/etc/curator/keys/key" + - + name: "ES_CA" + value: "/etc/curator/keys/ca" + - + name: "CURATOR_DEFAULT_DAYS" + value: "{{openshift_logging_curator_default_days}}" + - + name: "CURATOR_RUN_HOUR" + value: "{{openshift_logging_curator_run_hour}}" + - + name: "CURATOR_RUN_MINUTE" + value: "{{openshift_logging_curator_run_minute}}" + - + name: "CURATOR_RUN_TIMEZONE" + value: "{{openshift_logging_curator_run_timezone}}" + - + name: "CURATOR_SCRIPT_LOG_LEVEL" + value: "{{openshift_logging_curator_script_log_level}}" + - + name: "CURATOR_LOG_LEVEL" + value: "{{openshift_logging_curator_log_level}}" + volumeMounts: + - name: certs + mountPath: /etc/curator/keys + readOnly: true + - name: config + mountPath: /usr/curator/settings + readOnly: true + - name: elasticsearch-storage + mountPath: /elasticsearch/persistent + readOnly: true + volumes: + - name: certs + secret: + secretName: logging-curator + - name: config + configMap: + name: logging-curator + - name: elasticsearch-storage + emptyDir: {} diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2 new file mode 100644 index 000000000..e5d415f81 --- /dev/null +++ b/roles/openshift_logging/templates/es.j2 @@ -0,0 +1,105 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" +spec: + replicas: 0 + selector: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" + strategy: + type: Recreate + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-elasticsearch + securityContext: + supplementalGroups: + - {{openshift_logging_es_storage_group}} + containers: + - + name: "elasticsearch" + image: {{image}} + imagePullPolicy: Always + resources: + limits: + memory: "{{es_memory_limit}}" +{% if es_cpu_limit is defined and es_cpu_limit is not none %} + cpu: "{{es_cpu_limit}}" +{% endif %} + requests: + memory: "512Mi" + ports: + - + containerPort: 9200 + name: "restapi" + - + containerPort: 9300 + name: "cluster" + env: + - + name: "NAMESPACE" + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - + name: "KUBERNETES_TRUST_CERT" + value: "true" + - + name: "SERVICE_DNS" + value: "logging-{{es_cluster_name}}-cluster" + - + name: "CLUSTER_NAME" + value: "logging-{{es_cluster_name}}" + - + name: "INSTANCE_RAM" + value: "{{openshift_logging_es_memory_limit}}" + - + name: "NODE_QUORUM" + value: "{{es_node_quorum | int}}" + - + name: "RECOVER_AFTER_NODES" + value: "{{es_recover_after_nodes}}" + - + name: "RECOVER_EXPECTED_NODES" + value: 
"{{es_recover_expected_nodes}}" + - + name: "RECOVER_AFTER_TIME" + value: "{{openshift_logging_es_recover_after_time}}" + volumeMounts: + - name: elasticsearch + mountPath: /etc/elasticsearch/secret + readOnly: true + - name: elasticsearch-config + mountPath: /usr/share/java/elasticsearch/config + readOnly: true + - name: elasticsearch-storage + mountPath: /elasticsearch/persistent + volumes: + - name: elasticsearch + secret: + secretName: logging-elasticsearch + - name: elasticsearch-config + configMap: + name: logging-elasticsearch + - name: elasticsearch-storage +{% if pvc_claim is defined and pvc_claim | trim | length > 0 %} + persistentVolumeClaim: + claimName: {{pvc_claim}} +{% else %} + emptyDir: {} +{% endif %} diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 new file mode 100644 index 000000000..a09b582a2 --- /dev/null +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -0,0 +1,149 @@ +apiVersion: extensions/v1beta1 +kind: "DaemonSet" +metadata: + name: "{{daemonset_name}}" + labels: + provider: openshift + component: "{{daemonset_component}}" + logging-infra: "{{daemonset_component}}" +spec: + selector: + matchLabels: + provider: openshift + component: "{{daemonset_component}}" + updateStrategy: + type: RollingUpdate + rollingUpdate: + minReadySeconds: 600 + template: + metadata: + name: "{{daemonset_container_name}}" + labels: + logging-infra: "{{daemonset_component}}" + provider: openshift + component: "{{daemonset_component}}" + spec: + serviceAccountName: "{{daemonset_serviceAccount}}" + nodeSelector: + {{openshift_logging_fluentd_nodeselector}} + containers: + - name: "{{daemonset_container_name}}" + image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}" + imagePullPolicy: Always + securityContext: + privileged: true + resources: + limits: + cpu: {{openshift_logging_fluentd_cpu_limit}} + memory: {{openshift_logging_fluentd_memory_limit}} + volumeMounts: + - name: runlogjournal + mountPath: /run/log/journal + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: dockercfg + mountPath: /etc/sysconfig/docker + readOnly: true + env: + - name: "K8S_HOST_URL" + value: "{{master_url}}" + - name: "ES_HOST" + value: "{{openshift_logging_es_host}}" + - name: "ES_PORT" + value: "{{openshift_logging_es_port}}" + - name: "ES_CLIENT_CERT" + value: "{{openshift_logging_es_client_cert}}" + - name: "ES_CLIENT_KEY" + value: "{{openshift_logging_es_client_key}}" + - name: "ES_CA" + value: "{{openshift_logging_es_ca}}" + - name: "OPS_HOST" + value: "{{ops_host}}" + - name: "OPS_PORT" + value: "{{ops_port}}" + - name: "OPS_CLIENT_CERT" + value: "{{openshift_logging_es_ops_client_cert}}" + - name: "OPS_CLIENT_KEY" + value: "{{openshift_logging_es_ops_client_key}}" + - name: "OPS_CA" + value: "{{openshift_logging_es_ops_ca}}" + - name: "ES_COPY" + value: "{{openshift_logging_fluentd_es_copy|lower}}" + - name: "ES_COPY_HOST" + value: "{{es_copy_host | default('')}}" + - name: "ES_COPY_PORT" + value: "{{es_copy_port | default('')}}" + - name: "ES_COPY_SCHEME" + value: "{{es_copy_scheme | default('https')}}" + - name: "ES_COPY_CLIENT_CERT" + 
value: "{{es_copy_client_cert | default('')}}" + - name: "ES_COPY_CLIENT_KEY" + value: "{{es_copy_client_key | default('')}}" + - name: "ES_COPY_CA" + value: "{{es_copy_ca | default('')}}" + - name: "ES_COPY_USERNAME" + value: "{{es_copy_username | default('')}}" + - name: "ES_COPY_PASSWORD" + value: "{{es_copy_password | default('')}}" + - name: "OPS_COPY_HOST" + value: "{{ops_copy_host | default('')}}" + - name: "OPS_COPY_PORT" + value: "{{ops_copy_port | default('')}}" + - name: "OPS_COPY_SCHEME" + value: "{{ops_copy_scheme | default('https')}}" + - name: "OPS_COPY_CLIENT_CERT" + value: "{{ops_copy_client_cert | default('')}}" + - name: "OPS_COPY_CLIENT_KEY" + value: "{{ops_copy_client_key | default('')}}" + - name: "OPS_COPY_CA" + value: "{{ops_copy_ca | default('')}}" + - name: "OPS_COPY_USERNAME" + value: "{{ops_copy_username | default('')}}" + - name: "OPS_COPY_PASSWORD" + value: "{{ops_copy_password | default('')}}" + - name: "USE_JOURNAL" + value: "{{openshift_logging_fluentd_use_journal|lower}}" + - name: "JOURNAL_SOURCE" + value: "{{fluentd_journal_source | default('')}}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" + volumes: + - name: runlogjournal + hostPath: + path: /run/log/journal + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: config + configMap: + name: logging-fluentd + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: dockercfg + hostPath: + path: /etc/sysconfig/docker diff --git a/roles/openshift_logging/templates/job.j2 b/roles/openshift_logging/templates/job.j2 new file mode 100644 index 000000000..d7794a407 --- /dev/null +++ b/roles/openshift_logging/templates/job.j2 @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + logging-infra: support + generateName: jks-cert-gen- +spec: + containers: + - name: jks-cert-gen + image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}} + imagePullPolicy: Always + command: ["sh", "generate-jks.sh"] + securityContext: + privileged: true + volumeMounts: + - mountPath: /opt/deploy + name: certmount + env: + - name: PROJECT + value: {{openshift_logging_namespace}} + restartPolicy: Never + serviceAccount: aggregated-logging-fluentd + volumes: + - hostPath: + path: "{{generated_certs_dir}}" + name: certmount diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2 new file mode 100644 index 000000000..ca3d727bf --- /dev/null +++ b/roles/openshift_logging/templates/kibana.j2 @@ -0,0 +1,110 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: 0 + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + serviceAccountName: aggregated-logging-kibana + containers: + - + name: "kibana" + image: {{image}} + imagePullPolicy: Always +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or 
(kibana_cpu_limit is defined and kibana_cpu_limit is not none) %} + resources: + limits: +{% if kibana_cpu_limit is not none %} + cpu: "{{kibana_cpu_limit}}" +{% endif %} +{% if kibana_memory_limit is not none %} + memory: "{{kibana_memory_limit}}" +{% endif %} +{% endif %} + env: + - name: "ES_HOST" + value: "{{es_host}}" + - name: "ES_PORT" + value: "{{es_port}}" + volumeMounts: + - name: kibana + mountPath: /etc/kibana/keys + readOnly: true + - + name: "kibana-proxy" + image: {{proxy_image}} + imagePullPolicy: Always +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %} + resources: + limits: +{% if kibana_proxy_cpu_limit is not none %} + cpu: "{{kibana_proxy_cpu_limit}}" +{% endif %} +{% if kibana_proxy_memory_limit is not none %} + memory: "{{kibana_proxy_memory_limit}}" +{% endif %} +{% endif %} + ports: + - + name: "oaproxy" + containerPort: 3000 + env: + - + name: "OAP_BACKEND_URL" + value: "http://localhost:5601" + - + name: "OAP_AUTH_MODE" + value: "oauth2" + - + name: "OAP_TRANSFORM" + value: "user_header,token_header" + - + name: "OAP_OAUTH_ID" + value: kibana-proxy + - + name: "OAP_MASTER_URL" + value: {{master_url}} + - + name: "OAP_PUBLIC_MASTER_URL" + value: {{public_master_url}} + - + name: "OAP_LOGOUT_REDIRECT" + value: {{public_master_url}}/console/logout + - + name: "OAP_MASTER_CA_FILE" + value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + - + name: "OAP_DEBUG" + value: "{{openshift_logging_kibana_proxy_debug}}" + volumeMounts: + - name: kibana-proxy + mountPath: /secret + readOnly: true + volumes: + - name: kibana + secret: + secretName: logging-kibana + - name: kibana-proxy + secret: + secretName: logging-kibana-proxy diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging/templates/oauth-client.j2 new file mode 100644 index 000000000..41d3123cb --- /dev/null +++ b/roles/openshift_logging/templates/oauth-client.j2 @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: OAuthClient +metadata: + name: kibana-proxy + labels: + logging-infra: support +secret: {{secret}} +redirectURIs: +- https://{{openshift_logging_kibana_hostname}} +- https://{{openshift_logging_kibana_ops_hostname}} +scopeRestrictions: +- literals: + - user:info + - user:check-access + - user:list-projects diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging/templates/pvc.j2 new file mode 100644 index 000000000..f19a3a750 --- /dev/null +++ b/roles/openshift_logging/templates/pvc.j2 @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{obj_name}} + labels: + logging-infra: support +{% if annotations is defined %} + annotations: +{% for key,value in annotations.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +{% if pv_selector is defined and pv_selector is mapping %} + selector: + matchLabels: +{% for key,value in pv_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} + accessModes: +{% for mode in access_modes %} + - {{ mode }} +{% endfor %} + resources: + requests: + storage: {{size}} diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging/templates/rolebinding.j2 new file mode 100644 index 000000000..fcd4e87cc --- /dev/null +++ b/roles/openshift_logging/templates/rolebinding.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: RoleBinding +metadata: + name: {{obj_name}} +roleRef: +{% if roleRef.kind is defined %} + kind: {{ roleRef.kind }} +{% 
endif %} + name: {{ roleRef.name }} +subjects: +{% for sub in subjects %} + - kind: {{ sub.kind }} + name: {{ sub.name }} +{% endfor %} diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging/templates/route_reencrypt.j2 new file mode 100644 index 000000000..8be30a2c4 --- /dev/null +++ b/roles/openshift_logging/templates/route_reencrypt.j2 @@ -0,0 +1,25 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2 new file mode 100644 index 000000000..d73bae9c4 --- /dev/null +++ b/roles/openshift_logging/templates/secret.j2 @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{secret_name}} +type: Opaque +data: +{% for s in secrets %} + {{s.key}}: {{s.value | b64encode}} +{% endfor %} diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2 new file mode 100644 index 000000000..6c4ec0c76 --- /dev/null +++ b/roles/openshift_logging/templates/service.j2 @@ -0,0 +1,28 @@ +apiVersion: "v1" +kind: "Service" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + ports: +{% for port in ports %} + - +{% for key, value in port.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% if port.targetPort is undefined %} + clusterIP: "None" +{% endif %} +{% endfor %} +{% if service_targetPort is defined %} + targetPort: {{service_targetPort}} +{% endif %} + selector: + {% for key, value in selector.iteritems() %} + {{key}}: {{value}} + {% endfor %} diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2 new file mode 100644 index 000000000..b22acc594 --- /dev/null +++ b/roles/openshift_logging/templates/serviceaccount.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{obj_name}} +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +{% if secrets is defined %} +secrets: +{% for name in secrets %} +- name: {{ name }} +{% endfor %} +{% endif %} diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml new file mode 100644 index 000000000..fb8af11e9 --- /dev/null +++ b/roles/openshift_logging/vars/main.yaml @@ -0,0 +1,40 @@ +tr_or_ohlip: "{{ openshift_hosted_logging_openshift_logging_image_prefix or target_registry or none }}" +ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip is defined else '' }}" +iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_openshift_logging_image_version | quote if openshift_hosted_logging_openshift_logging_image_version is defined else '' }}" +oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}" +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname is defined else '' 
}}" +kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname is defined else '' }}" +pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url is defined else '' }}" +es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size is defined else '' }}" +es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size is defined else '' }}" +es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram is defined else '' }}" +es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram is defined else '' }}" +es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size is defined else '' }}" +es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size is defined else '' }}" +es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix is defined else '' }}" +es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix is defined else '' }}" +es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic is defined else '' }}" +es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic is defined else '' }}" +es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group is defined else '' }}" +es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector is defined else '' }}" +es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector is defined else '' }}" +fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_openshift_logging_fluentd_nodeselector | quote if openshift_hosted_logging_openshift_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}" +kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector is defined else '' }}" +kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ 
openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector is defined else '' }}" +cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector is defined else '' }}" +cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector is defined else '' }}" +ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster is defined else '' }}" +use_journal_cmap_param: "{{ '--from-literal use-journal=' ~ openshift_hosted_logging_use_journal | string | lower | quote if openshift_hosted_logging_use_journal is defined else '' }}" +journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source is defined else '' }}" +openshift_logging_fluentd_journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_openshift_logging_fluentd_journal_read_from_head | string | lower | quote if openshift_hosted_logging_openshift_logging_fluentd_journal_read_from_head is defined else '' }}" +ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret is defined else '' }}" +deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ use_journal_cmap_param }} {{ journal_source_cmap_param }} {{ openshift_logging_fluentd_journal_read_from_head_cmap_param }} {{ ips_cmap_param }}" + +es_node_quorum: "{{openshift_logging_es_cluster_size/2 + 1}}" +es_recover_after_nodes: "{{openshift_logging_es_cluster_size - 1}}" +es_recover_expected_nodes: "{{openshift_logging_es_cluster_size}}" + +es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size/2 + 1}}" +es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size - 1}}" +es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size}}" -- cgit v1.2.3 From f79c819387b93af7b32a09b60652195f850d0574 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Wed, 14 Dec 2016 16:34:55 -0600 Subject: Updating to use deployer pod to generate JKS chain instead --- roles/openshift_logging/files/generate-jks.sh | 177 +++++++++++++++++----- roles/openshift_logging/tasks/generate_certs.yaml | 102 ++++++------- roles/openshift_logging/templates/jks_pod.j2 | 28 ++++ roles/openshift_logging/templates/job.j2 | 26 ---- 4 files changed, 214 insertions(+), 119 deletions(-) create mode 100644 roles/openshift_logging/templates/jks_pod.j2 delete mode 100644 roles/openshift_logging/templates/job.j2 (limited to 'roles') diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh index 8760f37fe..db7ed9ab8 100644 --- 
a/roles/openshift_logging/files/generate-jks.sh +++ b/roles/openshift_logging/files/generate-jks.sh @@ -1,36 +1,140 @@ #! /bin/sh set -ex -function importPKCS() { - dir=${SCRATCH_DIR:-_output} - NODE_NAME=$1 - ks_pass=${KS_PASS:-kspass} - ts_pass=${TS_PASS:-tspass} - rm -rf $NODE_NAME - - keytool \ - -importkeystore \ - -srckeystore $NODE_NAME.pkcs12 \ - -srcstoretype PKCS12 \ - -srcstorepass pass \ - -deststorepass $ks_pass \ - -destkeypass $ks_pass \ - -destkeystore $dir/keystore.jks \ - -alias 1 \ - -destalias $NODE_NAME - - echo "Import back to keystore (including CA chain)" - - keytool \ - -import \ - -file $dir/ca.crt \ - -keystore $dir/keystore.jks \ - -storepass $ks_pass \ - -noprompt -alias sig-ca +function generate_JKS_chain() { + dir=${SCRATCH_DIR:-_output} + ADD_OID=$1 + NODE_NAME=$2 + CERT_NAMES=${3:-$NODE_NAME} + ks_pass=${KS_PASS:-kspass} + ts_pass=${TS_PASS:-tspass} + rm -rf $NODE_NAME + + extension_names="" + for name in ${CERT_NAMES//,/ }; do + extension_names="${extension_names},dns:${name}" + done + + if [ "$ADD_OID" = true ]; then + extension_names="${extension_names},oid:1.2.3.4.5.5" + fi + + echo Generating keystore and certificate for node $NODE_NAME + + keytool -genkey \ + -alias $NODE_NAME \ + -keystore $dir/$NODE_NAME.jks \ + -keypass $ks_pass \ + -storepass $ks_pass \ + -keyalg RSA \ + -keysize 2048 \ + -validity 712 \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \ + -ext san=dns:localhost,ip:127.0.0.1"${extension_names}" + + echo Generating certificate signing request for node $NODE_NAME + + keytool -certreq \ + -alias $NODE_NAME \ + -keystore $dir/$NODE_NAME.jks \ + -storepass $ks_pass \ + -file $dir/$NODE_NAME.csr \ + -keyalg rsa \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \ + -ext san=dns:localhost,ip:127.0.0.1"${extension_names}" + + echo Sign certificate request with CA + + openssl ca \ + -in $dir/$NODE_NAME.csr \ + -notext \ + -out $dir/$NODE_NAME.crt \ + -config $dir/signing.conf \ + -extensions v3_req \ + -batch \ + -extensions server_ext + + echo "Import back to keystore (including CA chain)" + + keytool \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/$NODE_NAME.jks \ + -storepass $ks_pass \ + -noprompt -alias sig-ca + + keytool \ + -import \ + -file $dir/$NODE_NAME.crt \ + -keystore $dir/$NODE_NAME.jks \ + -storepass $ks_pass \ + -noprompt \ + -alias $NODE_NAME + + echo All done for $NODE_NAME +} - echo All done for $NODE_NAME +function generate_JKS_client_cert() { + NODE_NAME="$1" + ks_pass=${KS_PASS:-kspass} + ts_pass=${TS_PASS:-tspass} + dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets + + echo Generating keystore and certificate for node ${NODE_NAME} + + keytool -genkey \ + -alias $NODE_NAME \ + -keystore $dir/$NODE_NAME.jks \ + -keyalg RSA \ + -keysize 2048 \ + -validity 712 \ + -keypass $ks_pass \ + -storepass $ks_pass \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" + + echo Generating certificate signing request for node $NODE_NAME + + keytool -certreq \ + -alias $NODE_NAME \ + -keystore $dir/$NODE_NAME.jks \ + -file $dir/$NODE_NAME.csr \ + -keyalg rsa \ + -keypass $ks_pass \ + -storepass $ks_pass \ + -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" + + echo Sign certificate request with CA + openssl ca \ + -in "$dir/$NODE_NAME.csr" \ + -notext \ + -out "$dir/$NODE_NAME.crt" \ + -config $dir/signing.conf \ + -extensions v3_req \ + -batch \ + -extensions server_ext + + echo "Import back to keystore (including CA chain)" + + keytool \ + -import \ + -file $dir/ca.crt \ + -keystore $dir/$NODE_NAME.jks \ + 
-storepass $ks_pass \ + -noprompt -alias sig-ca + + keytool \ + -import \ + -file $dir/$NODE_NAME.crt \ + -keystore $dir/$NODE_NAME.jks \ + -storepass $ks_pass \ + -noprompt \ + -alias $NODE_NAME + + echo All done for $NODE_NAME } +function join { local IFS="$1"; shift; echo "$*"; } + function createTruststore() { echo "Import CA to truststore for validating client certs" @@ -43,29 +147,22 @@ function createTruststore() { -noprompt -alias sig-ca } -dir="/opt/deploy/" +dir="$CERT_DIR" SCRATCH_DIR=$dir -admin_user='system.admin' - if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then - importPKCS "system.admin" - mv $dir/keystore.jks $dir/system.admin.jks + generate_JKS_client_cert "system.admin" fi -if [[ ! -f $dir/searchguard_node_key || -z "$(keytool -list -keystore $dir/searchguard_node_key -storepass kspass | grep sig-ca)" ]]; then - importPKCS "elasticsearch" - mv $dir/keystore.jks $dir/searchguard_node_key +if [[ ! -f $dir/elasticsearch.jks || -z "$(keytool -list -keystore $dir/elasticsearch.jks -storepass kspass | grep sig-ca)" ]]; then + generate_JKS_chain true elasticsearch "$(join , logging-es{,-ops})" fi - -if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then - importPKCS "logging-es" +if [[ ! -f $dir/logging-es.jks || -z "$(keytool -list -keystore $dir/logging-es.jks -storepass kspass | grep sig-ca)" ]]; then + generate_JKS_chain false logging-es "$(join , logging-es{,-ops}{,-cluster}{,.${PROJECT}.svc.cluster.local})" fi [ ! -f $dir/truststore.jks ] && createTruststore -[ ! -f $dir/searchguard_node_truststore ] && cp $dir/truststore.jks $dir/searchguard_node_truststore - # necessary so that the job knows it completed successfully exit 0 diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 161d51055..6bfeccf61 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -102,61 +102,57 @@ loop_control: loop_var: node_name -- shell: certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,dns:$cert; done; echo $certs - register: elasticsearch_certs - check_mode: no - -- shell: certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,dns:$cert; done; echo $certs - register: logging_es_certs - check_mode: no - -#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs -# register: elasticsearch_certs -# check_mode: no - -#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs -# register: logging_es_certs -# check_mode: no +- name: Check for jks-generator service account + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}} + register: serviceaccount_result + ignore_errors: yes + when: not ansible_check_mode -- name: Generate PKCS12 chains -# include: generate_pkcs12.yaml component='system.admin' - include: generate_jks_chain.yaml component='system.admin' +- name: Create jks-generator service account + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount 
jks-generator -n {{openshift_logging_namespace}} + when: not ansible_check_mode and "not found" in serviceaccount_result.stderr + +- name: Check for hostmount-anyuid scc entry + shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o go-template='{{ '{{' }}.users{{ '}}' }}' | + grep system:serviceaccount:{{openshift_logging_namespace}}:jks-generator + register: scc_result + ignore_errors: yes + when: not ansible_check_mode + +- name: Add to hostmount-anyuid scc + command: > + {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}} + when: not ansible_check_mode and scc_result.rc == 1 + +- name: Copy jks script + copy: + src: generate-jks.sh + dest: "{{generated_certs_dir}}/generate-jks.sh" + +- name: Generate JKS chains + template: + src: jks_pod.j2 + dest: "{{mktemp.stdout}}/jks_pod.yaml" + +- name: create pod + shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} + register: podoutput + +- shell: > + echo {{podoutput.stdout}} | awk -v podname='\\\".*\\\"' '{print $2}' + register: podname + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pod {{podname.stdout}} -o go-template='{{ '{{' }}index .status "phase"{{ '}}' }}' -n {{openshift_logging_namespace}} + register: result + until: result.stdout.find("Succeeded") != -1 + retries: 5 + delay: 10 -- name: Generate PKCS12 chains -# include: generate_pkcs12.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}} - include: generate_jks_chain.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}} - with_items: - - {name: 'elasticsearch', oid: True, certs: '{{elasticsearch_certs.stdout}}'} - - {name: 'logging-es', certs: '{{logging_es_certs.stdout}}'} - loop_control: - loop_var: node -# This should be handled within the ES image instead... --- -#- name: Copy jks script -# copy: -# src: generate-jks.sh -# dest: "{{etcd_generated_certs_dir}}/logging" - -#- name: Generate JKS chains -# template: -# src: job.j2 -# dest: "{{mktemp.stdout}}/jks_job.yaml" - -#- name: kick off job -# shell: > -# {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_job.yaml -n {{logging_namespace}} -# register: podoutput - -#- shell: > -# echo {{podoutput.stdout}} | awk -v podname='\\\".*\\\"' '{print $2}' -# register: podname - -#- action: shell > -# {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig oc get pod/{{podname.stdout}} -o go-template='{{ '{{' }}index .status "phase"{{ '}}' }}' -n {{logging_namespace}} -# register: result -# until: result.stdout.find("Succeeded") != -1 -# retries: 5 -# delay: 10 -# --- This should be handled within the ES image instead... 
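For reference, the JKS-generation tasks added above boil down to a short manual sequence; the following is only a rough sketch of that flow, not part of the patch. It assumes an admin kubeconfig, the role's default "logging" namespace, and a jks_pod.yaml already rendered from templates/jks_pod.j2 into a scratch directory.

# Create the service account the one-shot pod runs as.
oc create serviceaccount jks-generator -n logging

# The pod hostPath-mounts generated_certs_dir, so the service account needs the
# hostmount-anyuid SCC (the role performs the same step with its admin binary).
oc adm policy add-scc-to-user hostmount-anyuid -z jks-generator -n logging

# Launch the pod and capture its name, e.g. "pod/jks-cert-gen-abc12".
pod=$(oc create -f jks_pod.yaml -n logging -o name)

# Poll until generate-jks.sh inside the pod finishes, mirroring the
# until/retries/delay loop in the task file.
for i in $(seq 1 5); do
  phase=$(oc get "$pod" -n logging -o jsonpath='{.status.phase}')
  [ "$phase" = "Succeeded" ] && break
  sleep 10
done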
- name: Generate proxy session shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 200 register: session_secret diff --git a/roles/openshift_logging/templates/jks_pod.j2 b/roles/openshift_logging/templates/jks_pod.j2 new file mode 100644 index 000000000..8b1c74211 --- /dev/null +++ b/roles/openshift_logging/templates/jks_pod.j2 @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + logging-infra: support + generateName: jks-cert-gen- +spec: + containers: + - name: jks-cert-gen + image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}} + imagePullPolicy: Always + command: ["sh", "{{generated_certs_dir}}/generate-jks.sh"] + securityContext: + privileged: true + volumeMounts: + - mountPath: {{generated_certs_dir}} + name: certmount + env: + - name: PROJECT + value: {{openshift_logging_namespace}} + - name: CERT_DIR + value: {{generated_certs_dir}} + restartPolicy: Never + serviceAccount: jks-generator + volumes: + - hostPath: + path: "{{generated_certs_dir}}" + name: certmount diff --git a/roles/openshift_logging/templates/job.j2 b/roles/openshift_logging/templates/job.j2 deleted file mode 100644 index d7794a407..000000000 --- a/roles/openshift_logging/templates/job.j2 +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - labels: - logging-infra: support - generateName: jks-cert-gen- -spec: - containers: - - name: jks-cert-gen - image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}} - imagePullPolicy: Always - command: ["sh", "generate-jks.sh"] - securityContext: - privileged: true - volumeMounts: - - mountPath: /opt/deploy - name: certmount - env: - - name: PROJECT - value: {{openshift_logging_namespace}} - restartPolicy: Never - serviceAccount: aggregated-logging-fluentd - volumes: - - hostPath: - path: "{{generated_certs_dir}}" - name: certmount -- cgit v1.2.3 From 97a36d297fa79f0f1e5e4cdee661da345146c3c1 Mon Sep 17 00:00:00 2001 From: Jeff Cantrill Date: Mon, 19 Dec 2016 16:03:06 -0500 Subject: minor updates for code reviews, remove unused params --- roles/openshift_logging/vars/main.yaml | 32 +------------------------------- 1 file changed, 1 insertion(+), 31 deletions(-) (limited to 'roles') diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml index fb8af11e9..4725820da 100644 --- a/roles/openshift_logging/vars/main.yaml +++ b/roles/openshift_logging/vars/main.yaml @@ -1,35 +1,5 @@ -tr_or_ohlip: "{{ openshift_hosted_logging_openshift_logging_image_prefix or target_registry or none }}" -ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip is defined else '' }}" -iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_openshift_logging_image_version | quote if openshift_hosted_logging_openshift_logging_image_version is defined else '' }}" -oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}" + openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname is defined else '' }}" -kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname is defined else '' }}" -pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url is defined else '' }}" -es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ 
openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size is defined else '' }}" -es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size is defined else '' }}" -es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram is defined else '' }}" -es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram is defined else '' }}" -es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size is defined else '' }}" -es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size is defined else '' }}" -es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix is defined else '' }}" -es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix is defined else '' }}" -es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic is defined else '' }}" -es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic is defined else '' }}" -es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group is defined else '' }}" -es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector is defined else '' }}" -es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector is defined else '' }}" -fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_openshift_logging_fluentd_nodeselector | quote if openshift_hosted_logging_openshift_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}" -kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector is defined else '' }}" -kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector is defined else '' }}" -cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector is defined else '' }}" -cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | 
quote if openshift_hosted_logging_curator_ops_nodeselector is defined else '' }}" -ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster is defined else '' }}" -use_journal_cmap_param: "{{ '--from-literal use-journal=' ~ openshift_hosted_logging_use_journal | string | lower | quote if openshift_hosted_logging_use_journal is defined else '' }}" -journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source is defined else '' }}" -openshift_logging_fluentd_journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_openshift_logging_fluentd_journal_read_from_head | string | lower | quote if openshift_hosted_logging_openshift_logging_fluentd_journal_read_from_head is defined else '' }}" -ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret is defined else '' }}" -deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ use_journal_cmap_param }} {{ journal_source_cmap_param }} {{ openshift_logging_fluentd_journal_read_from_head_cmap_param }} {{ ips_cmap_param }}" es_node_quorum: "{{openshift_logging_es_cluster_size/2 + 1}}" es_recover_after_nodes: "{{openshift_logging_es_cluster_size - 1}}" -- cgit v1.2.3 From a8c2999d94548d1c82b75387ef33d2e3f5c67536 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Wed, 4 Jan 2017 15:09:47 -0600 Subject: Fixing collision of system.admin cert generation --- roles/openshift_logging/files/generate-jks.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh index db7ed9ab8..995ec0b98 100644 --- a/roles/openshift_logging/files/generate-jks.sh +++ b/roles/openshift_logging/files/generate-jks.sh @@ -97,7 +97,7 @@ function generate_JKS_client_cert() { keytool -certreq \ -alias $NODE_NAME \ -keystore $dir/$NODE_NAME.jks \ - -file $dir/$NODE_NAME.csr \ + -file $dir/$NODE_NAME.jks.csr \ -keyalg rsa \ -keypass $ks_pass \ -storepass $ks_pass \ @@ -105,9 +105,9 @@ function generate_JKS_client_cert() { echo Sign certificate request with CA openssl ca \ - -in "$dir/$NODE_NAME.csr" \ + -in "$dir/$NODE_NAME.jks.csr" \ -notext \ - -out "$dir/$NODE_NAME.crt" \ + -out "$dir/$NODE_NAME.jks.crt" \ -config $dir/signing.conf \ -extensions v3_req \ -batch \ @@ -124,7 +124,7 @@ function generate_JKS_client_cert() { keytool \ -import \ - -file $dir/$NODE_NAME.crt \ + -file $dir/$NODE_NAME.jks.crt \ -keystore $dir/$NODE_NAME.jks \ -storepass $ks_pass \ -noprompt \ -- cgit v1.2.3 From 06c111d22641ba5cc2dbbe0144d9d6722d94f159 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Wed, 11 Jan 2017 15:26:46 -0600 Subject: addressing comments --- roles/openshift_logging/defaults/main.yml | 2 +- 
roles/openshift_logging/files/signing.conf | 103 ----------- roles/openshift_logging/files/util.sh | 192 --------------------- roles/openshift_logging/filter_plugins/__init__.py | 0 roles/openshift_logging/library/__init.py__ | 0 roles/openshift_logging/meta/main.yaml | 14 +- roles/openshift_logging/tasks/generate_certs.yaml | 48 ++---- .../tasks/generate_configmaps.yaml | 25 ++- .../tasks/generate_jks_chain.yaml | 60 ------- roles/openshift_logging/tasks/generate_pkcs12.yaml | 24 --- roles/openshift_logging/tasks/install_fluentd.yaml | 15 +- roles/openshift_logging/tasks/install_logging.yaml | 6 +- roles/openshift_logging/tasks/label_node.yaml | 8 +- roles/openshift_logging/tasks/main.yaml | 5 + roles/openshift_logging/tasks/scale.yaml | 16 +- roles/openshift_logging/tasks/start_cluster.yaml | 24 +-- roles/openshift_logging/tasks/stop_cluster.yaml | 24 +-- roles/openshift_logging/tasks/upgrade_logging.yaml | 4 +- roles/openshift_logging/templates/fluentd.j2 | 2 +- roles/openshift_logging/templates/signing.conf.j2 | 103 +++++++++++ 20 files changed, 214 insertions(+), 461 deletions(-) delete mode 100644 roles/openshift_logging/files/signing.conf delete mode 100644 roles/openshift_logging/files/util.sh delete mode 100644 roles/openshift_logging/filter_plugins/__init__.py delete mode 100644 roles/openshift_logging/library/__init.py__ delete mode 100644 roles/openshift_logging/tasks/generate_jks_chain.yaml delete mode 100644 roles/openshift_logging/tasks/generate_pkcs12.yaml create mode 100644 roles/openshift_logging/templates/signing.conf.j2 (limited to 'roles') diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index a441f10b9..4eb852207 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -33,7 +33,7 @@ openshift_logging_kibana_ops_proxy_debug: false openshift_logging_kibana_ops_proxy_cpu_limit: null openshift_logging_kibana_ops_proxy_memory_limit: null -openshift_logging_fluentd_nodeselector: '"logging-infra-fluentd": "true"' +openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi openshift_logging_fluentd_es_copy: false diff --git a/roles/openshift_logging/files/signing.conf b/roles/openshift_logging/files/signing.conf deleted file mode 100644 index 810a057d9..000000000 --- a/roles/openshift_logging/files/signing.conf +++ /dev/null @@ -1,103 +0,0 @@ -# Simple Signing CA - -# The [default] section contains global constants that can be referred to from -# the entire configuration file. It may also hold settings pertaining to more -# than one openssl command. - -[ default ] -#dir = _output # Top dir - -# The next part of the configuration file is used by the openssl req command. -# It defines the CA's key pair, its DN, and the desired extensions for the CA -# certificate. 
- -[ req ] -default_bits = 2048 # RSA key size -encrypt_key = yes # Protect private key -default_md = sha1 # MD to use -utf8 = yes # Input is UTF-8 -string_mask = utf8only # Emit UTF-8 strings -prompt = no # Don't prompt for DN -distinguished_name = ca_dn # DN section -req_extensions = ca_reqext # Desired extensions - -[ ca_dn ] -0.domainComponent = "io" -1.domainComponent = "openshift" -organizationName = "OpenShift Origin" -organizationalUnitName = "Logging Signing CA" -commonName = "Logging Signing CA" - -[ ca_reqext ] -keyUsage = critical,keyCertSign,cRLSign -basicConstraints = critical,CA:true,pathlen:0 -subjectKeyIdentifier = hash - -# The remainder of the configuration file is used by the openssl ca command. -# The CA section defines the locations of CA assets, as well as the policies -# applying to the CA. - -[ ca ] -default_ca = signing_ca # The default CA section - -[ signing_ca ] -certificate = $dir/ca.crt # The CA cert -private_key = $dir/ca.key # CA private key -new_certs_dir = $dir/ # Certificate archive -serial = $dir/ca.serial.txt # Serial number file -crlnumber = $dir/ca.crl.srl # CRL number file -database = $dir/ca.db # Index file -unique_subject = no # Require unique subject -default_days = 730 # How long to certify for -default_md = sha1 # MD to use -policy = any_pol # Default naming policy -email_in_dn = no # Add email to cert DN -preserve = no # Keep passed DN ordering -name_opt = ca_default # Subject DN display options -cert_opt = ca_default # Certificate display options -copy_extensions = copy # Copy extensions from CSR -x509_extensions = client_ext # Default cert extensions -default_crl_days = 7 # How long before next CRL -crl_extensions = crl_ext # CRL extensions - -# Naming policies control which parts of a DN end up in the certificate and -# under what circumstances certification should be denied. - -[ match_pol ] -domainComponent = match # Must match 'simple.org' -organizationName = match # Must match 'Simple Inc' -organizationalUnitName = optional # Included if present -commonName = supplied # Must be present - -[ any_pol ] -domainComponent = optional -countryName = optional -stateOrProvinceName = optional -localityName = optional -organizationName = optional -organizationalUnitName = optional -commonName = optional -emailAddress = optional - -# Certificate extensions define what types of certificates the CA is able to -# create. - -[ client_ext ] -keyUsage = critical,digitalSignature,keyEncipherment -basicConstraints = CA:false -extendedKeyUsage = clientAuth -subjectKeyIdentifier = hash -authorityKeyIdentifier = keyid - -[ server_ext ] -keyUsage = critical,digitalSignature,keyEncipherment -basicConstraints = CA:false -extendedKeyUsage = serverAuth,clientAuth -subjectKeyIdentifier = hash -authorityKeyIdentifier = keyid - -# CRL extensions exist solely to point to the CA certificate that has issued -# the CRL. 
- -[ crl_ext ] -authorityKeyIdentifier = keyid diff --git a/roles/openshift_logging/files/util.sh b/roles/openshift_logging/files/util.sh deleted file mode 100644 index 5752a0fcd..000000000 --- a/roles/openshift_logging/files/util.sh +++ /dev/null @@ -1,192 +0,0 @@ -#!/bin/bash - -function generate_JKS_chain() { - dir=${SCRATCH_DIR:-_output} - ADD_OID=$1 - NODE_NAME=$2 - CERT_NAMES=${3:-$NODE_NAME} - ks_pass=${KS_PASS:-kspass} - ts_pass=${TS_PASS:-tspass} - rm -rf $NODE_NAME - - extension_names="" - for name in ${CERT_NAMES//,/ }; do - extension_names="${extension_names},dns:${name}" - done - - if [ "$ADD_OID" = true ]; then - extension_names="${extension_names},oid:1.2.3.4.5.5" - fi - - echo Generating keystore and certificate for node $NODE_NAME - - "$keytool" -genkey \ - -alias $NODE_NAME \ - -keystore $dir/keystore.jks \ - -keypass $ks_pass \ - -storepass $ks_pass \ - -keyalg RSA \ - -keysize 2048 \ - -validity 712 \ - -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \ - -ext san=dns:localhost,ip:127.0.0.1"${extension_names}" - - echo Generating certificate signing request for node $NODE_NAME - - "$keytool" -certreq \ - -alias $NODE_NAME \ - -keystore $dir/keystore.jks \ - -storepass $ks_pass \ - -file $dir/$NODE_NAME.csr \ - -keyalg rsa \ - -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \ - -ext san=dns:localhost,ip:127.0.0.1"${extension_names}" - - echo Sign certificate request with CA - - openssl ca \ - -in $dir/$NODE_NAME.csr \ - -notext \ - -out $dir/$NODE_NAME.crt \ - -config $dir/signing.conf \ - -extensions v3_req \ - -batch \ - -extensions server_ext - - echo "Import back to keystore (including CA chain)" - - "$keytool" \ - -import \ - -file $dir/ca.crt \ - -keystore $dir/keystore.jks \ - -storepass $ks_pass \ - -noprompt -alias sig-ca - - "$keytool" \ - -import \ - -file $dir/$NODE_NAME.crt \ - -keystore $dir/keystore.jks \ - -storepass $ks_pass \ - -noprompt \ - -alias $NODE_NAME - - echo "Import CA to truststore for validating client certs" - - "$keytool" \ - -import \ - -file $dir/ca.crt \ - -keystore $dir/truststore.jks \ - -storepass $ts_pass \ - -noprompt -alias sig-ca - - echo All done for $NODE_NAME -} - -function generate_PEM_cert() { - NODE_NAME="$1" - dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets - - echo Generating keystore and certificate for node ${NODE_NAME} - - openssl req -out "$dir/$NODE_NAME.csr" -new -newkey rsa:2048 -keyout "$dir/$NODE_NAME.key" -subj "/CN=$NODE_NAME/OU=OpenShift/O=Logging" -days 712 -nodes - - echo Sign certificate request with CA - openssl ca \ - -in "$dir/$NODE_NAME.csr" \ - -notext \ - -out "$dir/$NODE_NAME.crt" \ - -config $dir/signing.conf \ - -extensions v3_req \ - -batch \ - -extensions server_ext -} - -function generate_JKS_client_cert() { - NODE_NAME="$1" - ks_pass=${KS_PASS:-kspass} - ts_pass=${TS_PASS:-tspass} - dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets - - echo Generating keystore and certificate for node ${NODE_NAME} - - "$keytool" -genkey \ - -alias $NODE_NAME \ - -keystore $dir/$NODE_NAME.jks \ - -keyalg RSA \ - -keysize 2048 \ - -validity 712 \ - -keypass $ks_pass \ - -storepass $ks_pass \ - -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" - - echo Generating certificate signing request for node $NODE_NAME - - "$keytool" -certreq \ - -alias $NODE_NAME \ - -keystore $dir/$NODE_NAME.jks \ - -file $dir/$NODE_NAME.csr \ - -keyalg rsa \ - -keypass $ks_pass \ - -storepass $ks_pass \ - -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" - - echo Sign certificate request with CA - 
openssl ca \ - -in "$dir/$NODE_NAME.csr" \ - -notext \ - -out "$dir/$NODE_NAME.crt" \ - -config $dir/signing.conf \ - -extensions v3_req \ - -batch \ - -extensions server_ext - - echo "Import back to keystore (including CA chain)" - - "$keytool" \ - -import \ - -file $dir/ca.crt \ - -keystore $dir/$NODE_NAME.jks \ - -storepass $ks_pass \ - -noprompt -alias sig-ca - - "$keytool" \ - -import \ - -file $dir/$NODE_NAME.crt \ - -keystore $dir/$NODE_NAME.jks \ - -storepass $ks_pass \ - -noprompt \ - -alias $NODE_NAME - - echo All done for $NODE_NAME -} - -function join { local IFS="$1"; shift; echo "$*"; } - -function get_es_dcs() { - oc get dc --selector logging-infra=elasticsearch -o name -} - -function get_curator_dcs() { - oc get dc --selector logging-infra=curator -o name -} - -function extract_nodeselector() { - local inputstring="${1//\"/}" # remove any errant double quotes in the inputs - local selectors=() - - for keyvalstr in ${inputstring//\,/ }; do - - keyval=( ${keyvalstr//=/ } ) - - if [[ -n "${keyval[0]}" && -n "${keyval[1]}" ]]; then - selectors+=( "\"${keyval[0]}\": \"${keyval[1]}\"") - else - echo "Could not make a node selector label from '${keyval[*]}'" - exit 255 - fi - done - - if [[ "${#selectors[*]}" -gt 0 ]]; then - echo nodeSelector: "{" $(join , "${selectors[@]}") "}" - fi -} diff --git a/roles/openshift_logging/filter_plugins/__init__.py b/roles/openshift_logging/filter_plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/openshift_logging/library/__init.py__ b/roles/openshift_logging/library/__init.py__ deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml index 8bff6cfb7..a95c84901 100644 --- a/roles/openshift_logging/meta/main.yaml +++ b/roles/openshift_logging/meta/main.yaml @@ -1,3 +1,15 @@ --- +galaxy_info: + author: OpenShift Red Hat + description: OpenShift Embedded Router + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud dependencies: - - { role: openshift_facts } + - role: openshift_facts diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 6bfeccf61..bcf4881bb 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -31,14 +31,10 @@ register: signing_conf_file check_mode: no -- block: - - copy: src=signing.conf dest={{generated_certs_dir}}/signing.conf - check_mode: no - - - lineinfile: "dest={{generated_certs_dir}}/signing.conf regexp='# Top dir$' line='dir = {{generated_certs_dir}} # Top dir'" - check_mode: no - when: - - not signing_conf_file.stat.exists +- template: src=signing.conf.j2 dest={{generated_certs_dir}}/signing.conf + vars: + - top_dir: '{{generated_certs_dir}}' + when: not signing_conf_file.stat.exists - include: procure_server_certs.yaml loop_control: @@ -49,19 +45,6 @@ - procure_component: kibana-internal hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" -# - include: procure_server_certs.yaml -# vars: -# - procure_component: kibana - -# - include: procure_server_certs.yaml -# vars: -# - procure_component: kibana-ops - -# - include: procure_server_certs.yaml -# vars: -# - procure_component: kibana-internal -# - hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" - - name: Copy proxy TLS configuration file copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json when: server_tls_json is undefined @@ -116,8 +99,8 @@ - name: Check for hostmount-anyuid scc entry shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o go-template='{{ '{{' }}.users{{ '}}' }}' | - grep system:serviceaccount:{{openshift_logging_namespace}}:jks-generator + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}' | + grep system:serviceaccount:{{openshift_logging_namespace | quote}}:jks-generator register: scc_result ignore_errors: yes when: not ansible_check_mode @@ -131,34 +114,33 @@ copy: src: generate-jks.sh dest: "{{generated_certs_dir}}/generate-jks.sh" + check_mode: no - name: Generate JKS chains template: src: jks_pod.j2 dest: "{{mktemp.stdout}}/jks_pod.yaml" + check_mode: no - name: create pod - shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name register: podoutput + check_mode: no -- shell: > - echo {{podoutput.stdout}} | awk -v podname='\\\".*\\\"' '{print $2}' - register: podname - -- shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pod {{podname.stdout}} -o go-template='{{ '{{' }}index .status "phase"{{ '}}' }}' -n {{openshift_logging_namespace}} +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}} register: result until: result.stdout.find("Succeeded") != -1 retries: 5 delay: 10 - name: 
Generate proxy session - shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 200 + command: echo {{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} register: session_secret check_mode: no - name: Generate oauth client secret - shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 64 + command: echo {{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} register: oauth_secret check_mode: no diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml index 86882a5da..f9f9ee79f 100644 --- a/roles/openshift_logging/tasks/generate_configmaps.yaml +++ b/roles/openshift_logging/tasks/generate_configmaps.yaml @@ -4,37 +4,44 @@ src: elasticsearch-logging.yml dest: "{{mktemp.stdout}}/elasticsearch-logging.yml" when: es_logging_contents is undefined + changed_when: no - copy: src: elasticsearch.yml dest: "{{mktemp.stdout}}/elasticsearch.yml" when: es_config_contents is undefined + changed_when: no - lineinfile: dest: "{{mktemp.stdout}}/elasticsearch.yml" regexp: '^openshift\.operations\.allow_cluster_reader(.)*$' line: "\nopenshift.operations.allow_cluster_reader: {{openshift_logging_es_ops_allow_cluster_reader | lower}}" when: es_config_contents is undefined + changed_when: no - copy: content: "{{es_logging_contents}}" dest: "{{mktemp.stdout}}/elasticsearch-logging.yml" when: es_logging_contents is defined + changed_when: no - copy: content: "{{es_config_contents}}" dest: "{{mktemp.stdout}}/elasticsearch.yml" when: es_config_contents is defined + changed_when: no - - shell: > + - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run register: es_configmap + changed_when: no - copy: content: "{{es_configmap.stdout}}" dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml" when: es_configmap.stdout is defined + changed_when: no check_mode: no - block: @@ -42,21 +49,25 @@ src: curator.yml dest: "{{mktemp.stdout}}/curator.yml" when: curator_config_contents is undefined + changed_when: no - copy: content: "{{curator_config_contents}}" dest: "{{mktemp.stdout}}/curator.yml" when: curator_config_contenets is defined + changed_when: no - - shell: > + - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run register: curator_configmap + changed_when: no - copy: content: "{{curator_configmap.stdout}}" dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml" when: curator_configmap.stdout is defined + changed_when: no check_mode: no - block: @@ -64,40 +75,48 @@ src: fluent.conf dest: "{{mktemp.stdout}}/fluent.conf" when: fluentd_config_contents is undefined + changed_when: no - copy: src: fluentd-throttle-config.yaml dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml" when: fluentd_throttle_contents is undefined + changed_when: no - copy: src: secure-forward.conf dest: "{{mktemp.stdout}}/secure-forward.conf" when: fluentd_securefoward_contents is undefined + changed_when: no - copy: content: "{{fluentd_config_contents}}" dest: "{{mktemp.stdout}}/fluent.conf" when: fluentd_config_contents is defined + changed_when: no - copy: content: "{{fluentd_throttle_contents}}" dest: 
"{{mktemp.stdout}}/fluentd-throttle-config.yaml" when: fluentd_throttle_contents is defined + changed_when: no - copy: content: "{{fluentd_secureforward_contents}}" dest: "{{mktemp.stdout}}/secure-forward.conf" when: fluentd_secureforward_contents is defined + changed_when: no - - shell: > + - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run register: fluentd_configmap + changed_when: no - copy: content: "{{fluentd_configmap.stdout}}" dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml" when: fluentd_configmap.stdout is defined + changed_when: no check_mode: no diff --git a/roles/openshift_logging/tasks/generate_jks_chain.yaml b/roles/openshift_logging/tasks/generate_jks_chain.yaml deleted file mode 100644 index 14ffdc51f..000000000 --- a/roles/openshift_logging/tasks/generate_jks_chain.yaml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- debug: msg="certs are {{chain_certs}} and oid is {{oid}}" - when: chain_certs is defined and oid is defined - -- debug: msg="certs are {{chain_certs}}" - when: chain_certs is defined and oid is undefined - -- name: Build extensions with certs - shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid:1.2.3.4.5.5','') }}" - register: cert_ext - when: chain_certs is defined and oid is defined - check_mode: no - -- debug: msg="extensions are {{cert_ext.stdout}}" - when: cert_ext.stdout is defined - -- shell: > - echo {{ (cert_ext.stdout is defined) | ternary( '-ext san=dns:localhost,ip:127.0.0.1','') }}{{ (cert_ext.stdout is defined) | ternary( cert_ext.stdout, '') }} - register: extensions - check_mode: no - -- name: Checking for {{component}}.jks ... - stat: path="{{generated_certs_dir}}/{{component}}.jks" - register: jks_file - check_mode: no - -- name: Checking for truststore... 
- stat: path="{{generated_certs_dir}}/truststore.jks" - register: jks_truststore - check_mode: no - -- block: - - shell: > - keytool -genkey -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -keypass kspass -storepass kspass - -keyalg RSA -keysize 2048 -validity 712 -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}} - - - shell: > - keytool -certreq -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -storepass kspass - -file {{generated_certs_dir}}/{{component}}-jks.csr -keyalg RSA -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}} - - - shell: > - openssl ca -in {{generated_certs_dir}}/{{component}}-jks.csr -notext -out {{generated_certs_dir}}/{{component}}-jks.crt - -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext - - - shell: > - keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/{{component}}.jks - -storepass kspass -noprompt -alias sig-ca - - - shell: > - keytool -import -file {{generated_certs_dir}}/{{component}}-jks.crt -keystore {{generated_certs_dir}}/{{component}}.jks - -storepass kspass -noprompt -alias {{component}} - - when: not jks_file.stat.exists - check_mode: no - -- block: - - shell: > - keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/truststore.jks -storepass tspass -noprompt -alias sig-ca - when: not jks_truststore.stat.exists - check_mode: no diff --git a/roles/openshift_logging/tasks/generate_pkcs12.yaml b/roles/openshift_logging/tasks/generate_pkcs12.yaml deleted file mode 100644 index dde65746f..000000000 --- a/roles/openshift_logging/tasks/generate_pkcs12.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- debug: msg="certs are {{chain_certs}} and oid is {{oid}}" - when: chain_certs is defined and oid is defined - -- debug: msg="certs are {{chain_certs}}" - when: chain_certs is defined and oid is undefined - -- name: Build extensions with certs - shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid=1.2.3.4.5.5','') }}" - register: cert_ext - when: chain_certs is defined and oid is defined - -- debug: msg="extensions are {{cert_ext.stdout}}" - when: cert_ext.stdout is defined - -- include: generate_pems.yaml - -- local_action: stat path="{{mktemp.stdout}}/{{component}}.pkcs12" - register: pkcs_file - become: no - -- name: Generating pkcs12 chain for {{component}} - command: openssl pkcs12 -export -out {{generated_certs_dir}}/{{component}}.pkcs12 -inkey {{generated_certs_dir}}/{{component}}.key -in {{generated_certs_dir}}/{{component}}.crt -password pass:pass - when: not pkcs_file.stat.exists diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml index 35bd452ed..6f93081d7 100644 --- a/roles/openshift_logging/tasks/install_fluentd.yaml +++ b/roles/openshift_logging/tasks/install_fluentd.yaml @@ -1,14 +1,23 @@ --- -- shell: > +- command: > echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}" register: fluentd_ops_host check_mode: no -- shell: > +- command: > echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}" register: fluentd_ops_port check_mode: no +- command: > + echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" + register: openshift_logging_fluentd_nodeselector_key + check_mode: no + +- command: > + echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}" + register: 
openshift_logging_fluentd_nodeselector_value + check_mode: no - name: Generating Fluentd daemonset template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml @@ -19,6 +28,8 @@ daemonset_serviceAccount: aggregated-logging-fluentd ops_host: "{{ fluentd_ops_host.stdout }}" ops_port: "{{ fluentd_ops_port.stdout }}" + fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector_key.stdout}}" + fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}" check_mode: no - name: "Set permissions for fluentd" diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 591f11476..09630e213 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -24,14 +24,14 @@ loop_var: install_component - name: Register API objects from generated templates - shell: ls -d -1 {{mktemp.stdout}}/templates/* | sort + command: ls -1 {{mktemp.stdout}}/templates/ register: logging_objects check_mode: no - name: Creating API objects from generated templates command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig apply -f {{file}} -n {{openshift_logging_namespace}} - with_items: "{{logging_objects.stdout_lines}}" + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig apply -f {{mktemp.stdout}}/templates/{{file}} -n {{openshift_logging_namespace}} + with_items: "{{logging_objects.stdout_lines | sort}}" loop_control: loop_var: file when: not ansible_check_mode diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml index 55cfea38c..f35ccc3b6 100644 --- a/roles/openshift_logging/tasks/label_node.yaml +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -1,12 +1,12 @@ --- -- shell: > +- command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} - --template='{{ '{{index .metadata.labels "' }}{{label}}{{ '"}}' }}' + -o jsonpath='{.metadata.labels.{{ label }}}' register: label_value failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr when: not ansible_check_mode -- shell: > +- command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite register: label_result failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr @@ -17,7 +17,7 @@ - unlabel is not defined or not unlabel - not ansible_check_mode -- shell: > +- command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}- register: label_result failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index b64c24ade..c4ec1b255 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -1,4 +1,9 @@ --- +- fail: + msg: Only one Fluentd nodeselector key pair should be provided + when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" + + - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX register: mktemp diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml index 3d86ea171..aa3e39641 100644 --- a/roles/openshift_logging/tasks/scale.yaml +++ b/roles/openshift_logging/tasks/scale.yaml @@ 
-1,26 +1,26 @@ --- -- shell: > +- command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} - --template='{{ '{{.spec.replicas}}' }}' -n {{openshift_logging_namespace}} + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} register: replica_count failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr when: not ansible_check_mode -- shell: > +- command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}} --replicas={{desired}} -n {{openshift_logging_namespace}} register: scale_result failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr when: - - replica_count.stdout != desired - not ansible_check_mode + - replica_count.stdout|int != desired -- shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig describe {{object}} -n {{openshift_logging_namespace}} | awk -v statusrx='Pods Status:' '$0 ~ statusrx {print $3}' +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} -n {{openshift_logging_namespace}} -o jsonpath='{.status.replicas}' register: replica_counts - until: replica_counts.stdout.find("{{desired}}") != -1 + until: replica_counts.stdout|int == desired retries: 30 delay: 10 when: - - replica_count.stdout != desired - not ansible_check_mode + - replica_count.stdout|int != desired diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index cdfc5f2d3..090ca8359 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -1,16 +1,16 @@ --- -- shell: > - echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1 +- command: > + echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" register: openshift_logging_fluentd_nodeselector_key check_mode: no -- shell: > - echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2 +- command: > + echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}" register: openshift_logging_fluentd_nodeselector_value check_mode: no -- shell: > - {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g" +- command: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}' register: fluentd_hosts when: "'--all' in openshift_logging_fluentd_hosts" check_mode: no @@ -25,7 +25,7 @@ loop_control: loop_var: fluentd_host -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} register: es_dc check_mode: no @@ -38,7 +38,7 @@ loop_control: loop_var: object -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}} register: kibana_dc check_mode: no @@ -51,7 +51,7 @@ loop_control: loop_var: object -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}} register: curator_dc check_mode: no @@ -64,7 +64,7 @@ loop_control: loop_var: object -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}} 
register: es_dc check_mode: no @@ -78,7 +78,7 @@ loop_var: object when: openshift_logging_use_ops -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} register: kibana_dc check_mode: no @@ -92,7 +92,7 @@ loop_var: object when: openshift_logging_use_ops -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} register: curator_dc check_mode: no diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml index e018d0618..dd3693f7e 100644 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -1,14 +1,14 @@ --- -- shell: > - echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1 +- command: > + echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" register: openshift_logging_fluentd_nodeselector_key -- shell: > - echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2 +- command: > + echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}" register: openshift_logging_fluentd_nodeselector_value -- shell: > - {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g" +- command: > + {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}' register: fluentd_hosts when: "'--all' in openshift_logging_fluentd_hosts" @@ -22,7 +22,7 @@ loop_control: loop_var: fluentd_host -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} register: es_dc @@ -34,7 +34,7 @@ loop_control: loop_var: object -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}} register: kibana_dc @@ -46,7 +46,7 @@ loop_control: loop_var: object -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}} register: curator_dc @@ -58,7 +58,7 @@ loop_control: loop_var: object -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}} register: es_dc @@ -71,7 +71,7 @@ loop_var: object when: openshift_logging_use_ops -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} register: kibana_dc @@ -84,7 +84,7 @@ loop_var: object when: openshift_logging_use_ops -- shell: > +- command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} register: curator_dc diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml index b2c8022d5..9b285a5fe 100644 --- a/roles/openshift_logging/tasks/upgrade_logging.yaml +++ b/roles/openshift_logging/tasks/upgrade_logging.yaml @@ -8,7 +8,7 @@ start_cluster: False # ensure that ES is running -- shell: > +- command: > {{openshift.common.client_binary}} 
--config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} register: es_dc check_mode: no @@ -26,7 +26,7 @@ dest: {{mktemp.stdout}}/es_migration.sh - name: Run upgrade scripts - shell: > + command: > sh {{mktemp.stdout}}/es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}} - name: Start up rest of cluster diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 index a09b582a2..b6c91f8ed 100644 --- a/roles/openshift_logging/templates/fluentd.j2 +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -25,7 +25,7 @@ spec: spec: serviceAccountName: "{{daemonset_serviceAccount}}" nodeSelector: - {{openshift_logging_fluentd_nodeselector}} + {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}" containers: - name: "{{daemonset_container_name}}" image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}" diff --git a/roles/openshift_logging/templates/signing.conf.j2 b/roles/openshift_logging/templates/signing.conf.j2 new file mode 100644 index 000000000..727cde4c9 --- /dev/null +++ b/roles/openshift_logging/templates/signing.conf.j2 @@ -0,0 +1,103 @@ +# Simple Signing CA + +# The [default] section contains global constants that can be referred to from +# the entire configuration file. It may also hold settings pertaining to more +# than one openssl command. + +[ default ] +dir = {{top_dir}} # Top dir + +# The next part of the configuration file is used by the openssl req command. +# It defines the CA's key pair, its DN, and the desired extensions for the CA +# certificate. + +[ req ] +default_bits = 2048 # RSA key size +encrypt_key = yes # Protect private key +default_md = sha1 # MD to use +utf8 = yes # Input is UTF-8 +string_mask = utf8only # Emit UTF-8 strings +prompt = no # Don't prompt for DN +distinguished_name = ca_dn # DN section +req_extensions = ca_reqext # Desired extensions + +[ ca_dn ] +0.domainComponent = "io" +1.domainComponent = "openshift" +organizationName = "OpenShift Origin" +organizationalUnitName = "Logging Signing CA" +commonName = "Logging Signing CA" + +[ ca_reqext ] +keyUsage = critical,keyCertSign,cRLSign +basicConstraints = critical,CA:true,pathlen:0 +subjectKeyIdentifier = hash + +# The remainder of the configuration file is used by the openssl ca command. +# The CA section defines the locations of CA assets, as well as the policies +# applying to the CA. 
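+#
+# For orientation (illustrative only): certificate signing requests are signed
+# against this configuration with openssl ca invocations along the lines of
+# the calls used elsewhere in this role, for example:
+#
+#   openssl ca -in node.csr -notext -out node.crt \
+#     -config signing.conf -extensions v3_req -batch -extensions server_ext
+#
+# The file names above are placeholders, and the CA key, certificate, serial
+# and database files referenced in [ signing_ca ] below are assumed to exist
+# before any signing takes place.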
+ +[ ca ] +default_ca = signing_ca # The default CA section + +[ signing_ca ] +certificate = $dir/ca.crt # The CA cert +private_key = $dir/ca.key # CA private key +new_certs_dir = $dir/ # Certificate archive +serial = $dir/ca.serial.txt # Serial number file +crlnumber = $dir/ca.crl.srl # CRL number file +database = $dir/ca.db # Index file +unique_subject = no # Require unique subject +default_days = 730 # How long to certify for +default_md = sha1 # MD to use +policy = any_pol # Default naming policy +email_in_dn = no # Add email to cert DN +preserve = no # Keep passed DN ordering +name_opt = ca_default # Subject DN display options +cert_opt = ca_default # Certificate display options +copy_extensions = copy # Copy extensions from CSR +x509_extensions = client_ext # Default cert extensions +default_crl_days = 7 # How long before next CRL +crl_extensions = crl_ext # CRL extensions + +# Naming policies control which parts of a DN end up in the certificate and +# under what circumstances certification should be denied. + +[ match_pol ] +domainComponent = match # Must match 'simple.org' +organizationName = match # Must match 'Simple Inc' +organizationalUnitName = optional # Included if present +commonName = supplied # Must be present + +[ any_pol ] +domainComponent = optional +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = optional +emailAddress = optional + +# Certificate extensions define what types of certificates the CA is able to +# create. + +[ client_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +[ server_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +# CRL extensions exist solely to point to the CA certificate that has issued +# the CRL. 
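+#
+# Illustrative only, and not something this role is shown doing itself: with
+# the crlnumber, default_crl_days and crl_extensions settings above, a CRL
+# could be produced from this CA with a command such as:
+#
+#   openssl ca -config signing.conf -gencrl -out ca.crl
+#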
+ +[ crl_ext ] +authorityKeyIdentifier = keyid -- cgit v1.2.3 From 55ddb4f4b901632f051251ba0387a107dd3bb7ef Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Thu, 12 Jan 2017 12:52:41 -0600 Subject: Removing shell module calls and cleaning up changed --- roles/openshift_logging/tasks/generate_certs.yaml | 49 ++++++++++++++++------ .../tasks/generate_clusterrolebindings.yaml | 1 + .../tasks/generate_clusterroles.yaml | 1 + .../tasks/generate_deploymentconfigs.yaml | 6 +++ roles/openshift_logging/tasks/generate_pvcs.yaml | 2 + .../tasks/generate_rolebindings.yaml | 1 + roles/openshift_logging/tasks/generate_routes.yaml | 1 + .../openshift_logging/tasks/generate_secrets.yaml | 18 ++++---- .../tasks/generate_serviceaccounts.yaml | 1 + .../openshift_logging/tasks/generate_services.yaml | 6 +++ roles/openshift_logging/tasks/install_curator.yaml | 20 +++++++++ .../tasks/install_elasticsearch.yaml | 2 + roles/openshift_logging/tasks/install_fluentd.yaml | 49 ++++++++++++---------- roles/openshift_logging/tasks/install_kibana.yaml | 23 ++++++++++ roles/openshift_logging/tasks/install_logging.yaml | 1 + roles/openshift_logging/tasks/install_support.yaml | 6 ++- roles/openshift_logging/tasks/label_node.yaml | 1 + .../tasks/procure_server_certs.yaml | 6 +-- roles/openshift_logging/tasks/scale.yaml | 2 + roles/openshift_logging/tasks/start_cluster.yaml | 21 ++++------ roles/openshift_logging/tasks/stop_cluster.yaml | 17 ++++---- roles/openshift_logging/templates/curator.j2 | 2 +- roles/openshift_logging/templates/es.j2 | 2 +- roles/openshift_logging/templates/kibana.j2 | 2 +- 24 files changed, 169 insertions(+), 71 deletions(-) (limited to 'roles') diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index bcf4881bb..5e6498ad7 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -91,6 +91,7 @@ register: serviceaccount_result ignore_errors: yes when: not ansible_check_mode + changed_when: no - name: Create jks-generator service account command: > @@ -98,35 +99,59 @@ when: not ansible_check_mode and "not found" in serviceaccount_result.stderr - name: Check for hostmount-anyuid scc entry - shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}' | - grep system:serviceaccount:{{openshift_logging_namespace | quote}}:jks-generator + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}' register: scc_result - ignore_errors: yes when: not ansible_check_mode + changed_when: no - name: Add to hostmount-anyuid scc command: > {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}} - when: not ansible_check_mode and scc_result.rc == 1 + when: + - not ansible_check_mode + - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1 -- name: Copy jks script +- name: Copy JKS generation script copy: src: generate-jks.sh dest: "{{generated_certs_dir}}/generate-jks.sh" check_mode: no -- name: Generate JKS chains +- name: Generate JKS pod template template: src: jks_pod.j2 dest: "{{mktemp.stdout}}/jks_pod.yaml" check_mode: no + changed_when: no + +# check if pod generated files exist -- if they all do don't run the pod +- name: Checking for elasticsearch.jks + stat: 
path="{{generated_certs_dir}}/elasticsearch.jks" + register: elasticsearch_jks + check_mode: no + +- name: Checking for logging-es.jks + stat: path="{{generated_certs_dir}}/logging-es.jks" + register: logging_es_jks + check_mode: no + +- name: Checking for system.admin.jks + stat: path="{{generated_certs_dir}}/system.admin.jks" + register: system_admin_jks + check_mode: no + +- name: Checking for truststore.jks + stat: path="{{generated_certs_dir}}/truststore.jks" + register: truststore_jks + check_mode: no -- name: create pod +- name: create JKS generation pod command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name register: podoutput check_mode: no + when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}} @@ -134,13 +159,13 @@ until: result.stdout.find("Succeeded") != -1 retries: 5 delay: 10 + changed_when: no + when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Generate proxy session - command: echo {{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} - register: session_secret + set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} check_mode: no - name: Generate oauth client secret - command: echo {{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} - register: oauth_secret + set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} check_mode: no diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml index ffd5f1e00..56f590717 100644 --- a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml +++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml @@ -10,3 +10,4 @@ name: "{{acct_name}}" namespace: "{{openshift_logging_namespace}}" check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml index 8b0ef377a..0b8b1014c 100644 --- a/roles/openshift_logging/tasks/generate_clusterroles.yaml +++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml @@ -8,3 +8,4 @@ verbs: - get check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml index 151cafd9d..8aea4e81f 100644 --- a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml +++ b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml @@ -9,6 +9,7 @@ es_host: logging-es es_port: "{{openshift_logging_es_port}}" check_mode: no + changed_when: no - name: Generate OPS kibana deploymentconfig template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml @@ -20,6 +21,7 @@ es_host: logging-es-ops es_port: "{{openshift_logging_es_ops_port}}" check_mode: no + changed_when: no - name: Generate elasticsearch deploymentconfig template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml @@ -30,6 +32,7 @@ image: 
"{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" es_cluster_name: "{{component}}" check_mode: no + changed_when: no - name: Generate OPS elasticsearch deploymentconfig template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml @@ -40,6 +43,7 @@ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" es_cluster_name: "{{component}}" check_mode: no + changed_when: no - name: Generate curator deploymentconfig template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml @@ -48,6 +52,7 @@ deploy_name: "logging-{{component}}" image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" check_mode: no + changed_when: no - name: Generate OPS curator deploymentconfig template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml @@ -57,3 +62,4 @@ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" openshift_logging_es_host: logging-es-ops check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml index ee4416bbd..601ec9e83 100644 --- a/roles/openshift_logging/tasks/generate_pvcs.yaml +++ b/roles/openshift_logging/tasks/generate_pvcs.yaml @@ -26,6 +26,7 @@ - not openshift_logging_es_pvc_dynamic - es_pvc_pool is defined check_mode: no + changed_when: no - name: Generating PersistentVolumeClaims - Dynamic template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml @@ -45,3 +46,4 @@ - openshift_logging_es_pvc_dynamic - es_pvc_pool is defined check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml index 02f81368d..7dc9530df 100644 --- a/roles/openshift_logging/tasks/generate_rolebindings.yaml +++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml @@ -9,3 +9,4 @@ - kind: ServiceAccount name: aggregated-logging-elasticsearch check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml index d280ac04c..25877ebff 100644 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -18,3 +18,4 @@ loop_control: loop_var: route_info when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana' + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml index e20b88c0f..c4a70114d 100644 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -9,10 +9,10 @@ - { name: "curator_key", file: "system.logging.curator.key"} - { name: "curator_cert", file: "system.logging.curator.crt"} - { name: "fluentd_key", file: "system.logging.fluentd.key"} - - { name: "fluentd_cert", file: "system.logging.fluentd.crt"} - - { name: "kibana_internal_key", file: "kibana-internal.key"} - - { name: "kibana_internal_cert", file: "kibana-internal.crt"} - - { name: "server_tls", file: "server-tls.json"} + - { name: "fluentd_cert", file: "system.logging.fluentd.crt"} + - { name: "kibana_internal_key", file: "kibana-internal.key"} + - { name: "kibana_internal_cert", file: "kibana-internal.crt"} + - { name: "server_tls", file: "server-tls.json"} - name: Generating secrets for logging 
components template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml @@ -34,14 +34,15 @@ when: secret_name not in openshift_logging_facts.{{component}}.secrets or secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0 check_mode: no + changed_when: no - name: Generating secrets for kibana proxy template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml vars: secret_name: logging-kibana-proxy - secrets: - - {key: oauth-secret, value: "{{oauth_secret.stdout}}"} - - {key: session-secret, value: "{{session_secret.stdout}}"} + secrets: + - {key: oauth-secret, value: "{{oauth_secret}}"} + - {key: session-secret, value: "{{session_secret}}"} - {key: server-key, value: "{{kibana_key_file}}"} - {key: server-cert, value: "{{kibana_cert_file}}"} - {key: server-tls, value: "{{server_tls_file}}"} @@ -52,6 +53,7 @@ when: secret_name not in openshift_logging_facts.kibana.secrets or secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0 check_mode: no + changed_when: no - name: Generating secrets for elasticsearch command: > @@ -67,7 +69,9 @@ when: secret_name not in openshift_logging_facts.elasticsearch.secrets or secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0 check_mode: no + changed_when: no - copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml when: logging_es_secret.stdout is defined check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml index 7b956e2e0..21bcdfecb 100644 --- a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml +++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml @@ -11,3 +11,4 @@ loop_control: loop_var: component check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml index 95f113577..8eaac76c4 100644 --- a/roles/openshift_logging/tasks/generate_services.yaml +++ b/roles/openshift_logging/tasks/generate_services.yaml @@ -11,6 +11,7 @@ provider: openshift component: es check_mode: no + changed_when: no - name: Generating logging-es-cluster service template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml @@ -24,6 +25,7 @@ provider: openshift component: es check_mode: no + changed_when: no - name: Generating logging-kibana service template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml @@ -37,6 +39,7 @@ provider: openshift component: kibana check_mode: no + changed_when: no - name: Generating logging-es-ops service template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml @@ -51,6 +54,7 @@ component: es-ops when: openshift_logging_use_ops check_mode: no + changed_when: no - name: Generating logging-es-ops-cluster service template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml @@ -65,6 +69,7 @@ component: es-ops when: openshift_logging_use_ops check_mode: no + changed_when: no - name: Generating logging-kibana-ops service template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml @@ -79,3 +84,4 @@ component: kibana-ops when: openshift_logging_use_ops check_mode: no + changed_when: no diff --git 
a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml index 165a9d14e..5f3063380 100644 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -1,4 +1,20 @@ --- +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} + register: curator_replica_count + when: not ansible_check_mode + ignore_errors: yes + changed_when: no + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} + register: curator_ops_replica_count + when: not ansible_check_mode + ignore_errors: yes + changed_when: no + - name: Generate curator deploymentconfig template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml vars: @@ -10,7 +26,9 @@ es_port: "{{openshift_logging_es_port}}" curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_memory_limit }}" + replicas: "{{curator_replica_count.stdout | default (1)}}" check_mode: no + changed_when: no - name: Generate OPS curator deploymentconfig template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml @@ -23,5 +41,7 @@ es_port: "{{openshift_logging_es_ops_port}}" curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}" + replicas: "{{curator_ops_replica_count.stdout | default (1)}}" when: openshift_logging_use_ops check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index c5d8d3537..b1f8855c4 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -38,6 +38,7 @@ check_mode: no when: - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" + changed_when: no # --------- Tasks for Operation clusters --------- @@ -103,3 +104,4 @@ - openshift_logging_use_ops - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}" check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml index 6f93081d7..4c510c6e7 100644 --- a/roles/openshift_logging/tasks/install_fluentd.yaml +++ b/roles/openshift_logging/tasks/install_fluentd.yaml @@ -1,22 +1,8 @@ --- -- command: > - echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}" - register: fluentd_ops_host +- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }} check_mode: no -- command: > - echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}" - register: fluentd_ops_port - check_mode: no - -- command: > - echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" - register: openshift_logging_fluentd_nodeselector_key - check_mode: no - -- command: > - echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}" - register: openshift_logging_fluentd_nodeselector_value +- set_fact: 
fluentd_ops_port={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }} check_mode: no - name: Generating Fluentd daemonset @@ -26,24 +12,43 @@ daemonset_component: fluentd daemonset_container_name: fluentd-elasticsearch daemonset_serviceAccount: aggregated-logging-fluentd - ops_host: "{{ fluentd_ops_host.stdout }}" - ops_port: "{{ fluentd_ops_port.stdout }}" - fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector_key.stdout}}" - fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}" + ops_host: "{{ fluentd_ops_host }}" + ops_port: "{{ fluentd_ops_port }}" + fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" + fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}" + check_mode: no + changed_when: no + +- name: "Check fluentd privileged permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get scc/privileged -o jsonpath='{.users}' + register: fluentd_privileged check_mode: no + changed_when: no -- name: "Set permissions for fluentd" +- name: "Set privileged permissions for fluentd" command: > {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd register: fluentd_output failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" check_mode: no + when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 + +- name: "Check fluentd cluster-reader permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}' + register: fluentd_cluster_reader + check_mode: no + changed_when: no -- name: "Set additional permissions for fluentd" +- name: "Set cluster-reader permissions for fluentd" command: > {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd register: fluentd2_output failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" check_mode: no + when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml index 382ab2522..f4ce85f97 100644 --- a/roles/openshift_logging/tasks/install_kibana.yaml +++ b/roles/openshift_logging/tasks/install_kibana.yaml @@ -1,4 +1,23 @@ --- +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} + register: kibana_replica_count + when: not ansible_check_mode + ignore_errors: yes + changed_when: no + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} + register: kibana_ops_replica_count + when: + - not ansible_check_mode + - openshift_logging_use_ops + ignore_errors: yes + changed_when: no + + - name: Generate kibana deploymentconfig template: src=kibana.j2 
dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml vars: @@ -13,7 +32,9 @@ kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}" kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}" + replicas: "{{kibana_replica_count.stdout | default (0)}}" check_mode: no + changed_when: no - name: Generate OPS kibana deploymentconfig template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml @@ -29,5 +50,7 @@ kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}" kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}" + replicas: "{{kibana_ops_replica_count.stdout | default (0)}}" when: openshift_logging_use_ops check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 09630e213..6a11baeb9 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -27,6 +27,7 @@ command: ls -1 {{mktemp.stdout}}/templates/ register: logging_objects check_mode: no + changed_when: no - name: Creating API objects from generated templates command: > diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml index 71979a7d8..da0bbb627 100644 --- a/roles/openshift_logging/tasks/install_support.yaml +++ b/roles/openshift_logging/tasks/install_support.yaml @@ -6,6 +6,7 @@ register: logging_project_result ignore_errors: yes when: not ansible_check_mode + changed_when: no - name: "Create logging project" command: > @@ -37,9 +38,10 @@ - name: Generate kibana-proxy oauth client template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml vars: - secret: "{{oauth_secret.stdout}}" - when: oauth_secret.stdout is defined + secret: "{{oauth_secret}}" + when: oauth_secret is defined check_mode: no + changed_when: no - include: generate_clusterroles.yaml diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml index f35ccc3b6..6a97fece2 100644 --- a/roles/openshift_logging/tasks/label_node.yaml +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -5,6 +5,7 @@ register: label_value failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr when: not ansible_check_mode + changed_when: no - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml index 2c046d6e6..44dd5e894 100644 --- a/roles/openshift_logging/tasks/procure_server_certs.yaml +++ b/roles/openshift_logging/tasks/procure_server_certs.yaml @@ -10,14 +10,12 @@ check_mode: no - name: Trying to discover server cert variable name for {{ cert_info.procure_component }} - command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}" - register: procure_component_crt + set_fact: procure_component_crt={{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }} when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined check_mode: no - name: Trying to discover the server key variable name for {{ 
cert_info.procure_component }} - command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}" - register: procure_component_key + set_fact: procure_component_key={{ lookup('env', '{{cert_info.procure_component}}' + '_key') }} when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined check_mode: no diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml index aa3e39641..42e9f0eb6 100644 --- a/roles/openshift_logging/tasks/scale.yaml +++ b/roles/openshift_logging/tasks/scale.yaml @@ -5,6 +5,7 @@ register: replica_count failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr when: not ansible_check_mode + changed_when: no - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}} @@ -24,3 +25,4 @@ when: - not ansible_check_mode - replica_count.stdout|int != desired + changed_when: no diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index 090ca8359..4ce6d1aa7 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -1,26 +1,17 @@ --- -- command: > - echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" - register: openshift_logging_fluentd_nodeselector_key - check_mode: no - -- command: > - echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}" - register: openshift_logging_fluentd_nodeselector_value - check_mode: no - - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}' register: fluentd_hosts when: "'--all' in openshift_logging_fluentd_hosts" check_mode: no + changed_when: no - name: start fluentd include: label_node.yaml vars: host: "{{fluentd_host}}" - label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}" - value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}" + label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" + value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}" with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}" loop_control: loop_var: fluentd_host @@ -29,6 +20,7 @@ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} register: es_dc check_mode: no + changed_when: no - name: start elasticsearch include: scale.yaml @@ -42,6 +34,7 @@ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}} register: kibana_dc check_mode: no + changed_when: no - name: start kibana include: scale.yaml @@ -55,6 +48,7 @@ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}} register: curator_dc check_mode: no + changed_when: no - name: start curator include: scale.yaml @@ -68,6 +62,7 @@ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}} register: es_dc check_mode: no + changed_when: no - name: start elasticsearch-ops include: scale.yaml @@ -82,6 +77,7 @@ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n 
{{openshift_logging_namespace}} register: kibana_dc check_mode: no + changed_when: no - name: start kibana-ops include: scale.yaml @@ -96,6 +92,7 @@ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} register: curator_dc check_mode: no + changed_when: no - name: start curator-ops include: scale.yaml diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml index dd3693f7e..e44493e4d 100644 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -1,22 +1,15 @@ --- -- command: > - echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" - register: openshift_logging_fluentd_nodeselector_key - -- command: > - echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}" - register: openshift_logging_fluentd_nodeselector_value - - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}' register: fluentd_hosts when: "'--all' in openshift_logging_fluentd_hosts" + changed_when: no - name: stop fluentd include: label_node.yaml vars: host: "{{fluentd_host}}" - label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}" + label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" unlabel: True with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}" loop_control: @@ -25,6 +18,7 @@ - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} register: es_dc + changed_when: no - name: stop elasticsearch include: scale.yaml @@ -37,6 +31,7 @@ - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}} register: kibana_dc + changed_when: no - name: stop kibana include: scale.yaml @@ -49,6 +44,7 @@ - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}} register: curator_dc + changed_when: no - name: stop curator include: scale.yaml @@ -61,6 +57,7 @@ - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}} register: es_dc + changed_when: no - name: stop elasticsearch-ops include: scale.yaml @@ -74,6 +71,7 @@ - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} register: kibana_dc + changed_when: no - name: stop kibana-ops include: scale.yaml @@ -87,6 +85,7 @@ - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} register: curator_dc + changed_when: no - name: stop curator-ops include: scale.yaml diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 index 3ffb48bfb..d3b5d33a2 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging/templates/curator.j2 @@ -7,7 +7,7 @@ metadata: component: "{{component}}" logging-infra: "{{logging_component}}" spec: - replicas: 0 + replicas: {{replicas|default(0)}} selector: provider: openshift 
component: "{{component}}" diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2 index e5d415f81..291589690 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging/templates/es.j2 @@ -8,7 +8,7 @@ metadata: deployment: "{{deploy_name}}" logging-infra: "{{logging_component}}" spec: - replicas: 0 + replicas: {{replicas|default(0)}} selector: provider: openshift component: "{{component}}" diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2 index ca3d727bf..1ec97701a 100644 --- a/roles/openshift_logging/templates/kibana.j2 +++ b/roles/openshift_logging/templates/kibana.j2 @@ -7,7 +7,7 @@ metadata: component: "{{component}}" logging-infra: "{{logging_component}}" spec: - replicas: 0 + replicas: {{replicas|default(0)}} selector: provider: openshift component: "{{component}}" -- cgit v1.2.3 From 8db66f0929fecb26b3c4e71fe0797f20df13007a Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Fri, 13 Jan 2017 15:04:57 -0600 Subject: Using oc_apply task for idempotent --- roles/openshift_logging/tasks/generate_certs.yaml | 46 ++++++++++++++++++++++ roles/openshift_logging/tasks/install_curator.yaml | 4 +- roles/openshift_logging/tasks/install_logging.yaml | 19 +++++---- roles/openshift_logging/tasks/oc_apply.yaml | 29 ++++++++++++++ 4 files changed, 87 insertions(+), 11 deletions(-) create mode 100644 roles/openshift_logging/tasks/oc_apply.yaml (limited to 'roles') diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 5e6498ad7..e16071e46 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -162,10 +162,56 @@ changed_when: no when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists +# check for secret/logging-kibana-proxy +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}' + register: kibana_secret_oauth_check + ignore_errors: yes + changed_when: no + check_mode: no + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}' + register: kibana_secret_session_check + ignore_errors: yes + changed_when: no + check_mode: no + +# check for oauthclient secret +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}' + register: oauth_secret_check + ignore_errors: yes + changed_when: no + check_mode: no + +# set or generate as needed - name: Generate proxy session set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} check_mode: no + when: + - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == '' + +- name: Generate proxy session + set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }} + check_mode: no + when: + - kibana_secret_session_check.stdout is defined + - kibana_secret_session_check.stdout != '' - name: Generate oauth client secret set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} 
check_mode: no + when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == '' + or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == '' + or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout + +- name: Generate oauth client secret + set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}} + check_mode: no + when: + - kibana_secret_oauth_check is defined + - kibana_secret_oauth_check.stdout != '' + - oauth_secret_check.stdout is defined + - oauth_secret_check.stdout != '' + - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml index 5f3063380..35116ae2b 100644 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -11,7 +11,9 @@ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} register: curator_ops_replica_count - when: not ansible_check_mode + when: + - not ansible_check_mode + - openshift_logging_use_ops ignore_errors: yes changed_when: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 6a11baeb9..af03e9371 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -23,16 +23,15 @@ loop_control: loop_var: install_component -- name: Register API objects from generated templates - command: ls -1 {{mktemp.stdout}}/templates/ - register: logging_objects - check_mode: no - changed_when: no - -- name: Creating API objects from generated templates - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig apply -f {{mktemp.stdout}}/templates/{{file}} -n {{openshift_logging_namespace}} - with_items: "{{logging_objects.stdout_lines | sort}}" +- name: Create objects + include: oc_apply.yaml + vars: + - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + - namespace: "{{ openshift_logging_namespace }}" + - file_name: "{{ file }}" + - file_content: "{{ lookup('file', file) | from_yaml }}" + with_fileglob: + - "{{ mktemp.stdout }}/templates/*.yaml" loop_control: loop_var: file when: not ansible_check_mode diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml new file mode 100644 index 000000000..c362b7fca --- /dev/null +++ b/roles/openshift_logging/tasks/oc_apply.yaml @@ -0,0 +1,29 @@ +--- +- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} + shell: > + {{ openshift.common.client_binary }} + --config={{ kubeconfig }} + get {{file_content.kind}} {{file_content.metadata.name}} + -o jsonpath='{.metadata.resourceVersion}' + -n {{namespace}} || echo 0 + register: generation_init + changed_when: no + +- name: Applying {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: no + +- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} + shell: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + get {{file_content.kind}} {{file_content.metadata.name}} + -o jsonpath='{.metadata.resourceVersion}' + -n 
{{namespace}} || echo 0 + register: generation_changed + failed_when: "'error' in generation_changed.stderr" + changed_when: generation_changed.stdout | int > generation_init.stdout | int -- cgit v1.2.3 From 054c2a9f169c5547458a4e168855aeb4812b5797 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Fri, 13 Jan 2017 16:25:16 -0600 Subject: Updating upgrade_logging to be more idempotent --- roles/openshift_logging/README.md | 4 +++- roles/openshift_logging/defaults/main.yml | 2 ++ roles/openshift_logging/files/es_migration.sh | 2 -- roles/openshift_logging/tasks/install_curator.yaml | 10 ++++++---- roles/openshift_logging/tasks/install_kibana.yaml | 6 ++++-- roles/openshift_logging/tasks/start_cluster.yaml | 4 ++-- roles/openshift_logging/tasks/upgrade_logging.yaml | 22 +++++++++++++++------- 7 files changed, 32 insertions(+), 18 deletions(-) (limited to 'roles') diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 9836fc217..2cc2c48ee 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -31,7 +31,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'. - `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'. - `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'. -- `openshift_logging_curator_memory_limit`: The amount of memor to allocate to Curator. Unset if not specified. +- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified. - `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'. - `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. @@ -39,6 +39,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'. - `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. +- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1. - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'. - `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'. @@ -84,3 +85,4 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta - `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. - `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. +- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1. 
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 4eb852207..919c53787 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -25,6 +25,7 @@ openshift_logging_kibana_memory_limit: null openshift_logging_kibana_proxy_debug: false openshift_logging_kibana_proxy_cpu_limit: null openshift_logging_kibana_proxy_memory_limit: null +openshift_logging_kibana_replica_count: 1 openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}" openshift_logging_kibana_ops_cpu_limit: null @@ -32,6 +33,7 @@ openshift_logging_kibana_ops_memory_limit: null openshift_logging_kibana_ops_proxy_debug: false openshift_logging_kibana_ops_proxy_cpu_limit: null openshift_logging_kibana_ops_proxy_memory_limit: null +openshift_logging_kibana_ops_replica_count: 1 openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} openshift_logging_fluentd_cpu_limit: 100m diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh index cca283bae..339b5a1b2 100644 --- a/roles/openshift_logging/files/es_migration.sh +++ b/roles/openshift_logging/files/es_migration.sh @@ -1,5 +1,3 @@ -#! bin/bash - CA=${1:-/etc/openshift/logging/ca.crt} KEY=${2:-/etc/openshift/logging/system.admin.key} CERT=${3:-/etc/openshift/logging/system.admin.crt} diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml index 35116ae2b..8f2825552 100644 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -1,5 +1,6 @@ --- -- command: > +- name: Check Curator current replica count + command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} register: curator_replica_count @@ -7,7 +8,8 @@ ignore_errors: yes changed_when: no -- command: > +- name: Check Curator ops current replica count + command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} register: curator_ops_replica_count @@ -28,7 +30,7 @@ es_port: "{{openshift_logging_es_port}}" curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_memory_limit }}" - replicas: "{{curator_replica_count.stdout | default (1)}}" + replicas: "{{curator_replica_count.stdout | default (0)}}" check_mode: no changed_when: no @@ -43,7 +45,7 @@ es_port: "{{openshift_logging_es_ops_port}}" curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}" - replicas: "{{curator_ops_replica_count.stdout | default (1)}}" + replicas: "{{curator_ops_replica_count.stdout | default (0)}}" when: openshift_logging_use_ops check_mode: no changed_when: no diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml index f4ce85f97..de4b018dd 100644 --- a/roles/openshift_logging/tasks/install_kibana.yaml +++ b/roles/openshift_logging/tasks/install_kibana.yaml @@ -1,5 +1,6 @@ --- -- command: > +- name: Check Kibana current replica count + command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana -o jsonpath='{.spec.replicas}' -n 
{{openshift_logging_namespace}} register: kibana_replica_count @@ -7,7 +8,8 @@ ignore_errors: yes changed_when: no -- command: > +- name: Check Kibana ops current replica count + command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} register: kibana_ops_replica_count diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index 4ce6d1aa7..a96ad3f3a 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -39,7 +39,7 @@ - name: start kibana include: scale.yaml vars: - desired: 1 + desired: "{{ openshift_logging_kibana_replica_count | default (1) }}" with_items: "{{kibana_dc.stdout_lines}}" loop_control: loop_var: object @@ -82,7 +82,7 @@ - name: start kibana-ops include: scale.yaml vars: - desired: 1 + desired: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}" with_items: "{{kibana_dc.stdout_lines}}" loop_control: loop_var: object diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml index 9b285a5fe..a93463239 100644 --- a/roles/openshift_logging/tasks/upgrade_logging.yaml +++ b/roles/openshift_logging/tasks/upgrade_logging.yaml @@ -7,7 +7,7 @@ vars: start_cluster: False -# ensure that ES is running +# start ES so that we can run migrate script - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}} register: es_dc @@ -21,13 +21,21 @@ loop_control: loop_var: object -- copy: - src: es_migration.sh - dest: {{mktemp.stdout}}/es_migration.sh +- command: > + {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}' + register: running_pod + until: running_pod.stdout != '' + retries: 30 + delay: 10 + changed_when: no + check_mode: no -- name: Run upgrade scripts - command: > - sh {{mktemp.stdout}}/es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}} +- name: Run upgrade script + script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}} + register: script_output + changed_when: + - script_output.rc == 0 + - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1 - name: Start up rest of cluster include: start_cluster.yaml -- cgit v1.2.3 From 9cf70bb6991df874350ea0f5c97da26bb6757edb Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Fri, 13 Jan 2017 16:37:27 -0600 Subject: additional comments addressed --- roles/openshift_logging/files/elasticsearch.yml | 74 --------------------- .../tasks/generate_configmaps.yaml | 13 ++-- .../templates/elasticsearch.yml.j2 | 75 ++++++++++++++++++++++ 3 files changed, 79 insertions(+), 83 deletions(-) delete mode 100644 roles/openshift_logging/files/elasticsearch.yml create mode 100644 
roles/openshift_logging/templates/elasticsearch.yml.j2 (limited to 'roles') diff --git a/roles/openshift_logging/files/elasticsearch.yml b/roles/openshift_logging/files/elasticsearch.yml deleted file mode 100644 index 4eff30e61..000000000 --- a/roles/openshift_logging/files/elasticsearch.yml +++ /dev/null @@ -1,74 +0,0 @@ -cluster: - name: ${CLUSTER_NAME} - -script: - inline: on - indexed: on - -index: - number_of_shards: 1 - number_of_replicas: 0 - auto_expand_replicas: 0-3 - unassigned.node_left.delayed_timeout: 2m - translog: - flush_threshold_size: 256mb - flush_threshold_period: 5m - -node: - master: true - data: true - -network: - host: 0.0.0.0 - -cloud: - kubernetes: - service: ${SERVICE_DNS} - namespace: ${NAMESPACE} - -discovery: - type: kubernetes - zen.ping.multicast.enabled: false - -gateway: - expected_master_nodes: ${NODE_QUORUM} - recover_after_nodes: ${RECOVER_AFTER_NODES} - expected_nodes: ${RECOVER_EXPECTED_NODES} - recover_after_time: ${RECOVER_AFTER_TIME} - -io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] - -openshift.searchguard: - keystore.path: /etc/elasticsearch/secret/admin.jks - truststore.path: /etc/elasticsearch/secret/searchguard.truststore - - -path: - data: /elasticsearch/persistent/${CLUSTER_NAME}/data - logs: /elasticsearch/${CLUSTER_NAME}/logs - work: /elasticsearch/${CLUSTER_NAME}/work - scripts: /elasticsearch/${CLUSTER_NAME}/scripts - -searchguard: - authcz.admin_dn: - - CN=system.admin,OU=OpenShift,O=Logging - config_index_name: ".searchguard.${HOSTNAME}" - ssl: - transport: - enabled: true - enforce_hostname_verification: false - keystore_type: JKS - keystore_filepath: /etc/elasticsearch/secret/searchguard.key - keystore_password: kspass - truststore_type: JKS - truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore - truststore_password: tspass - http: - enabled: true - keystore_type: JKS - keystore_filepath: /etc/elasticsearch/secret/key - keystore_password: kspass - clientauth_mode: OPTIONAL - truststore_type: JKS - truststore_filepath: /etc/elasticsearch/secret/truststore - truststore_password: tspass diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml index f9f9ee79f..b24a7c342 100644 --- a/roles/openshift_logging/tasks/generate_configmaps.yaml +++ b/roles/openshift_logging/tasks/generate_configmaps.yaml @@ -6,16 +6,11 @@ when: es_logging_contents is undefined changed_when: no - - copy: - src: elasticsearch.yml - dest: "{{mktemp.stdout}}/elasticsearch.yml" - when: es_config_contents is undefined - changed_when: no - - - lineinfile: + - template: + src: elasticsearch.yml.j2 dest: "{{mktemp.stdout}}/elasticsearch.yml" - regexp: '^openshift\.operations\.allow_cluster_reader(.)*$' - line: "\nopenshift.operations.allow_cluster_reader: {{openshift_logging_es_ops_allow_cluster_reader | lower}}" + vars: + - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}" when: es_config_contents is undefined changed_when: no diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2 new file mode 100644 index 000000000..dad78b844 --- /dev/null +++ b/roles/openshift_logging/templates/elasticsearch.yml.j2 @@ -0,0 +1,75 @@ +cluster: + name: ${CLUSTER_NAME} + +script: + inline: on + indexed: on + +index: + number_of_shards: 1 + number_of_replicas: 0 + auto_expand_replicas: 0-3 + 
unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + +node: + master: true + data: true + +network: + host: 0.0.0.0 + +cloud: + kubernetes: + service: ${SERVICE_DNS} + namespace: ${NAMESPACE} + +discovery: + type: kubernetes + zen.ping.multicast.enabled: false + +gateway: + expected_master_nodes: ${NODE_QUORUM} + recover_after_nodes: ${RECOVER_AFTER_NODES} + expected_nodes: ${RECOVER_EXPECTED_NODES} + recover_after_time: ${RECOVER_AFTER_TIME} + +io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] + +openshift.searchguard: + keystore.path: /etc/elasticsearch/secret/admin.jks + truststore.path: /etc/elasticsearch/secret/searchguard.truststore + +openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default ('false')}} + +path: + data: /elasticsearch/persistent/${CLUSTER_NAME}/data + logs: /elasticsearch/${CLUSTER_NAME}/logs + work: /elasticsearch/${CLUSTER_NAME}/work + scripts: /elasticsearch/${CLUSTER_NAME}/scripts + +searchguard: + authcz.admin_dn: + - CN=system.admin,OU=OpenShift,O=Logging + config_index_name: ".searchguard.${HOSTNAME}" + ssl: + transport: + enabled: true + enforce_hostname_verification: false + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/searchguard.key + keystore_password: kspass + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore + truststore_password: tspass + http: + enabled: true + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/key + keystore_password: kspass + clientauth_mode: OPTIONAL + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/truststore + truststore_password: tspass -- cgit v1.2.3 From 427d4c235c58196f5745184c9d0b3e6bc2a92618 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Fri, 13 Jan 2017 17:03:32 -0600 Subject: delete idempotent --- roles/openshift_logging/tasks/delete_logging.yaml | 21 +++++++++++++++++++++ roles/openshift_logging/tasks/label_node.yaml | 1 + 2 files changed, 22 insertions(+) (limited to 'roles') diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 6e8fc29d0..908f3ee88 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -14,17 +14,24 @@ - routes - templates - daemonset + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 + # delete the oauthclient - name: delete oauthclient kibana-proxy command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete any image streams that we may have created - name: delete logging is command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete our old secrets - name: delete logging secrets @@ -38,6 +45,8 @@ - logging-kibana-proxy - logging-curator ignore_errors: yes + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete role bindings - 
name: delete rolebindings @@ -46,6 +55,8 @@ delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true with_items: - logging-elasticsearch-view-role + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete cluster role bindings - name: delete cluster role bindings @@ -54,6 +65,8 @@ delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true with_items: - rolebinding-reader + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete cluster roles - name: delete cluster roles @@ -62,6 +75,8 @@ delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true with_items: - rolebinding-reader + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete our service accounts - name: delete service accounts @@ -73,6 +88,8 @@ - aggregated-logging-kibana - aggregated-logging-curator - aggregated-logging-fluentd + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete our roles - name: delete roles @@ -81,6 +98,8 @@ delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true with_items: - daemonset-admin + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 # delete our configmaps - name: delete configmaps @@ -91,3 +110,5 @@ - logging-curator - logging-elasticsearch - logging-fluentd + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml index 6a97fece2..aecb5d81b 100644 --- a/roles/openshift_logging/tasks/label_node.yaml +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -26,3 +26,4 @@ - unlabel is defined - unlabel - not ansible_check_mode + - label_value.stdout != "" -- cgit v1.2.3 From 598b2652ac9bfe94622cbe6324d4f121bf996c70 Mon Sep 17 00:00:00 2001 From: ewolinetz Date: Tue, 17 Jan 2017 14:42:41 -0600 Subject: Addressing Travis errors --- .../filter_plugins/openshift_logging.py | 29 +- .../library/openshift_logging_facts.py | 333 ++++++++++++--------- roles/openshift_logging/meta/main.yaml | 2 +- .../openshift_logging/tasks/generate_secrets.yaml | 20 +- .../tasks/install_elasticsearch.yaml | 4 +- roles/openshift_logging/tasks/scale.yaml | 4 +- roles/openshift_logging/vars/main.yaml | 4 +- 7 files changed, 220 insertions(+), 176 deletions(-) (limited to 'roles') diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index b42d5da5f..007be3ac0 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -1,28 +1,37 @@ -import random, string -import shutil -import sys -import StringIO +''' + Openshift Logging class that provides useful filters used in Logging +''' -def random_word(source_alpha,length): +import random + + +def random_word(source_alpha, length): + ''' Returns a random word given the source of characters to pick from and resulting length ''' return ''.join(random.choice(source_alpha) for i in range(length)) + def entry_from_named_pair(register_pairs, key): - from ansible.utils.display import Display + ''' Returns the entry in key given 
results provided by register_pairs ''' results = register_pairs.get("results") - if results == None: - raise RuntimeError("The dict argument does not have a 'results' entry. Must not have been created using 'register' in a loop") + if results is None: + raise RuntimeError("The dict argument does not have a 'results' entry. " + "Must not have been created using 'register' in a loop") for result in results: item = result.get("item") - if item != None: - name = item.get("name") + if item is not None: + name = item.get("name") if name == key: return result["content"] raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key)) + +# pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' + # pylint: disable=no-self-use, too-few-public-methods def filters(self): + ''' Returns the names of the filters provided by this class ''' return { 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 1f0c25a84..8bbfdf7bf 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -1,5 +1,4 @@ - -DOCUMENTATION = """ +''' --- module: openshift_logging_facts version_added: "" @@ -8,7 +7,19 @@ description: - Determine the current facts about the OpenShift logging stack (e.g. cluster size) options: author: Red Hat, Inc -""" +''' + +import copy +import json + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import +from subprocess import * # noqa: F402,F403 + +# ignore pylint errors related to the module_utils import +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import +from ansible.module_utils.basic import * # noqa: F402,F403 + +import yaml EXAMPLES = """ - action: opneshift_logging_facts @@ -17,238 +28,258 @@ EXAMPLES = """ RETURN = """ """ -import copy -import json -import exceptions -import yaml -from subprocess import * +DEFAULT_OC_OPTIONS = ["-o", "json"] -default_oc_options = ["-o","json"] +# constants used for various labels and selectors +COMPONENT_KEY = "component" +LOGGING_INFRA_KEY = "logging-infra" -#constants used for various labels and selectors -COMPONENT_KEY="component" -LOGGING_INFRA_KEY="logging-infra" +# selectors for filtering resources +DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd" +LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support" +ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift" +COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"] -#selectors for filtering resources -DS_FLUENTD_SELECTOR=LOGGING_INFRA_KEY + "=" + "fluentd" -LOGGING_SELECTOR=LOGGING_INFRA_KEY + "=" + "support" -ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift" -COMPONENTS = ["kibana","curator","elasticsearch","fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"] class OCBaseCommand(object): + ''' The base class used to query openshift ''' + def __init__(self, binary, kubeconfig, namespace): + ''' the init method of OCBaseCommand class ''' self.binary = binary self.kubeconfig = kubeconfig - self.user = self.getSystemAdmin(self.kubeconfig) + self.user = self.get_system_admin(self.kubeconfig) self.namespace = namespace - def getSystemAdmin(self,kubeconfig): - with open(kubeconfig,'r') as f: - config = 
yaml.load(f) + # pylint: disable=no-self-use + def get_system_admin(self, kubeconfig): + ''' Retrieves the system admin ''' + with open(kubeconfig, 'r') as kubeconfig_file: + config = yaml.load(kubeconfig_file) for user in config["users"]: if user["name"].startswith("system:admin"): return user["name"] raise Exception("Unable to find system:admin in: " + kubeconfig) - def oc(self, sub, kind, namespace=None, name=None,addOptions=[]): + # pylint: disable=too-many-arguments, dangerous-default-value + def oc_command(self, sub, kind, namespace=None, name=None, add_options=None): + ''' Wrapper method for the "oc" command ''' cmd = [self.binary, sub, kind] - if name != None: + if name is not None: cmd = cmd + [name] - if namespace != None: + if namespace is not None: cmd = cmd + ["-n", namespace] - cmd = cmd + ["--user="+self.user,"--config="+self.kubeconfig] + default_oc_options + addOptions + if add_options is None: + add_options = [] + cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options try: - process = Popen(cmd, stdout=PIPE, stderr=PIPE) + process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405 out, err = process.communicate(cmd) if len(err) > 0: if 'not found' in err: - return {'items':[]} + return {'items': []} if 'No resources found' in err: - return {'items':[]} + return {'items': []} raise Exception(err) - except Exception as e: - err = "There was an exception trying to run the command '"+ " ".join(cmd) +"' " + str(e) + except Exception as excp: + err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp) raise Exception(err) return json.loads(out) -class OpenshiftLoggingFacts(OCBaseCommand): +class OpenshiftLoggingFacts(OCBaseCommand): + ''' The class structure for holding the OpenshiftLogging Facts''' name = "facts" def __init__(self, logger, binary, kubeconfig, namespace): + ''' The init method for OpenshiftLoggingFacts ''' super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace) self.logger = logger self.facts = dict() - def defaultKeysFor(self, kind): + def default_keys_for(self, kind): + ''' Sets the default key values for kind ''' for comp in COMPONENTS: - self.addFactsFor(comp, kind) + self.add_facts_for(comp, kind) - def addFactsFor(self, comp, kind, name=None, facts=None): - if self.facts.has_key(comp) == False: + def add_facts_for(self, comp, kind, name=None, facts=None): + ''' Add facts for the provided kind ''' + if comp in self.facts is False: self.facts[comp] = dict() - if self.facts[comp].has_key(kind) == False: + if kind in self.facts[comp] is False: self.facts[comp][kind] = dict() if name: - self.facts[comp][kind][name] = facts + self.facts[comp][kind][name] = facts - def factsForRoutes(self, namespace): - self.defaultKeysFor("routes") - routeList = self.oc("get","routes", namespace=namespace, addOptions=["-l",ROUTE_SELECTOR]) - if len(routeList["items"]) == 0: + def facts_for_routes(self, namespace): + ''' Gathers facts for Routes in logging namespace ''' + self.default_keys_for("routes") + route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR]) + if len(route_list["items"]) == 0: return None - for route in routeList["items"]: + for route in route_list["items"]: name = route["metadata"]["name"] comp = self.comp(name) - if comp != None: - self.addFactsFor(comp, "routes", name, dict(host=route["spec"]["host"])) + if comp is not None: + self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"])) 
self.facts["agl_namespace"] = namespace - - def factsForDaemonsets(self, namespace): - self.defaultKeysFor("daemonsets") - dsList = self.oc("get", "daemonsets", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY+"=fluentd"]) - if len(dsList["items"]) == 0: + def facts_for_daemonsets(self, namespace): + ''' Gathers facts for Daemonsets in logging namespace ''' + self.default_keys_for("daemonsets") + ds_list = self.oc_command("get", "daemonsets", namespace=namespace, + add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"]) + if len(ds_list["items"]) == 0: return - for ds in dsList["items"]: - name = ds["metadata"]["name"] + for ds_item in ds_list["items"]: + name = ds_item["metadata"]["name"] comp = self.comp(name) - spec = ds["spec"]["template"]["spec"] + spec = ds_item["spec"]["template"]["spec"] container = spec["containers"][0] result = dict( - selector = ds["spec"]["selector"], - image = container["image"], - resources = container["resources"], - nodeSelector = spec["nodeSelector"], - serviceAccount = spec["serviceAccount"], - terminationGracePeriodSeconds = spec["terminationGracePeriodSeconds"] + selector=ds_item["spec"]["selector"], + image=container["image"], + resources=container["resources"], + nodeSelector=spec["nodeSelector"], + serviceAccount=spec["serviceAccount"], + terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"] ) - self.addFactsFor(comp, "daemonsets", name, result) + self.add_facts_for(comp, "daemonsets", name, result) - def factsForPvcs(self, namespace): - self.defaultKeysFor("pvcs") - pvclist = self.oc("get", "pvc", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY]) + def facts_for_pvcs(self, namespace): + ''' Gathers facts for PVCS in logging namespace''' + self.default_keys_for("pvcs") + pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY]) if len(pvclist["items"]) == 0: return - pvcs = [] for pvc in pvclist["items"]: name = pvc["metadata"]["name"] comp = self.comp(name) - self.addFactsFor(comp,"pvcs",name,dict()) + self.add_facts_for(comp, "pvcs", name, dict()) - def factsForDeploymentConfigs(self, namespace): - self.defaultKeysFor("deploymentconfigs") - dclist = self.oc("get", "deploymentconfigs", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY]) + def facts_for_deploymentconfigs(self, namespace): + ''' Gathers facts for DeploymentConfigs in logging namespace ''' + self.default_keys_for("deploymentconfigs") + dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY]) if len(dclist["items"]) == 0: return dcs = dclist["items"] - for dc in dcs: - name = dc["metadata"]["name"] + for dc_item in dcs: + name = dc_item["metadata"]["name"] comp = self.comp(name) - if comp != None: - spec = dc["spec"]["template"]["spec"] + if comp is not None: + spec = dc_item["spec"]["template"]["spec"] facts = dict( - selector = dc["spec"]["selector"], - replicas = dc["spec"]["replicas"], - serviceAccount = spec["serviceAccount"], - containers = dict(), - volumes = dict() + selector=dc_item["spec"]["selector"], + replicas=dc_item["spec"]["replicas"], + serviceAccount=spec["serviceAccount"], + containers=dict(), + volumes=dict() ) - if spec.has_key("volumes"): + if "volumes" in spec: for vol in spec["volumes"]: clone = copy.deepcopy(vol) clone.pop("name", None) facts["volumes"][vol["name"]] = clone for container in spec["containers"]: facts["containers"][container["name"]] = dict( - image = container["image"], - resources = container["resources"], + 
image=container["image"], + resources=container["resources"], ) - self.addFactsFor(comp,"deploymentconfigs",name,facts) + self.add_facts_for(comp, "deploymentconfigs", name, facts) - def factsForServices(self, namespace): - self.defaultKeysFor("services") - servicelist = self.oc("get", "services", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR]) + def facts_for_services(self, namespace): + ''' Gathers facts for services in logging namespace ''' + self.default_keys_for("services") + servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR]) if len(servicelist["items"]) == 0: return for service in servicelist["items"]: name = service["metadata"]["name"] comp = self.comp(name) - if comp != None: - self.addFactsFor(comp, "services", name, dict()) - - def factsForConfigMaps(self, namespace): - self.defaultKeysFor("configmaps") - aList = self.oc("get", "configmaps", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR]) - if len(aList["items"]) == 0: + if comp is not None: + self.add_facts_for(comp, "services", name, dict()) + + def facts_for_configmaps(self, namespace): + ''' Gathers facts for configmaps in logging namespace ''' + self.default_keys_for("configmaps") + a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR]) + if len(a_list["items"]) == 0: return - for item in aList["items"]: + for item in a_list["items"]: name = item["metadata"]["name"] comp = self.comp(name) - if comp != None: - self.addFactsFor(comp, "configmaps", name, item["data"]) - - def factsForOAuthClients(self, namespace): - self.defaultKeysFor("oauthclients") - aList = self.oc("get", "oauthclients", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR]) - if len(aList["items"]) == 0: + if comp is not None: + self.add_facts_for(comp, "configmaps", name, item["data"]) + + def facts_for_oauthclients(self, namespace): + ''' Gathers facts for oauthclients used with logging ''' + self.default_keys_for("oauthclients") + a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR]) + if len(a_list["items"]) == 0: return - for item in aList["items"]: + for item in a_list["items"]: name = item["metadata"]["name"] comp = self.comp(name) - if comp != None: + if comp is not None: result = dict( - redirectURIs = item["redirectURIs"] + redirectURIs=item["redirectURIs"] ) - self.addFactsFor(comp, "oauthclients", name, result) + self.add_facts_for(comp, "oauthclients", name, result) - def factsForSecrets(self, namespace): - self.defaultKeysFor("secrets") - aList = self.oc("get", "secrets", namespace=namespace) - if len(aList["items"]) == 0: + def facts_for_secrets(self, namespace): + ''' Gathers facts for secrets in the logging namespace ''' + self.default_keys_for("secrets") + a_list = self.oc_command("get", "secrets", namespace=namespace) + if len(a_list["items"]) == 0: return - for item in aList["items"]: + for item in a_list["items"]: name = item["metadata"]["name"] comp = self.comp(name) - if comp != None and item["type"] == "Opaque": + if comp is not None and item["type"] == "Opaque": result = dict( - keys = item["data"].keys() + keys=item["data"].keys() ) - self.addFactsFor(comp, "secrets", name, result) + self.add_facts_for(comp, "secrets", name, result) - def factsForSCCs(self, namespace): - self.defaultKeysFor("sccs") - scc = self.oc("get", "scc", name="privileged") + def facts_for_sccs(self): + ''' Gathers facts for SCCs used with logging ''' + 
self.default_keys_for("sccs") + scc = self.oc_command("get", "scc", name="privileged") if len(scc["users"]) == 0: return for item in scc["users"]: comp = self.comp(item) - if comp != None: - self.addFactsFor(comp, "sccs", "privileged", dict()) + if comp is not None: + self.add_facts_for(comp, "sccs", "privileged", dict()) - def factsForClusterRoleBindings(self, namespace): - self.defaultKeysFor("clusterrolebindings") - role = self.oc("get", "clusterrolebindings", name="cluster-readers") - if "subjects" not in role or len(role["subjects"]) == 0: + def facts_for_clusterrolebindings(self, namespace): + ''' Gathers ClusterRoleBindings used with logging ''' + self.default_keys_for("clusterrolebindings") + role = self.oc_command("get", "clusterrolebindings", name="cluster-readers") + if "subjects" not in role or len(role["subjects"]) == 0: return for item in role["subjects"]: comp = self.comp(item["name"]) - if comp != None and namespace == item["namespace"]: - self.addFactsFor(comp, "clusterrolebindings", "cluster-readers", dict()) + if comp is not None and namespace == item["namespace"]: + self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict()) # this needs to end up nested under the service account... - def factsForRoleBindings(self, namespace): - self.defaultKeysFor("rolebindings") - role = self.oc("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role") + def facts_for_rolebindings(self, namespace): + ''' Gathers facts for RoleBindings used with logging ''' + self.default_keys_for("rolebindings") + role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role") if "subjects" not in role or len(role["subjects"]) == 0: return for item in role["subjects"]: comp = self.comp(item["name"]) - if comp != None and namespace == item["namespace"]: - self.addFactsFor(comp, "rolebindings", "logging-elasticsearch-view-role", dict()) + if comp is not None and namespace == item["namespace"]: + self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict()) + # pylint: disable=no-self-use, too-many-return-statements def comp(self, name): + ''' Does a comparison to evaluate the logging component ''' if name.startswith("logging-curator-ops"): return "curator_ops" elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"): @@ -266,38 +297,44 @@ class OpenshiftLoggingFacts(OCBaseCommand): else: return None - def do(self): - self.factsForRoutes(self.namespace) - self.factsForDaemonsets(self.namespace) - self.factsForDeploymentConfigs(self.namespace) - self.factsForServices(self.namespace) - self.factsForConfigMaps(self.namespace) - self.factsForSCCs(self.namespace) - self.factsForOAuthClients(self.namespace) - self.factsForClusterRoleBindings(self.namespace) - self.factsForRoleBindings(self.namespace) - self.factsForSecrets(self.namespace) - self.factsForPvcs(self.namespace) + def build_facts(self): + ''' Builds the logging facts and returns them ''' + self.facts_for_routes(self.namespace) + self.facts_for_daemonsets(self.namespace) + self.facts_for_deploymentconfigs(self.namespace) + self.facts_for_services(self.namespace) + self.facts_for_configmaps(self.namespace) + self.facts_for_sccs() + self.facts_for_oauthclients(self.namespace) + self.facts_for_clusterrolebindings(self.namespace) + self.facts_for_rolebindings(self.namespace) + self.facts_for_secrets(self.namespace) + self.facts_for_pvcs(self.namespace) return self.facts + def main(): - module = AnsibleModule( + ''' The main method 
''' + module = AnsibleModule( # noqa: F405 argument_spec=dict( - admin_kubeconfig = {"required": True, "type": "str"}, - oc_bin = {"required": True, "type": "str"}, - openshift_logging_namespace = {"required": True, "type": "str"} + admin_kubeconfig={"required": True, "type": "str"}, + oc_bin={"required": True, "type": "str"}, + openshift_logging_namespace={"required": True, "type": "str"} ), - supports_check_mode = False + supports_check_mode=False ) try: - cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],module.params['openshift_logging_namespace']) + cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'], + module.params['openshift_logging_namespace']) module.exit_json( - ansible_facts = {"openshift_logging_facts": cmd.do() } + ansible_facts={"openshift_logging_facts": cmd.build_facts()} ) - except Exception as e: - module.fail_json(msg=str(e)) + # ignore broad-except error to avoid stack trace to ansible user + # pylint: disable=broad-except + except Exception as error: + module.fail_json(msg=str(error)) + -from ansible.module_utils.basic import * if __name__ == '__main__': main() diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml index a95c84901..7050e51db 100644 --- a/roles/openshift_logging/meta/main.yaml +++ b/roles/openshift_logging/meta/main.yaml @@ -12,4 +12,4 @@ galaxy_info: categories: - cloud dependencies: - - role: openshift_facts +- role: openshift_facts diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml index c4a70114d..1829acaee 100644 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -21,9 +21,9 @@ secret_key_file: "{{component}}_key" secret_cert_file: "{{component}}_cert" secrets: - - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} - - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} - - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} + - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} + - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} + - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} secret_keys: ["ca", "cert", "key"] with_items: - kibana @@ -41,11 +41,11 @@ vars: secret_name: logging-kibana-proxy secrets: - - {key: oauth-secret, value: "{{oauth_secret}}"} - - {key: session-secret, value: "{{session_secret}}"} - - {key: server-key, value: "{{kibana_key_file}}"} - - {key: server-cert, value: "{{kibana_cert_file}}"} - - {key: server-tls, value: "{{server_tls_file}}"} + - {key: oauth-secret, value: "{{oauth_secret}}"} + - {key: session-secret, value: "{{session_secret}}"} + - {key: server-key, value: "{{kibana_key_file}}"} + - {key: server-cert, value: "{{kibana_cert_file}}"} + - {key: server-tls, value: "{{server_tls_file}}"} secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"] kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}" kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}" @@ -63,8 +63,8 @@ admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt admin-ca={{generated_certs_dir}}/ca.crt 
admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml vars: - secret_name: logging-elasticsearch - secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"] + secret_name: logging-elasticsearch + secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"] register: logging_es_secret when: secret_name not in openshift_logging_facts.elasticsearch.secrets or secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0 diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index b1f8855c4..fbba46a35 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -5,7 +5,7 @@ es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}" es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}" when: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" - name: Init pool of DeploymentConfig names for Elasticsearch set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}} @@ -16,7 +16,7 @@ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}} when: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}" check_mode: no diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml index 42e9f0eb6..125d3b8af 100644 --- a/roles/openshift_logging/tasks/scale.yaml +++ b/roles/openshift_logging/tasks/scale.yaml @@ -23,6 +23,6 @@ retries: 30 delay: 10 when: - - not ansible_check_mode - - replica_count.stdout|int != desired + - not ansible_check_mode + - replica_count.stdout|int != desired changed_when: no diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml index 4725820da..11662c446 100644 --- a/roles/openshift_logging/vars/main.yaml +++ b/roles/openshift_logging/vars/main.yaml @@ -1,10 +1,8 @@ - +--- openshift_master_config_dir: "{{ openshift.common.config_base }}/master" - es_node_quorum: "{{openshift_logging_es_cluster_size/2 + 1}}" es_recover_after_nodes: "{{openshift_logging_es_cluster_size - 1}}" es_recover_expected_nodes: "{{openshift_logging_es_cluster_size}}" - es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size/2 + 1}}" es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size - 1}}" es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size}}" -- cgit v1.2.3