Diffstat (limited to 'roles')
60 files changed, 948 insertions, 533 deletions
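The new cockpit-ui role below drives everything through the `oc` client and keeps its commands idempotent by inspecting stderr: a rerun that hits an existing object prints "already exists", which is treated as "no change" rather than a failure. A minimal sketch of that pattern, assuming an `oc` binary on the PATH and an illustrative hosts group (`masters`) and service name (`foo`):

    ---
    - hosts: masters
      tasks:
      - name: Expose an existing service (idempotency sketch)
        command: oc expose service foo -n default
        register: expose_foo
        # "already exists" on a rerun means nothing changed and nothing failed.
        changed_when: "'already exists' not in expose_foo.stderr"
        failed_when: "'already exists' not in expose_foo.stderr and expose_foo.rc != 0"

The same register/changed_when/failed_when trio is attached to each oc call in the role, so the play can be re-run safely against a cluster where the route and registry console already exist.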
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml new file mode 100644 index 000000000..6ad2e324a --- /dev/null +++ b/roles/cockpit-ui/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Samuel Munilla + description: Deploy and Enable cockpit-ui + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.1 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml new file mode 100644 index 000000000..c752bcff1 --- /dev/null +++ b/roles/cockpit-ui/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- name: Expose docker-registry + command: > + {{ openshift.common.client_binary }} expose service docker-registry -n default + register: expose_docker_registry + changed_when: "'already exists' not in expose_docker_registry.stderr" + failed_when: "'already exists' not in expose_docker_registry.stderr and expose_docker_registry.rc != 0" + +- name: Create passthrough route for registry-console + command: > + {{ openshift.common.client_binary }} create route passthrough + --service registry-console + --port registry-console + -n default + register: create_registry_console_route + changed_when: "'already exists' not in create_registry_console_route.stderr" + failed_when: "'already exists' not in create_registry_console_route.stderr and create_registry_console_route.rc != 0" + +- name: Retrieve docker-registry route + command: "{{ openshift.common.client_binary }} get route docker-registry -n default --template='{{ '{{' }} .spec.host {{ '}}' }}'" + register: docker_registry_route + failed_when: false + changed_when: false + +- name: Retrieve cockpit kube url + command: "{{ openshift.common.client_binary }} get route registry-console -n default --template='https://{{ '{{' }} .spec.host {{ '}}' }}'" + register: registry_console_cockpit_kube_url + failed_when: false + changed_when: false + +- set_fact: + cockpit_image_prefix: "{{ '-p IMAGE_PREFIX=' ~ openshift_cockpit_deployer_prefix | default('') }}" + +- name: Deploy registry-console + command: > + {{ openshift.common.client_binary }} new-app --template=registry-console + {{ cockpit_image_prefix }} + -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}" + -p REGISTRY_HOST="{{ docker_registry_route.stdout }}:80" + -p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}" + -n default + register: deploy_registry_console + changed_when: "'already exists' not in deploy_registry_console.stderr" + failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 87a4e7af0..a89f5b91a 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -67,7 +67,7 @@ dest: /etc/sysconfig/docker regexp: '^{{ item.reg_conf_var }}=.*$' line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" - when: item.reg_fact_val != '' and docker_check.stat.isreg + when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg with_items: - reg_conf_var: ADD_REGISTRY reg_fact_val: "{{ docker_additional_registries | default(None, true)}}" @@ -96,7 +96,8 @@ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}" notify: - restart docker - when: "{{ 'http_proxy' in openshift.common or 'https_proxy' in openshift.common and docker_check.stat.isreg }}" + when: + - docker_check.stat.isreg is 
defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' - name: Set various Docker options lineinfile: @@ -108,7 +109,7 @@ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ {% if docker_options is defined %} {{ docker_options }}{% endif %}\ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" - when: docker_check.stat.isreg + when: docker_check.stat.isreg is defined and docker_check.stat.isreg notify: - restart docker diff --git a/roles/flannel/README.md b/roles/flannel/README.md index 8f271aada..84e2c5c49 100644 --- a/roles/flannel/README.md +++ b/roles/flannel/README.md @@ -13,15 +13,15 @@ to 0.3. Role Variables -------------- -| Name | Default value | Description | -|---------------------|-----------------------------------------|-----------------------------------------------| -| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication | -| flannel_etcd_key | /openshift.com/network | etcd prefix | -| etcd_hosts | etcd_urls | a list of etcd endpoints | -| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory | -| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd | -| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd | -| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd | +| Name | Default value | Description | +|----------------------|-----------------------------------------|-----------------------------------------------| +| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication | +| flannel_etcd_key | /openshift.com/network | etcd prefix | +| etcd_hosts | etcd_urls | a list of etcd endpoints | +| etcd_cert_config_dir | {{ openshift.common.config_base }}/node | SSL certificates directory | +| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd | +| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd | +| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd | Dependencies ------------ diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml index 34cebda9c..988731ef2 100644 --- a/roles/flannel/defaults/main.yaml +++ b/roles/flannel/defaults/main.yaml @@ -2,7 +2,6 @@ flannel_interface: "{{ ansible_default_ipv4.interface }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" -etcd_conf_dir: "{{ openshift.common.config_base }}/node" -etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt" -etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt" -etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key" +etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt" +etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt" +etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key" diff --git 
a/roles/flannel_register/README.md b/roles/flannel_register/README.md index 623c4c7cf..20a07c35e 100644 --- a/roles/flannel_register/README.md +++ b/roles/flannel_register/README.md @@ -16,7 +16,7 @@ Role Variables |---------------------|----------------------------------------------------|-------------------------------------------------| | flannel_network | {{ openshift.common.portal_net }} or 172.16.1.1/16 | interface to use for inter-host communication | | flannel_min_network | {{ min_network }} or 172.16.5.0 | beginning of IP range for the subnet allocation | -| flannel_subnet_len | /openshift.com/network | size of the subnet allocated to each host | +| flannel_subnet_len | 24 | size of the subnet allocated to each host | | flannel_etcd_key | /openshift.com/network | etcd prefix | | etcd_hosts | etcd_urls | a list of etcd endpoints | | etcd_conf_dir | {{ openshift.common.config_base }}/master | SSL certificates directory | diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index 6111d1207..e217e37ea 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -2,12 +2,8 @@ - name: Set cloud provider facts openshift_facts: role: cloudprovider - openshift_env: "{{ hostvars - | oo_merge_hostvars(vars, inventory_hostname) - | oo_openshift_env }}" - openshift_env_structures: - - 'openshift.cloudprovider.aws.*' - - 'openshift.cloudprovider.openstack.*' + local_facts: + kind: "{{ openshift_cloudprovider_kind | default(None) }}" - name: Create cloudprovider config dir file: diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml index c501121e5..f22dd4520 100644 --- a/roles/openshift_cloud_provider/tasks/openstack.yml +++ b/roles/openshift_cloud_provider/tasks/openstack.yml @@ -7,4 +7,4 @@ template: dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf" src: openstack.conf.j2 - when: "'auth_url' in openshift.cloudprovider.openstack and 'username' in openshift.cloudprovider.openstack and 'password' in openshift.cloudprovider.openstack and ('tenant_id' in openshift.cloudprovider.openstack or 'tenant_name' in openshift.cloudprovider.openstack)" + when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)" diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2 index 8a06b3a08..ce452db24 100644 --- a/roles/openshift_cloud_provider/templates/openstack.conf.j2 +++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2 @@ -1,16 +1,16 @@ [Global] -auth-url = {{ openshift.cloudprovider.openstack.auth_url }} -username = {{ openshift.cloudprovider.openstack.username }} -password = {{ openshift.cloudprovider.openstack.password }} -{% if 'tenant_id' in openshift.cloudprovider.openstack %} -tenant-id = {{ openshift.cloudprovider.openstack.tenant_id }} +auth-url = {{ openshift_cloudprovider_openstack_auth_url }} +username = {{ openshift_cloudprovider_openstack_username }} +password = {{ openshift_cloudprovider_openstack_password }} +{% if openshift_cloudprovider_openstack_tenant_id is defined %} +tenant-id = {{ openshift_cloudprovider_openstack_tenant_id }} {% else %} -tenant-name = {{ 
openshift.cloudprovider.openstack.tenant_name }} +tenant-name = {{ openshift_cloudprovider_openstack_tenant_name }} {% endif %} -{% if 'region' in openshift.cloudprovider.openstack %} -region = {{ openshift.cloudprovider.openstack.region }} +{% if openshift_cloudprovider_openstack_region is defined %} +region = {{ openshift_cloudprovider_openstack_region }} {% endif %} -{% if 'lb_subnet_id' in openshift.cloudprovider.openstack %} +{% if openshift_cloudprovider_openstack_lb_subnet_id is defined %} [LoadBalancer] -subnet-id = {{ openshift.cloudprovider.openstack.lb_subnet_id }} +subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }} {% endif %} diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml index c608e9b54..83bf6edc8 100644 --- a/roles/openshift_cloud_provider/vars/main.yml +++ b/roles/openshift_cloud_provider/vars/main.yml @@ -1,4 +1,4 @@ --- -has_cloudprovider: "{{ 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != None }}" -cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift.cloudprovider.kind == 'aws' }}" -cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift.cloudprovider.kind == 'openstack' }}" +has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}" +cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}" +cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}" diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index ece335fbf..6dae98f9f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,7 +1,7 @@ --- - fail: - msg: Flannel can not be used with openshift sdn - when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool + msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - fail: msg: Nuage sdn can not be used with openshift sdn diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index a15285417..e843049f9 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -20,6 +20,8 @@ xpaas_templates_base: "{{ examples_base }}/xpaas-templates" quickstarts_base: "{{ examples_base }}/quickstart-templates" infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin" infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise" +cockpit_ui_base: "{{ examples_base }}/infrastructure-templates/enterprise" + openshift_examples_import_command: "create" registry_url: "" diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index f598cf8f2..1ad0d93a2 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -9,7 +9,7 @@ XPAAS_VERSION=ose-v1.3.3 ORIGIN_VERSION=${1:-v1.3} EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} find ${EXAMPLES_BASE} -name '*.json' -delete -find ${EXAMPLES_BASE} -name '*.yaml' -delete +find ${EXAMPLES_BASE} -name '*.yaml' -delete -exclude registry-console.json TEMP=`mktemp -d` pushd $TEMP @@ -29,7 +29,6 @@ unzip cakephp-ex-master.zip unzip application-templates-master.zip 
cp origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/ cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/ -cp origin-master/examples/jenkins/pipeline/jenkinstemplate.json ${EXAMPLES_BASE}/quickstart-templates/ cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/ cp django-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ cp rails-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/README.md b/roles/openshift_examples/files/examples/v1.3/db-templates/README.md index 609f4dec9..c66bdb8bf 100644 --- a/roles/openshift_examples/files/examples/v1.3/db-templates/README.md +++ b/roles/openshift_examples/files/examples/v1.3/db-templates/README.md @@ -38,35 +38,11 @@ Replace `/path/to/template.json` with an appropriate path, that can be either a local path or an URL. Example: $ oc new-app https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json - --> Deploying template mongodb-ephemeral for "https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json" - With parameters: - DATABASE_SERVICE_NAME=mongodb - MONGODB_USER=userJNX # generated - MONGODB_PASSWORD=tnEDilMVrgjp5AI2 # generated - MONGODB_DATABASE=sampledb - MONGODB_ADMIN_PASSWORD=8bYEs8OlNYhVyMBs # generated - --> Creating resources ... - Service "mongodb" created - DeploymentConfig "mongodb" created - --> Success - Run 'oc status' to view your app. The parameters listed in the output above can be tweaked by specifying values in the command line with the `-p` option: $ oc new-app examples/db-templates/mongodb-ephemeral-template.json -p DATABASE_SERVICE_NAME=mydb -p MONGODB_USER=default - --> Deploying template mongodb-ephemeral for "examples/db-templates/mongodb-ephemeral-template.json" - With parameters: - DATABASE_SERVICE_NAME=mydb - MONGODB_USER=default - MONGODB_PASSWORD=RPvMbWlQFOevSowQ # generated - MONGODB_DATABASE=sampledb - MONGODB_ADMIN_PASSWORD=K7tIjDxDHHYCvFrJ # generated - --> Creating resources ... - Service "mydb" created - DeploymentConfig "mydb" created - --> Success - Run 'oc status' to view your app. Note that the persistent template requires an existing persistent volume, otherwise the deployment won't ever succeed. 
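The README hunk above drops the captured command output but keeps the essential usage: a template is instantiated with `oc new-app`, and any of its parameters can be overridden on the command line with `-p NAME=value`. Wrapped in an Ansible task, that is the same shape the registry-console deployment earlier in this diff uses; a minimal sketch, assuming the mongodb-ephemeral template named in the README and an illustrative project name (`myproject`):

    ---
    - hosts: masters
      tasks:
      - name: Instantiate a template with a parameter override (sketch)
        command: >
          oc new-app --template=mongodb-ephemeral
          -p DATABASE_SERVICE_NAME=mydb
          -n myproject
        register: new_app
        # Reuse the stderr check so a rerun against an existing app is neither a change nor a failure.
        changed_when: "'already exists' not in new_app.stderr"
        failed_when: "'already exists' not in new_app.stderr and new_app.rc != 0"

Parameters not passed with -p simply keep the defaults (or generated values) declared in the template, as the README notes.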
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml index 032f94a18..afd47ec7c 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml @@ -34,9 +34,11 @@ objects: metadata: generateName: metrics-deployer- spec: + securityContext: {} containers: - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} name: deployer + securityContext: {} volumeMounts: - name: secret mountPath: /secret @@ -48,6 +50,10 @@ objects: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name - name: IMAGE_PREFIX value: ${IMAGE_PREFIX} - name: IMAGE_VERSION @@ -58,8 +64,12 @@ objects: value: ${MODE} - name: REDEPLOY value: ${REDEPLOY} + - name: IGNORE_PREFLIGHT + value: ${IGNORE_PREFLIGHT} - name: USE_PERSISTENT_STORAGE value: ${USE_PERSISTENT_STORAGE} + - name: DYNAMICALLY_PROVISION_STORAGE + value: ${DYNAMICALLY_PROVISION_STORAGE} - name: HAWKULAR_METRICS_HOSTNAME value: ${HAWKULAR_METRICS_HOSTNAME} - name: CASSANDRA_NODES @@ -68,6 +78,10 @@ objects: value: ${CASSANDRA_PV_SIZE} - name: METRIC_DURATION value: ${METRIC_DURATION} + - name: USER_WRITE_ACCESS + value: ${USER_WRITE_ACCESS} + - name: HEAPSTER_NODE_ID + value: ${HEAPSTER_NODE_ID} - name: METRIC_RESOLUTION value: ${METRIC_RESOLUTION} dnsPolicy: ClusterFirst @@ -87,7 +101,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.2.1" + value: "3.3.0" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL @@ -97,7 +111,7 @@ parameters: name: HAWKULAR_METRICS_HOSTNAME required: true - - description: "Can be set to: 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process)" + description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" name: MODE value: "deploy" - @@ -105,10 +119,18 @@ parameters: name: REDEPLOY value: "false" - + description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." + name: IGNORE_PREFLIGHT + value: "false" +- description: "Set to true for persistent storage, set to false to use non persistent storage" name: USE_PERSISTENT_STORAGE value: "true" - + description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" + name: DYNAMICALLY_PROVISION_STORAGE + value: "false" +- description: "The number of Cassandra Nodes to deploy for the initial cluster" name: CASSANDRA_NODES value: "1" @@ -121,6 +143,14 @@ parameters: name: METRIC_DURATION value: "7" - - description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds" + description: "If a user accounts should be allowed to write metrics." 
+ name: USER_WRITE_ACCESS + value: "false" +- + description: "The identifier used when generating metric ids in Hawkular" + name: HEAPSTER_NODE_ID + value: "nodename" +- + description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds" name: METRIC_RESOLUTION - value: "10s" + value: "15s" diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml new file mode 100644 index 000000000..11478263c --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml @@ -0,0 +1,124 @@ +kind: Template +apiVersion: v1 +metadata: + name: "registry-console" + annotations: + description: "Template for deploying registry web console. Requires cluster-admin." + tags: infrastructure +labels: + createdBy: "registry-console-template" +objects: + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: "registry-console" + labels: + name: "registry-console" + spec: + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "registry-console" + template: + metadata: + labels: + name: "registry-console" + spec: + containers: + - name: registry-console + image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} + ports: + - containerPort: 9090 + protocol: TCP + livenessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 9090 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 9090 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + env: + - name: OPENSHIFT_OAUTH_PROVIDER_URL + value: "${OPENSHIFT_OAUTH_PROVIDER_URL}" + - name: OPENSHIFT_OAUTH_CLIENT_ID + value: "${OPENSHIFT_OAUTH_CLIENT_ID}" + - name: KUBERNETES_INSECURE + value: "false" + - name: COCKPIT_KUBE_INSECURE + value: "false" + - name: REGISTRY_ONLY + value: "true" + - name: REGISTRY_HOST + value: "${REGISTRY_HOST}" + - kind: Service + apiVersion: v1 + metadata: + name: "registry-console" + labels: + name: "registry-console" + spec: + type: ClusterIP + ports: + - name: registry-console + protocol: TCP + port: 9000 + targetPort: 9090 + selector: + name: "registry-console" + - kind: ImageStream + apiVersion: v1 + metadata: + name: registry-console + annotations: + description: Atomic Registry console + spec: + tags: + - annotations: null + from: + kind: DockerImage + name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_VERSION} + - kind: OAuthClient + apiVersion: v1 + metadata: + name: "${OPENSHIFT_OAUTH_CLIENT_ID}" + respondWithChallenges: false + secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}" + redirectURIs: + - "${COCKPIT_KUBE_URL}" +parameters: + - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' + name: IMAGE_PREFIX + value: "registry.access.redhat.com/openshift3/" + - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.3", set version "3.3"' + name: IMAGE_VERSION + value: "3.3" + - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443" + name: OPENSHIFT_OAUTH_PROVIDER_URL + required: true + - description: "The registry console URL. 
This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com" + name: COCKPIT_KUBE_URL + required: true + - description: "Oauth client secret" + name: OPENSHIFT_OAUTH_CLIENT_SECRET + from: "user[a-zA-Z0-9]{64}" + generate: expression + - description: "Oauth client id" + name: OPENSHIFT_OAUTH_CLIENT_ID + value: "cockpit-oauth-client" + - description: "The integrated registry hostname exposed via route, e.g. registry.example.com" + name: REGISTRY_HOST + required: true diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml index ab62ae76f..ac5098c8a 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml @@ -151,6 +151,6 @@ parameters: name: HEAPSTER_NODE_ID value: "nodename" - - description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds" + description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds" name: METRIC_RESOLUTION - value: "10s" + value: "15s" diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml new file mode 100644 index 000000000..80cc4233b --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml @@ -0,0 +1,124 @@ +kind: Template +apiVersion: v1 +metadata: + name: "registry-console" + annotations: + description: "Template for deploying registry web console. Requires cluster-admin." 
+ tags: infrastructure +labels: + createdBy: "registry-console-template" +objects: + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: "registry-console" + labels: + name: "registry-console" + spec: + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "registry-console" + template: + metadata: + labels: + name: "registry-console" + spec: + containers: + - name: registry-console + image: ${IMAGE_NAME}:${IMAGE_VERSION} + ports: + - containerPort: 9090 + protocol: TCP + livenessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 9090 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 9090 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + env: + - name: OPENSHIFT_OAUTH_PROVIDER_URL + value: "${OPENSHIFT_OAUTH_PROVIDER_URL}" + - name: OPENSHIFT_OAUTH_CLIENT_ID + value: "${OPENSHIFT_OAUTH_CLIENT_ID}" + - name: KUBERNETES_INSECURE + value: "false" + - name: COCKPIT_KUBE_INSECURE + value: "false" + - name: REGISTRY_ONLY + value: "true" + - name: REGISTRY_HOST + value: "${REGISTRY_HOST}" + - kind: Service + apiVersion: v1 + metadata: + name: "registry-console" + labels: + name: "registry-console" + spec: + type: ClusterIP + ports: + - name: registry-console + protocol: TCP + port: 9000 + targetPort: 9090 + selector: + name: "registry-console" + - kind: ImageStream + apiVersion: v1 + metadata: + name: registry-console + annotations: + description: Atomic Registry console + spec: + tags: + - annotations: null + from: + kind: DockerImage + name: ${IMAGE_NAME} + name: ${IMAGE_VERSION} + - kind: OAuthClient + apiVersion: v1 + metadata: + name: "${OPENSHIFT_OAUTH_CLIENT_ID}" + respondWithChallenges: false + secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}" + redirectURIs: + - "${COCKPIT_KUBE_URL}" +parameters: + - description: "Container image name" + name: IMAGE_NAME + value: "cockpit/kubernetes" + - description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"' + name: IMAGE_VERSION + value: latest + - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443" + name: OPENSHIFT_OAUTH_PROVIDER_URL + required: true + - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com" + name: COCKPIT_KUBE_URL + required: true + - description: "Oauth client secret" + name: OPENSHIFT_OAUTH_CLIENT_SECRET + from: "user[a-zA-Z0-9]{64}" + generate: expression + - description: "Oauth client id" + name: OPENSHIFT_OAUTH_CLIENT_ID + value: "cockpit-oauth-client" + - description: "The integrated registry hostname exposed via route, e.g. 
registry.example.com" + name: REGISTRY_HOST + required: true diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json index f85e7e537..ab4982690 100644 --- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json @@ -126,9 +126,9 @@ }, "spec": { "strategy": { - "type": "Rolling", + "type": "Recreate", "recreateParams": { - "pre": { + "pre": { "failurePolicy": "Retry", "execNewPod": { "command": [ diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json index 4f565206f..e8e361415 100644 --- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json @@ -5,12 +5,12 @@ "name": "jenkins-ephemeral", "creationTimestamp": null, "annotations": { - "description": "Jenkins service, without persistent storage.\nThe username is 'admin' and the tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "description": "Jenkins service, without persistent storage.\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins" } }, - "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}.", + "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. 
The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", "objects": [ { "kind": "Route", @@ -26,6 +26,7 @@ }, "tls": { "termination": "edge", + "insecureEdgeTerminationPolicy": "Redirect", "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----", "key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----", "caCertificate": "-----BEGIN 
CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----" @@ -75,6 +76,7 @@ } }, "spec": { + "serviceAccountName": "${JENKINS_SERVICE_NAME}", "containers": [ { "name": "jenkins", @@ -89,7 +91,7 @@ }, "livenessProbe": { "timeoutSeconds": 3, - "initialDelaySeconds": 60, + "initialDelaySeconds": 120, "httpGet": { "path": "/login", "port": 8080 @@ -99,6 +101,18 @@ { "name": "JENKINS_PASSWORD", "value": "${JENKINS_PASSWORD}" + }, + { + "name": "KUBERNETES_MASTER", + "value": "https://kubernetes.default:443" + }, + { + "name": "KUBERNETES_TRUST_CERTIFICATES", + "value": "true" + }, + { + "name": "JNLP_SERVICE_NAME", + "value": "${JNLP_SERVICE_NAME}" } ], "resources": { @@ -136,19 +150,42 @@ } }, { + "kind": "ServiceAccount", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}" + } + }, + { + "kind": "RoleBinding", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}_edit" + }, + "groupNames": null, + "subjects": [ + { + "kind": "ServiceAccount", + "name": "${JENKINS_SERVICE_NAME}" + } + ], + "roleRef": { + "name": "edit" + } + }, + { "kind": "Service", "apiVersion": "v1", "metadata": { - "name": "${JENKINS_SERVICE_NAME}", - "creationTimestamp": null + "name": "${JNLP_SERVICE_NAME}" }, "spec": { "ports": [ { - "name": "web", + "name": "agent", "protocol": "TCP", - "port": 8080, - "targetPort": 8080, + "port": 50000, + "targetPort": 50000, "nodePort": 0 } ], @@ -159,6 +196,35 @@ "type": "ClusterIP", "sessionAffinity": "None" } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}", + "annotations": { + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]", + "service.openshift.io/infrastructure": "true" + }, + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "web", + "protocol": "TCP", + "port": 80, + "targetPort": 8080, + "nodePort": 0 + } + ], + "selector": { + "name": "${JENKINS_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + } } ], "parameters": [ @@ -169,6 
+235,12 @@ "value": "jenkins" }, { + "name": "JNLP_SERVICE_NAME", + "displayName": "Jenkins JNLP Service Name", + "description": "The name of the service used for master/slave communication.", + "value": "jenkins-jnlp" + }, + { "name": "JENKINS_PASSWORD", "displayName": "Jenkins Password", "description": "Password for the Jenkins 'admin' user.", diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json index eda826a5b..e4a18961e 100644 --- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json @@ -5,12 +5,12 @@ "name": "jenkins-persistent", "creationTimestamp": null, "annotations": { - "description": "Jenkins service, with persistent storage.\nThe username is 'admin' and the tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.\nYou must have persistent volumes available in your cluster to use this template.", + "description": "Jenkins service, with persistent storage.\nYou must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins" } }, - "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}.", + "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", "objects": [ { "kind": "Route", @@ -26,6 +26,7 @@ }, "tls": { "termination": "edge", + "insecureEdgeTerminationPolicy": "Redirect", "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----", "key": "-----BEGIN PRIVATE 
KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----", "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----" @@ -92,6 +93,7 @@ } }, "spec": { + "serviceAccountName": "${JENKINS_SERVICE_NAME}", "containers": [ { "name": "jenkins", @@ -106,7 +108,7 @@ }, "livenessProbe": { "timeoutSeconds": 3, - "initialDelaySeconds": 60, + "initialDelaySeconds": 120, "httpGet": { "path": "/login", "port": 8080 @@ -116,6 +118,18 @@ { "name": "JENKINS_PASSWORD", "value": "${JENKINS_PASSWORD}" + }, + { + "name": "KUBERNETES_MASTER", + "value": "https://kubernetes.default:443" + }, + { + "name": "KUBERNETES_TRUST_CERTIFICATES", + "value": "true" + }, + { + "name": "JNLP_SERVICE_NAME", + "value": "${JNLP_SERVICE_NAME}" } ], "resources": { @@ -153,19 +167,42 @@ } }, { + "kind": "ServiceAccount", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}" + } + }, + { + "kind": "RoleBinding", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}_edit" + }, + "groupNames": null, + "subjects": [ + { + "kind": "ServiceAccount", + "name": "${JENKINS_SERVICE_NAME}" + } + ], + "roleRef": { + "name": "edit" + } + }, + { "kind": "Service", "apiVersion": "v1", "metadata": { - "name": 
"${JENKINS_SERVICE_NAME}", - "creationTimestamp": null + "name": "${JNLP_SERVICE_NAME}" }, "spec": { "ports": [ { - "name": "web", + "name": "agent", "protocol": "TCP", - "port": 8080, - "targetPort": 8080, + "port": 50000, + "targetPort": 50000, "nodePort": 0 } ], @@ -176,6 +213,35 @@ "type": "ClusterIP", "sessionAffinity": "None" } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}", + "annotations": { + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]", + "service.openshift.io/infrastructure": "true" + }, + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "web", + "protocol": "TCP", + "port": 80, + "targetPort": 8080, + "nodePort": 0 + } + ], + "selector": { + "name": "${JENKINS_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + } } ], "parameters": [ @@ -186,6 +252,12 @@ "value": "jenkins" }, { + "name": "JNLP_SERVICE_NAME", + "displayName": "Jenkins JNLP Service Name", + "description": "The name of the service used for master/slave communication.", + "value": "jenkins-jnlp" + }, + { "name": "JENKINS_PASSWORD", "displayName": "Jenkins Password", "description": "Password for the Jenkins 'admin' user.", diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json deleted file mode 100644 index fc409f709..000000000 --- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json +++ /dev/null @@ -1,256 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": { - "name": "jenkins", - "creationTimestamp": null, - "annotations": { - "description": "Jenkins service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", - "iconClass": "icon-jenkins", - "tags": "instant-app,jenkins" - } - }, - "message": "A Jenkins service has been created in your project. 
The username/password are admin/${JENKINS_PASSWORD}.", - "objects": [ - { - "kind": "Route", - "apiVersion": "v1", - "metadata": { - "name": "jenkins", - "creationTimestamp": null - }, - "spec": { - "to": { - "kind": "Service", - "name": "${JENKINS_SERVICE_NAME}" - }, - "tls": { - "termination": "edge", - "insecureEdgeTerminationPolicy": "Redirect", - "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----", - "key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----", - "caCertificate": "-----BEGIN 
CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----" - } - } - }, - { - "kind": "DeploymentConfig", - "apiVersion": "v1", - "metadata": { - "name": "${JENKINS_SERVICE_NAME}", - "creationTimestamp": null - }, - "spec": { - "strategy": { - "type": "Recreate" - }, - "triggers": [ - { - "type": "ImageChange", - "imageChangeParams": { - "automatic": true, - "containerNames": [ - "jenkins" - ], - "from": { - "kind": "ImageStreamTag", - "name": "jenkins:1", - "namespace": "openshift" - } - } - }, - { - "type": "ConfigChange" - } - ], - "replicas": 1, - "selector": { - "name": "${JENKINS_SERVICE_NAME}" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "name": "${JENKINS_SERVICE_NAME}" - } - }, - "spec": { - "serviceAccountName": "${JENKINS_SERVICE_NAME}", - "containers": [ - { - "name": "jenkins", - "image": " ", - "readinessProbe": { - "timeoutSeconds": 3, - "initialDelaySeconds": 3, - "httpGet": { - "path": "/login", - "port": 8080 - } - }, - "livenessProbe": { - "timeoutSeconds": 3, - "initialDelaySeconds": 120, - "httpGet": { - "path": "/login", - "port": 8080 - } - }, - "env": [ - { - "name": "JENKINS_PASSWORD", - "value": "${JENKINS_PASSWORD}" - }, - { - "name": "KUBERNETES_MASTER", - "value": "https://kubernetes.default:443" - }, - { - "name": "KUBERNETES_TRUST_CERTIFICATES", - "value": "true" - } - ], - "resources": { - "limits": { - "memory": "${MEMORY_LIMIT}" - } - }, - "volumeMounts": [ - { - "name": "${JENKINS_SERVICE_NAME}-data", - "mountPath": "/var/lib/jenkins" - } - ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {}, - "securityContext": { - "capabilities": {}, - "privileged": false - } - } - ], - "volumes": [ - { - "name": "${JENKINS_SERVICE_NAME}-data", - "emptyDir": { - "medium": "" - } - } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst" - } - } - } - }, - { - "kind": "ServiceAccount", - "apiVersion": "v1", - "metadata": { - "name": "${JENKINS_SERVICE_NAME}" - } - }, - { - "kind": "RoleBinding", - "apiVersion": "v1", - "metadata": { - "name": 
"${JENKINS_SERVICE_NAME}_edit" - }, - "groupNames": null, - "subjects": [ - { - "kind": "ServiceAccount", - "name": "${JENKINS_SERVICE_NAME}" - } - ], - "roleRef": { - "name": "edit" - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "jenkins-jnlp" - }, - "spec": { - "ports": [ - { - "name": "agent", - "protocol": "TCP", - "port": 50000, - "targetPort": 50000, - "nodePort": 0 - } - ], - "selector": { - "name": "${JENKINS_SERVICE_NAME}" - }, - "portalIP": "", - "type": "ClusterIP", - "sessionAffinity": "None" - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "${JENKINS_SERVICE_NAME}", - "annotations": { - "service.alpha.openshift.io/dependencies": "[{\"name\": \"jenkins-jnlp\", \"namespace\": \"\", \"kind\": \"Service\"}]", - "service.openshift.io/infrastructure": "true" - }, - "creationTimestamp": null - }, - "spec": { - "ports": [ - { - "name": "web", - "protocol": "TCP", - "port": 80, - "targetPort": 8080, - "nodePort": 0 - } - ], - "selector": { - "name": "${JENKINS_SERVICE_NAME}" - }, - "portalIP": "", - "type": "ClusterIP", - "sessionAffinity": "None" - } - } - ], - "parameters": [ - { - "name": "MEMORY_LIMIT", - "displayName": "Memory Limit", - "description": "Maximum amount of memory the container can use.", - "value": "512Mi" - }, - { - "name": "NAMESPACE", - "displayName": "Namespace", - "description": "The OpenShift Namespace where the ImageStream resides.", - "value": "openshift" - }, - { - "name": "JENKINS_SERVICE_NAME", - "displayName": "Jenkins Service Name", - "description": "The name of the OpenShift Service exposed for the Jenkins container.", - "value": "jenkins" - }, - { - "name": "JENKINS_PASSWORD", - "displayName": "Jenkins Password", - "description": "Password for the Jenkins 'admin' user.", - "generate": "expression", - "from": "[a-zA-Z0-9]{16}", - "required": true - } - ], - "labels": { - "template": "jenkins-pipeline-template" - } -} diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 6fab996b2..ebd799466 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -477,6 +477,14 @@ def set_selectors(facts): facts['hosted']['registry'] = {} if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']: facts['hosted']['registry']['selector'] = selector + if 'metrics' not in facts['hosted']: + facts['hosted']['metrics'] = {} + if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: + facts['hosted']['metrics']['selector'] = None + if 'logging' not in facts['hosted']: + facts['hosted']['logging'] = {} + if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']: + facts['hosted']['logging']['selector'] = None return facts @@ -789,7 +797,7 @@ def set_deployment_facts_if_unset(facts): curr_disabled_features = set(facts['master']['disabled_features']) facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features)) else: - if deployment_type == 'atomic-enterprise': + if facts['common']['deployment_subtype'] == 'registry': facts['master']['disabled_features'] = openshift_features if 'node' in facts: @@ -1649,7 +1657,12 @@ class OpenShiftFacts(object): else: deployment_type = 'origin' - defaults = self.get_defaults(roles, deployment_type) + if 'common' in local_facts and 'deployment_subtype' in 
local_facts['common']: + deployment_subtype = local_facts['common']['deployment_subtype'] + else: + deployment_subtype = 'basic' + + defaults = self.get_defaults(roles, deployment_type, deployment_subtype) provider_facts = self.init_provider_facts() facts = apply_provider_facts(defaults, provider_facts) facts = merge_facts(facts, @@ -1681,7 +1694,7 @@ class OpenShiftFacts(object): facts = set_installed_variant_rpm_facts(facts) return dict(openshift=facts) - def get_defaults(self, roles, deployment_type): + def get_defaults(self, roles, deployment_type, deployment_subtype): """ Get default fact values Args: @@ -1701,6 +1714,7 @@ class OpenShiftFacts(object): defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr, deployment_type=deployment_type, + deployment_subtype=deployment_subtype, hostname=hostname, public_hostname=hostname, portal_net='172.30.0.0/16', @@ -1791,8 +1805,9 @@ class OpenShiftFacts(object): filesystem='ext4', volumeID='123'), host=None, - access_modes=['ReadWriteMany'], - create_pv=True + access_modes=['ReadWriteOnce'], + create_pv=True, + create_pvc=False ) ), registry=dict( @@ -1807,7 +1822,8 @@ class OpenShiftFacts(object): options='*(rw,root_squash)'), host=None, access_modes=['ReadWriteMany'], - create_pv=True + create_pv=True, + create_pvc=True ) ), router=dict() diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 4dbbd7f45..afeb78f95 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -24,6 +24,7 @@ local_facts: # TODO: Deprecate deployment_type in favor of openshift_deployment_type deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}" + deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" cluster_id: "{{ openshift_cluster_id | default('default') }}" hostname: "{{ openshift_hostname | default(None) }}" ip: "{{ openshift_ip | default(None) }}" @@ -40,4 +41,3 @@ - name: Set repoquery command set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - diff --git a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml index 6bf859e82..60eefd71a 100644 --- a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml +++ b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml @@ -5,10 +5,11 @@ - name: Determine if volume is already attached to dc/docker-registry command: "{{ openshift.common.client_binary }} get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\} --output-version=v1" changed_when: false + failed_when: false register: registry_volumes_output - set_fact: - volume_attached: "{{ registry_volume_claim in registry_volumes_output.stdout }}" + volume_attached: "{{ registry_volume_claim in (registry_volumes_output).stdout | default(['']) }}" - name: Add volume to dc/docker-registry command: > diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 0c0c7e61e..56110c28f 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -45,6 +45,13 @@ failed_when: false changed_when: false +- name: Preserve Master API AWS options + command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api + register: master_api_aws + when: openshift.master.ha is defined and openshift.master.ha | bool 
and openshift_master_cluster_method == "native" + failed_when: false + changed_when: false + - name: Create the master api service env file template: src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2" @@ -62,13 +69,29 @@ line: "{{ item }}" with_items: "{{ master_api_proxy.stdout_lines | default([]) }}" +- name: Restore Master API AWS Options + when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" + and master_api_aws.rc == 0 and + not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api + line: "{{ item }}" + with_items: "{{ master_api_aws.stdout_lines | default([]) }}" + - name: Preserve Master Controllers Proxy Config options - command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api + command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_proxy when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" failed_when: false changed_when: false +- name: Preserve Master Controllers AWS options + command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + register: master_controllers_aws + when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" + failed_when: false + changed_when: false + - name: Create the master controllers service env file template: src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2" @@ -86,6 +109,15 @@ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" and master_controllers_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common +- name: Restore Master Controllers AWS Options + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + line: "{{ item }}" + with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}" + when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" + and master_controllers_aws.rc == 0 and + not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) + - name: Install Master docker service file template: dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service" @@ -99,6 +131,12 @@ failed_when: false changed_when: false +- name: Preserve Master AWS options + command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master + register: master_aws + failed_when: false + changed_when: false + - name: Create the master service env file template: src: "atomic-openshift-master.j2" @@ -113,3 +151,10 @@ line: "{{ item }}" with_items: "{{ master_proxy.stdout_lines | default([]) }}" when: master_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common + +- name: Restore Master AWS Options + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master + line: "{{ item }}" + with_items: "{{ master_aws.stdout_lines | default([]) }}" + when: master_aws.rc == 0 and not 
(openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 75d44d308..10eaeb401 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -4,9 +4,9 @@ CONFIG_FILE={{ openshift_master_config_file }} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} -{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %} -AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }} -AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }} +{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %} +AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }} +AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }} {% endif %} {% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%} diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index df1dbb85e..eef0f414e 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,7 +12,7 @@ Requires=docker.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api LimitNOFILE=131072 diff --git 
a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 5ff2edae4..a8f5d7351 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -11,7 +11,7 @@ PartOf=docker.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers LimitNOFILE=131072 diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 31e86f5bd..ced3eb76f 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -44,6 +44,13 @@ auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }} {% endif %} controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }} {% endif %} +{% if openshift.common.version_gte_3_3_or_1_3 | bool %} +controllerConfig: + serviceServingCert: + signer: + certFile: service-signer.crt + keyFile: service-signer.key +{% endif %} controllers: '*' corsAllowedOrigins: {% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %} @@ -156,6 +163,7 @@ networkConfig: {% endif %} # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet serviceNetworkCIDR: {{ openshift.common.portal_net }} + externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }} oauthConfig: {% if 'oauth_always_show_provider_selection' in openshift.master %} alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }} @@ -173,7 +181,7 @@ oauthConfig: {% if 
openshift.common.version_gte_3_2_or_1_2 | bool %} masterCA: ca-bundle.crt {% else %} - masterCA: ca.rt + masterCA: ca.crt {% endif %} masterPublicURL: {{ openshift.master.public_api_url }} masterURL: {{ openshift.master.api_url }} @@ -210,7 +218,7 @@ serviceAccountConfig: {% if openshift.common.version_gte_3_2_or_1_2 | bool %} masterCA: ca-bundle.crt {% else %} - masterCA: ca.rt + masterCA: ca.crt {% endif %} privateKeyFile: serviceaccounts.private.key publicKeyFiles: diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2 index 97f698b68..be7644710 100644 --- a/roles/openshift_master/templates/master_docker/master.docker.service.j2 +++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2 @@ -8,7 +8,7 @@ Wants=etcd_container.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master Restart=always diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index 41308bd81..43fb3cafa 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -4,9 +4,9 @@ CONFIG_FILE={{ openshift_master_config_file }} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} -{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %} -AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }} -AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }} +{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %} +AWS_ACCESS_KEY_ID={{ 
openshift_cloudprovider_aws_access_key }} +AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }} {% endif %} {% if 'api_env_vars' in openshift.master -%} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 37a5d75f2..6d26a69eb 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -4,9 +4,9 @@ CONFIG_FILE={{ openshift_master_config_file }} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} -{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %} -AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }} -AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }} +{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %} +AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }} +AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }} {% endif %} {% if 'controllers_env_vars' in openshift.master -%} diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index aafb06f93..ffde59358 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -164,3 +164,29 @@ owner: "{{ item }}" group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" + +# Ensure ca-bundle exists for 3.2+ configuration +- name: Check for ca-bundle.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + register: ca_bundle_stat + failed_when: false + +- name: Check for ca.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca.crt" + register: ca_crt_stat + failed_when: false + +- name: Migrate ca.crt to ca-bundle.crt + command: mv ca.crt ca-bundle.crt + args: + chdir: "{{ openshift.common.config_base }}/master" + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + +- name: Link ca.crt to ca-bundle.crt + file: + src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + path: "{{ openshift.common.config_base }}/master/ca.crt" + state: link + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 17c31ec05..e0c0fc644 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -17,7 +17,6 @@ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" logging_public_url: "{{ openshift_master_logging_public_url | default(None) }}" - metrics_public_url: "{{ openshift_master_metrics_public_url | default(None) }}" logout_url: "{{ openshift_master_logout_url | default(None) }}" extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}" extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}" @@ -80,3 +79,4 @@ api_env_vars: "{{ openshift_master_api_env_vars | default(None) }}" 
controllers_env_vars: "{{ openshift_master_controllers_env_vars | default(None) }}" audit_config: "{{ openshift_master_audit_config | default(None) }}" + metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}" diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml index 086d8340c..406d50c24 100644 --- a/roles/openshift_master_facts/vars/main.yml +++ b/roles/openshift_master_facts/vars/main.yml @@ -17,4 +17,9 @@ builddefaults_yaml: - name: https_proxy value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}" - name: no_proxy - value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
\ No newline at end of file + value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}" + +metrics_hostname: "{{ openshift_hosted_metrics_public_url + | default('hawkular-metrics.' ~ (openshift.master.default_subdomain + | default(openshift_master_default_subdomain ))) + | oo_hostname_from_url }}" diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml new file mode 100644 index 000000000..edb7369de --- /dev/null +++ b/roles/openshift_metrics/handlers/main.yml @@ -0,0 +1,31 @@ +--- +- name: restart master + service: name={{ openshift.common.service_type }}-master state=restarted + when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) + notify: Verify API Server + +- name: restart master api + service: name={{ openshift.common.service_type }}-master-api state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server + +- name: restart master controllers + service: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl --silent + {% if openshift.common.version_gte_3_2_or_1_2 | bool %} + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {% else %} + --cacert {{ openshift.common.config_base }}/master/ca.crt + {% endif %} + {{ openshift.master.api_url }}/healthz/ready + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml new file mode 100644 index 000000000..9c4eb22d7 --- /dev/null +++ b/roles/openshift_metrics/tasks/install.yml @@ -0,0 +1,114 @@ +--- + +- name: Test if metrics-deployer service account exists + command: > + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace=openshift-infra + get serviceaccount metrics-deployer -o json + register: serviceaccount + changed_when: false + failed_when: false + +- name: Create metrics-deployer Service Account + shell: > + echo {{ metrics_deployer_sa | to_json | quote }} | + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + create -f - + when: serviceaccount.rc == 1 + +- name: Test edit permissions + command: > + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}' + register: edit_rolebindings + changed_when: false + +- name: Add edit permission to the openshift-infra project to metrics-deployer SA + command: > + {{ openshift.common.admin_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + policy add-role-to-user edit + system:serviceaccount:openshift-infra:metrics-deployer + when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout" 
+ +- name: Test cluster-reader permissions + command: > + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}' + register: cluster_reader_clusterrolebindings + changed_when: false + +- name: Add cluster-reader permission to the openshift-infra project to heapster SA + command: > + {{ openshift.common.admin_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + policy add-cluster-role-to-user cluster-reader + system:serviceaccount:openshift-infra:heapster + when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout" + +- name: Create metrics-deployer secret + command: > + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + secrets new metrics-deployer nothing=/dev/null + register: metrics_deployer_secret + changed_when: metrics_deployer_secret.rc == 0 + failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr" + +# TODO: extend this to allow user passed in certs or generating cert with +# OpenShift CA +- name: Build metrics deployer command + set_fact: + deployer_cmd: "{{ openshift.common.client_binary }} process -f \ + {{ metrics_template_dir }}/metrics-deployer.yaml -v \ + HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \ + | {{ openshift.common.client_binary }} --namespace openshift-infra \ + --config={{ openshift_metrics_kubeconfig }} \ + create -o name -f -" + +- name: Deploy Metrics + shell: "{{ deployer_cmd }}" + register: deploy_metrics + failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0" + changed_when: deploy_metrics.rc == 0 + +- set_fact: + deployer_pod: "{{ deploy_metrics.stdout[1:2] }}" + +# TODO: re-enable this once the metrics deployer validation issue is fixed +# when using dynamically provisioned volumes +- name: "Wait for image pull and deployer pod" + shell: > + {{ openshift.common.client_binary }} + --namespace openshift-infra + --config={{ openshift_metrics_kubeconfig }} + get {{ deploy_metrics.stdout }} + register: deploy_result + until: "{{ 'Completed' in deploy_result.stdout }}" + failed_when: False + retries: 60 + delay: 10 + +- name: Configure master for metrics + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: assetConfig.metricsPublicURL + yaml_value: "https://{{ metrics_hostname }}/hawkular/metrics" + notify: restart master + +- name: Store metrics public_url + openshift_facts: + role: master + local_facts: + metrics_public_url: "https://{{ metrics_hostname }}/hawkular/metrics" + when: deploy_result | changed diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 43b85204a..88432a9f8 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -1,64 +1,87 @@ --- -- name: Copy Configuration to temporary conf - command: > - cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{hawkular_tmp_conf}} - changed_when: false +- fail: + 
msg: This role requires openshift_master_default_subdomain or openshift_hosted_metrics_public_url to be set + when: openshift.master.metrics_public_url | default(openshift_hosted_metrics_public_url | default(openshift.master.default_subdomain | default(openshift_master_default_subdomain | default(none)))) is none -- name: Create metrics-deployer Service Account - shell: > - echo {{ deployer_service_account | to_json | quote }} | - {{ openshift.common.client_binary }} create - -n openshift-infra - --config={{hawkular_tmp_conf}} - -f - - register: deployer_create_service_account - failed_when: "'already exists' not in deployer_create_service_account.stderr and deployer_create_service_account.rc != 0" - changed_when: deployer_create_service_account.rc == 0 +- name: Create temp directory for kubeconfig + command: mktemp -d /tmp/openshift-ansible-XXXXXX + register: mktemp + changed_when: False -- name: Create metrics-deployer Secret - command: > - {{ openshift.common.client_binary }} - secrets new metrics-deployer - nothing=/dev/null - --config={{hawkular_tmp_conf}} - -n openshift-infra - register: deployer_create_secret - failed_when: "'already exists' not in deployer_create_secret.stderr and deployer_create_secret.rc !=0" - changed_when: deployer_create_secret.rc == 0 +- name: Record kubeconfig tmp dir + set_fact: + openshift_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" -- name: Configure role/user permissions +- name: Copy the admin client config(s) command: > - {{ openshift.common.admin_binary }} {{item}} - --config={{hawkular_tmp_conf}} - with_items: "{{hawkular_permission_oc_commands}}" - register: hawkular_perm_task - failed_when: "'already exists' not in hawkular_perm_task.stderr and hawkular_perm_task.rc != 0" - changed_when: hawkular_perm_task.rc == 0 + cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_metrics_kubeconfig }} + changed_when: False + +- name: Set hosted metrics facts + openshift_facts: + role: hosted + openshift_env: "{{ hostvars + | oo_merge_hostvars(vars, inventory_hostname) + | oo_openshift_env }}" + openshift_env_structures: + - 'openshift.hosted.metrics.*' + +- set_fact: + # Prefer the master facts over bare variables if present, prefer + # metrics_public_url over creating a default using default_subdomain + metrics_hostname: "{{ openshift.hosted.metrics.public_url + | default('hawkular-metrics.' 
~ (openshift.master.default_subdomain + | default(openshift_master_default_subdomain ))) + | oo_hostname_from_url }}" + metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}" + metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}" + metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" + cassandra_nodes: "{{ ',CASSANDRA_NODES=' ~ openshift.hosted.metrics.cassandra_nodes if 'cassandra' in openshift.hosted.metrics else '' }}" + cassandra_pv_size: "{{ ',CASSANDRA_PV_SIZE=' ~ openshift.hosted.metrics.storage_volume_size if openshift.hosted.metrics.storage_volume_size | default(none) is not none else '' }}" + image_prefix: "{{ ',IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer_prefix if 'deployer_prefix' in openshift.hosted.metrics else '' }}" + image_version: "{{ ',IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer_version if 'deployer_version' in openshift.hosted.metrics else '' }}" -- name: Check openshift_master_default_subdomain - fail: - msg: "Default subdomain should be defined" - when: openshift.master.default_subdomain is not defined -- name: Create Heapster and Hawkular/Cassandra Services +- name: Check for existing metrics pods shell: > - {{ openshift.common.client_binary }} process -f \ - /usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \ - HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }},USE_PERSISTENT_STORAGE={{ hawkular_persistence }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} \ - | {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f - - register: oex_heapster_services - failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0" + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + get pods -l {{ item }} | grep -q Running + register: metrics_pods_status + with_items: + - metrics-infra=hawkular-metrics + - metrics-infra=heapster + - metrics-infra=hawkular-cassandra + failed_when: false changed_when: false -- name: Clean temporary config file - command: > - rm -rf {{hawkular_tmp_conf}} +- name: Check for previous deployer + shell: > + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer- + register: metrics_deployer_status + failed_when: false changed_when: false -- name: "Wait for image pull and deployer pod" - shell: "{{ openshift.common.client_binary }} get pods -n openshift-infra | grep metrics-deployer.*Completed" - register: result - until: result.rc == 0 - retries: 60 - delay: 10 +- name: Record current deployment status + set_fact: + greenfield: "{{ not metrics_deployer_status.rc == 0 }}" + failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}" + metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}" + +- name: Set deployment mode + set_fact: + deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}" + +# TODO: handle non greenfield 
deployments in the future +- include: install.yml + when: greenfield +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml index 82d9d29f7..0331bcb89 100644 --- a/roles/openshift_metrics/vars/main.yaml +++ b/roles/openshift_metrics/vars/main.yaml @@ -2,13 +2,13 @@ hawkular_permission_oc_commands: - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster -deployer_service_account: - apiVersion: v1 - kind: ServiceAccount - metadata: - name: metrics-deployer - secrets: - - name: metrics-deployer +metrics_deployer_sa: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: metrics-deployer + secrets: + - name: metrics-deployer hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig @@ -17,3 +17,4 @@ hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}tru hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" +metrics_upgrade: openshift.hosted.metrics.upgrade | default(False) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 9c71af6d9..f49e97745 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -55,11 +55,11 @@ - name: Install the systemd units include: systemd_units.yml -- name: Reload systemd units - command: systemctl daemon-reload - when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed) - notify: - - restart node +# The atomic-openshift-node service will set this parameter on +# startup, but if the network service is restarted this setting is +# lost. 
Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388 +- name: Persist net.ipv4.ip_forward sysctl entry + sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes - name: Start and enable openvswitch docker service service: name=openvswitch.service enabled=yes state=started @@ -89,10 +89,10 @@ create: true with_items: - regex: '^AWS_ACCESS_KEY_ID=' - line: "AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}" + line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}" - regex: '^AWS_SECRET_ACCESS_KEY=' - line: "AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}" - when: "'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws" + line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}" + when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined" notify: - restart node diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index e384c1bd7..22b539d16 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -17,16 +17,16 @@ persistent: yes when: ansible_selinux and ansible_selinux.status == "enabled" and virt_use_nfs_output.rc == 0 -- name: Check for existence of virt_sandbox_use_nfs seboolean +- name: Check for existence of virt_sandbox_use_nfs seboolean (RHEL) command: getsebool virt_sandbox_use_nfs register: virt_sandbox_use_nfs_output - when: ansible_selinux and ansible_selinux.status == "enabled" + when: ansible_distribution != "Fedora" and ansible_selinux and ansible_selinux.status == "enabled" failed_when: false changed_when: false -- name: Set seboolean to allow nfs storage plugin access from containers(sandbox) +- name: Set seboolean to allow nfs storage plugin access from containers(sandbox) (RHEL) seboolean: name: virt_sandbox_use_nfs state: yes persistent: yes - when: ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0 + when: ansible_distribution != "Fedora" and ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0 diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 39e5386d4..025cb567e 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -47,3 +47,9 @@ line: "IMAGE_VERSION={{ openshift_image_tag }}" notify: - restart node + +- name: Reload systemd units + command: systemctl daemon-reload + when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed) + notify: + - restart node diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 414f0d5e3..68d153052 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -20,14 +20,14 @@ masterClientConnectionOverrides: qps: 100 {% endif %} masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig -{% if openshift.common.use_openshift_sdn | bool and not 
openshift.common.version_gte_3_3_or_1_3 | bool %} +{% if openshift.common.use_openshift_sdn | bool %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} {% endif %} # networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which # deprecates networkPluginName above. The two should match. networkConfig: mtu: {{ openshift.node.sdn_mtu }} -{% if ( openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool ) and not openshift.common.version_gte_3_3_or_1_3 | bool%} +{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} {% endif %} {% if openshift.node.set_node_ip | bool %} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index e33b665ca..3b5865a50 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v 
/var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node SyslogIdentifier={{ openshift.common.service_type }}-node diff --git a/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep b/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep +++ /dev/null diff --git a/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep b/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep +++ /dev/null diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo deleted file mode 100644 index bc0435d82..000000000 --- a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo +++ /dev/null @@ -1,8 +0,0 @@ -[maxamillion-fedora-openshift] -name=Copr repo for fedora-openshift owned by maxamillion -baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/ -skip_if_unavailable=True -gpgcheck=1 -gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg -enabled=1 -enabled_metadata=1
\ No newline at end of file diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo deleted file mode 100644 index 92bd35834..000000000 --- a/roles/openshift_repos/files/online/repos/enterprise-v3.repo +++ /dev/null @@ -1,10 +0,0 @@ -[enterprise-v3] -name=OpenShift Enterprise -baseurl=https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/ - https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/ -enabled=1 -gpgcheck=0 -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo deleted file mode 100644 index b4215679f..000000000 --- a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo +++ /dev/null @@ -1,11 +0,0 @@ -[rhel-7-libra-candidate] -name=rhel-7-libra-candidate - \$basearch -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ - https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ -gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted -skip_if_unavailable=True -gpgcheck=0 -enabled=1 -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem -sslverify=False diff --git a/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep b/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep +++ /dev/null diff --git a/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep b/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep +++ /dev/null diff --git a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo +++ /dev/null diff --git a/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo deleted file mode 100644 index 0b21e0a65..000000000 --- a/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo +++ /dev/null @@ -1,7 +0,0 @@ -[maxamillion-origin-next] -name=Copr repo for origin-next owned by maxamillion -baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ -skip_if_unavailable=True -gpgcheck=1 -gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg -enabled=1 diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo +++ /dev/null diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo deleted file mode 100644 index e69de29bb..000000000 --- a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo +++ /dev/null diff --git 
a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 07a8d28fd..9be168611 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -29,62 +29,20 @@ when: openshift_additional_repos | length == 0 and not openshift.common.is_containerized | bool notify: refresh cache -- name: Remove any yum repo files for other deployment types RHEL/CentOS - file: - path: "/etc/yum.repos.d/{{ item | basename }}" - state: absent - with_fileglob: - - "fedora-openshift-enterprise/repos/*" - - "fedora-origin/repos/*" - - "online/repos/*" - - "openshift-enterprise/repos/*" - - "origin/repos/*" - - "removed/repos/*" - when: not openshift.common.is_containerized | bool - and not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) - and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") - notify: refresh cache - -- name: Remove any yum repo files for other deployment types Fedora - file: - path: "{{ item | basename }}" - state: absent - with_fileglob: - - "fedora-openshift-enterprise/repos/*" - - "fedora-origin/repos/*" - - "online/repos/*" - - "openshift-enterprise/repos/*" - - "origin/repos/*" - - "removed/repos/*" - when: not openshift.common.is_containerized | bool - and not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) - and (ansible_distribution == "Fedora") - notify: refresh cache - -- name: Configure gpg keys if needed +- name: Configure origin gpg keys if needed copy: - src: "{{ item }}" + src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS dest: /etc/pki/rpm-gpg/ - with_fileglob: - - "{{ openshift_deployment_type }}/gpg_keys/*" - notify: refresh cache - when: not openshift.common.is_containerized | bool - -- name: Configure yum repositories RHEL/CentOS - copy: - src: "{{ item }}" - dest: /etc/yum.repos.d/ - with_fileglob: - - "{{ openshift_deployment_type }}/repos/*" notify: refresh cache - when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") + when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora" + and openshift_deployment_type == 'origin' and not openshift.common.is_containerized | bool -- name: Configure yum repositories Fedora +- name: Configure origin yum repositories RHEL/CentOS copy: - src: "{{ item }}" + src: origin/repos/openshift-ansible-centos-paas-sig.repo dest: /etc/yum.repos.d/ - with_fileglob: - - "fedora-{{ openshift_deployment_type }}/repos/*" notify: refresh cache - when: (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool + when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora" + and openshift_deployment_type == 'origin' + and not openshift.common.is_containerized | bool diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index fe7f83cbb..08d0b8540 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -20,21 +20,37 @@ - name: Ensure export directories exist file: - path: "{{ openshift.hosted.registry.storage.nfs.directory }}/{{ item }}" + path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}" state: directory mode: 0777 owner: nfsnobody group: nfsnobody with_items: - - "{{ openshift.hosted.registry.storage.volume.name }}" + - "{{ openshift.hosted.registry }}" + - "{{ openshift.hosted.metrics }}" - name: Configure exports template: - dest: /etc/exports + dest: /etc/exports.d/openshift-ansible.exports src: exports.j2 notify: - restart nfs-server +# 
Now that we're putting our exports in our own file clean up the old ones +- name: register exports + command: cat /etc/exports.d/openshift-ansible.exports + register: exports_out + +- name: remove exports from /etc/exports + lineinfile: + dest: /etc/exports + line: "{{ item }}" + state: absent + with_items: "{{ exports_out.stdout_lines | default([]) }}" + when: exports_out.rc == 0 + notify: + - restart nfs-server + - name: Enable and start services service: name: "{{ item }}" diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index c1e1994b0..d6d936b72 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1 +1,2 @@ {{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }} +{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 6e5d2b22c..a3a99d248 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -73,6 +73,10 @@ msg: openshift_version role was unable to set openshift_pkg_version when: openshift_pkg_version is not defined +- fail: + msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories." + when: not is_containerized | bool and openshift_version == '0.0' + # We can't map an openshift_release to full rpm version like we can with containers, make sure # the rpm version we looked up matches the release requested and error out if not. - fail: |