Diffstat (limited to 'inventory')
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.external.example             |   4
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.mixed.example                |   4
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.native.example               |   4
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.registry-only.example        |   4
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.storage-and-registry.example |   4
-rw-r--r--  inventory/byo/hosts.example                                    | 177
6 files changed, 164 insertions(+), 33 deletions(-)
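The change common to all five GlusterFS example inventories below is the same: etcd is registered in [OSEv3:children], and an [etcd] group pointing at the master host is added. A minimal sketch of the resulting group layout, using the placeholder hostnames these examples already use:

    [OSEv3:children]
    masters
    nodes
    etcd
    glusterfs

    [etcd]
    master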
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
index 5a284ce97..acf68266e 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -19,6 +19,7 @@
 [OSEv3:children]
 masters
 nodes
+etcd
 
 # Specify there will be GlusterFS nodes
 glusterfs
@@ -39,6 +40,9 @@
 node0 openshift_schedulable=True
 node1 openshift_schedulable=True
 node2 openshift_schedulable=True
+[etcd]
+master
+
 # Specify the glusterfs group, which contains the nodes of the external
 # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
 # and "glusterfs_devices" variables defined.
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
index d16df6470..a559dc377 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -19,6 +19,7 @@
 [OSEv3:children]
 masters
 nodes
+etcd
 
 # Specify there will be GlusterFS nodes
 glusterfs
@@ -42,6 +43,9 @@
 node0 openshift_schedulable=True
 node1 openshift_schedulable=True
 node2 openshift_schedulable=True
+[etcd]
+master
+
 # Specify the glusterfs group, which contains the nodes of the external
 # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
 # and "glusterfs_devices" variables defined.
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
index c1a1f6f84..ca4765c53 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -16,6 +16,7 @@
 [OSEv3:children]
 masters
 nodes
+etcd
 
 # Specify there will be GlusterFS nodes
 glusterfs
@@ -34,6 +35,9 @@
 node0 openshift_schedulable=True
 node1 openshift_schedulable=True
 node2 openshift_schedulable=True
+[etcd]
+master
+
 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
 # "glusterfs_devices" variable defined. This variable is a list of block
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
index 31a85ee42..32040f593 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -20,6 +20,7 @@
 [OSEv3:children]
 masters
 nodes
+etcd
 
 # Specify there will be GlusterFS nodes
 glusterfs_registry
@@ -40,6 +41,9 @@
 node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+[etcd]
+master
+
 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
 # "glusterfs_devices" variable defined. This variable is a list of block
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
index 54bd89ddc..9bd37cbf6 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -20,6 +20,7 @@
 [OSEv3:children]
 masters
 nodes
+etcd
 # Specify there will be GlusterFS nodes
 glusterfs
 glusterfs_registry
@@ -46,6 +47,9 @@
 node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+[etcd]
+master
+
 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
 # "glusterfs_devices" variable defined. This variable is a list of block
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 436135bcf..1a9a5b6cf 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -17,9 +17,9 @@ nfs
 # SSH user, this user should allow ssh based auth without requiring a
 # password. If using ssh key based auth, then the key should be managed by an
 # ssh agent.
-ansible_ssh_user=root
+ansible_user=root
 
-# If ansible_ssh_user is not root, ansible_become must be set to true and the
+# If ansible_user is not root, ansible_become must be set to true and the
 # user must be configured for passwordless sudo
 #ansible_become=yes
 
@@ -123,6 +123,15 @@ openshift_release=v3.7
 # use this option if you are sure you know what you are doing!
 #openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
 #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
+# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
+# unless you know what you are doing!!
+# The following two variables are used when openshift_use_crio is True
+# and clean up after builds that pass through docker.
+# Enable docker garbage collection when using cri-o
+#openshift_crio_enable_docker_gc=false
+# Node Selectors to run the garbage collection
+#openshift_crio_docker_gc_node_selector: {'runtime': 'cri-o'}
+
 # Items added, as is, to end of /etc/sysconfig/docker OPTIONS
 # Default value: "--log-driver=journald"
 #openshift_docker_options="-l warn --ipv6=false"
 
@@ -310,9 +319,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
 
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
 # Configure controller arguments
 #osm_controller_args={'resource-quota-sync-period': ['10s']}
 
@@ -394,10 +400,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
 
 # OpenShift Registry Console Options
-# Override the console image prefix for enterprise deployments, not used in origin
-# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
+# Override the console image prefix:
+# origin default is "cockpit/" and the image appended is "kubernetes"
+# enterprise default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
 #openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# Override image version, defaults to latest for origin, matches the product version for enterprise
+# Override image version, defaults to latest for origin, the vX.Y product version for enterprise
 #openshift_cockpit_deployer_version=1.4.1
 
 # Openshift Registry Options
@@ -432,7 +439,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # NFS Host Group
 # An NFS volume will be created with path "nfs_directory/volume_name"
 # on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry"
+# path using these options would be "/exports/registry". "exports" is
+# the name of the export served by the nfs server. "registry" is
+# the name of a directory inside of "/exports".
 #openshift_hosted_registry_storage_kind=nfs
 #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
 # nfs_directory must conform to DNS-1123 subdomain must consist of lower case
@@ -445,7 +454,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # External NFS Host
 # NFS volume must already exist with path "nfs_directory/_volume_name" on
 # the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry"
+# options would be "nfs.example.com:/exports/registry". "exports" is
+# the name of the export served by the nfs server. "registry" is
+# the name of a directory inside of "/exports".
 #openshift_hosted_registry_storage_kind=nfs
 #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
 #openshift_hosted_registry_storage_host=nfs.example.com
@@ -517,7 +528,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Option A - NFS Host Group
 # An NFS volume will be created with path "nfs_directory/volume_name"
 # on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics"
+# path using these options would be "/exports/metrics". "exports" is
+# the name of the export served by the nfs server. "metrics" is
+# the name of a directory inside of "/exports".
 #openshift_metrics_storage_kind=nfs
 #openshift_metrics_storage_access_modes=['ReadWriteOnce']
 #openshift_metrics_storage_nfs_directory=/exports
@@ -529,7 +542,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Option B - External NFS Host
 # NFS volume must already exist with path "nfs_directory/_volume_name" on
 # the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics"
+# options would be "nfs.example.com:/exports/metrics". "exports" is
+# the name of the export served by the nfs server. "metrics" is
+# the name of a directory inside of "/exports".
 #openshift_metrics_storage_kind=nfs
 #openshift_metrics_storage_access_modes=['ReadWriteOnce']
 #openshift_metrics_storage_host=nfs.example.com
@@ -571,7 +586,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Option A - NFS Host Group
 # An NFS volume will be created with path "nfs_directory/volume_name"
 # on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging"
+# path using these options would be "/exports/logging". "exports" is
+# the name of the export served by the nfs server. "logging" is
+# the name of a directory inside of "/exports".
 #openshift_logging_storage_kind=nfs
 #openshift_logging_storage_access_modes=['ReadWriteOnce']
 #openshift_logging_storage_nfs_directory=/exports
@@ -583,7 +600,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Option B - External NFS Host
 # NFS volume must already exist with path "nfs_directory/_volume_name" on
 # the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging"
+# options would be "nfs.example.com:/exports/logging". "exports" is
+# the name of the export served by the nfs server. "logging" is
+# the name of a directory inside of "/exports".
 #openshift_logging_storage_kind=nfs
 #openshift_logging_storage_access_modes=['ReadWriteOnce']
 #openshift_logging_storage_host=nfs.example.com
 
@@ -615,6 +634,77 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
 #openshift_logging_image_version=3.7.0
 
+# Prometheus deployment
+#
+# Currently prometheus deployment is disabled by default; enable it by setting this
+#openshift_hosted_prometheus_deploy=true
+#
+# Prometheus storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_host=nfs.example.com
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_host=nfs.example.com
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
+#
+# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
+# which are destroyed when pods are deleted
+
 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
 # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
 
@@ -903,25 +993,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # openshift_upgrade_post_storage_migration_enabled=true
 # openshift_upgrade_post_storage_migration_fatal=false
 
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported, if using a global
-# containerized=true host variable we must set to false.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-
+######################################################################
 # CloudForms/ManageIQ (CFME/MIQ) Configuration
 # See the readme for full descriptions and getting started
 
@@ -971,6 +1043,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # setting this variable. Useful for testing specific task files.
 #openshift_management_storage_nfs_local_hostname: false
 
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider via
+# playbooks/byo/openshift-management/add_container_provider.yml
+#openshift_management_username: admin
+#openshift_management_password: smartvm
+
 # A hash of parameters you want to override or set in the
 # miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
 # your inventory file as a simple hash. Acceptable values are defined
@@ -979,3 +1062,31 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #
 # openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
 #openshift_management_template_parameters: {}
+
+# Firewall configuration
+# You can open additional firewall ports by defining them as a list of service
+# names and ports/port ranges for either masters or nodes.
+#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
+#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported. If using a global
+# containerized=true host variable, it must be set to false here.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
+# However, in order to ensure that your masters are not burdened with running pods, you should
+# make them unschedulable by adding openshift_schedulable=False to any node that is also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+
+[nfs]
+ose3-nfs-ansible.test.example.com
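A condensed sketch of how the recurring NFS storage variables in hosts.example compose into a volume path, using the registry values shown above (Option A has the playbooks create the export on the [nfs] host; Option B points at an export that must already exist):

    # Option A - volume is created as "/exports/registry" on the [nfs] host
    openshift_hosted_registry_storage_kind=nfs
    openshift_hosted_registry_storage_nfs_directory=/exports
    openshift_hosted_registry_storage_volume_name=registry
    # Option B - volume must already exist as "nfs.example.com:/exports/registry"
    openshift_hosted_registry_storage_host=nfs.example.com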
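And a minimal sketch of enabling the new cri-o docker garbage collection described earlier in this diff (illustrative values; per the example's own note these options are tech preview and only take effect when openshift_use_crio is True; the example file writes the selector with a colon, while standard INI inventory assignment with = is shown here):

    [OSEv3:vars]
    openshift_use_crio=true
    # clean up after builds that pass through docker on cri-o nodes
    openshift_crio_enable_docker_gc=true
    # run garbage collection only on nodes matching this selector
    openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}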