# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd
lb
nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# Enable unsupported configurations, things that will yield a partially
# functioning cluster but would not be supported for production use
#openshift_enable_unsupported_configurations=false
# SSH user. This user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
ansible_user=root
# If ansible_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
#ansible_become=yes
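#
# For illustration, a non-root setup (user name hypothetical) would look like:
#ansible_user=openshift
#ansible_become=yes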
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
# Specify the deployment type. Valid values are origin and openshift-enterprise.
openshift_deployment_type=origin
#openshift_deployment_type=openshift-enterprise
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
#
# But you can choose separately, for each component, whether it must be a
# system container:
#
#openshift_use_openvswitch_system_container=False
#openshift_use_node_system_container=False
#openshift_use_master_system_container=False
#openshift_use_etcd_system_container=False
#
# In either case, system_images_registry must be specified to be able to find the system images
#system_images_registry="docker.io"
# when openshift_deployment_type=='openshift-enterprise'
#system_images_registry="registry.access.redhat.com"
# Manage openshift example imagestreams and templates during install and upgrade
#openshift_install_examples=true
# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com
# Configure extensionScripts in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
# Configure extensionStylesheets in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
# Configure the oauth templates in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_templates:
# login: /path/to/login-template.html
# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead.
#openshift_master_oauth_template=/path/to/login-template.html
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
# Configure master API rate limits for external clients
#openshift_master_external_ratelimit_qps=200
#openshift_master_external_ratelimit_burst=400
# Configure master API rate limits for loopback clients
#openshift_master_loopback_ratelimit_qps=300
#openshift_master_loopback_ratelimit_burst=600
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
# included if you do not include it
#openshift_docker_additional_registries=registry.example.com
#openshift_docker_insecure_registries=registry.example.com
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Use Docker inside a System Container. Note that this is a tech preview and should
# not be used to upgrade!
# The following options for docker are ignored:
# - docker_version
# - docker_upgrade
# The following options must not be used:
# - openshift_docker_options
#openshift_docker_use_system_container=False
# Install and run cri-o along side docker
# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
# just as container-engine does.
#openshift_use_crio=False
# Force the registry to use for the container-engine/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
# unless you know what you are doing!!
# The following two variables are used when openshift_use_crio is True
# and clean up after builds that pass through docker.
# Enable docker garbage collection when using cri-o
#openshift_crio_enable_docker_gc=false
# Node Selectors to run the garbage collection
#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
# Items added, as-is, to the end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
# Uncomment below to disable; for example if your kernel does not support the
# Docker overlay/overlay2 storage drivers with SELinux enabled.
#openshift_docker_selinux_enabled=False
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
# Specify exact version of etcd to configure or upgrade to.
# etcd_version="3.1.0"
# Enable etcd debug logging, defaults to false
# etcd_debug=true
# Set etcd log levels by package
# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
# Upgrade Hooks
#
# Hooks are available to run custom tasks at various points during a cluster
# upgrade. Each hook should point to a file with Ansible tasks defined. We suggest using
# absolute paths; otherwise the path will be treated as relative to the file where the
# hook is actually used.
#
# Tasks to run before each master is upgraded.
# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
#
# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
# upgrade steps, but before we restart system/services.
# openshift_master_upgrade_hook=/usr/share/custom/master.yml
#
# Tasks to run after each master is upgraded and system/services have been restarted.
# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
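#
# As a minimal sketch (not part of the inventory itself), a hook file is just
# a list of Ansible tasks. Using the pre-hook path above, a hypothetical
# /usr/share/custom/pre_master.yml might contain:
#   ---
#   - name: Note which master is about to be upgraded
#     debug:
#       msg: "Upgrading {{ inventory_hostname }}"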
# Alternate image format string, useful if you've got your own registry mirror
# Configure this setting just on node or master
#oreg_url_master=example.com/openshift3/ose-${component}:${version}
#oreg_url_node=example.com/openshift3/ose-${component}:${version}
# For setting the configuration globally
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
# If oreg_url points to a registry requiring authentication, provide the following:
#oreg_auth_user=some_user
#oreg_auth_password='my-pass'
# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
# oreg_auth_password should be generated by running docker login.
# To update registry auth credentials, uncomment the following:
#oreg_auth_credentials_replace=True
# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
#openshift_repos_enable_testing=false
# If the image for etcd needs to be pulled from somewhere other than registry.access.redhat.com, e.g. in
# a disconnected and containerized installation, use osm_etcd_image to specify the image to use:
#osm_etcd_image=rhel7/etcd
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
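#
# For illustration, a pre-hashed password entry can be generated locally with
# the htpasswd utility from httpd-tools (user name and password hypothetical):
#   htpasswd -nb user1 changeme
# which prints "user1:<hash>"; use the <hash> portion as the value in
# openshift_master_htpasswd_users.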
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
#
# Configure LDAP CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the LDAPPasswordIdentityProvider.
#
#openshift_master_ldap_ca=<ca text>
# or
#openshift_master_ldap_ca_file=<path to local ca file to use>
# OpenID auth
#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
#
# Configure OpenID CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the OpenIDIdentityProvider.
#
#openshift_master_openid_ca=<ca text>
# or
#openshift_master_openid_ca_file=<path to local ca file to use>
# Request header auth
#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
#
# Configure request header CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "clientCA"
# key set within the RequestHeaderIdentityProvider.
#
#openshift_master_request_header_ca=<ca text>
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
# CloudForms Management Engine (ManageIQ) App Install
#
# Enables installation of MIQ server. Recommended for dedicated
# clusters only. See roles/openshift_management/README.md for instructions
# and requirements.
#openshift_management_install_management=False
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than storing
# sensitive configuration within the ansible inventory.
# For example:
#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
#
# AWS
#openshift_cloudprovider_kind=aws
# Note: IAM profiles may be used instead of storing API credentials on disk.
#openshift_cloudprovider_aws_access_key=aws_access_key_id
#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
#
# Openstack
#openshift_cloudprovider_kind=openstack
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
#openshift_cloudprovider_openstack_domain_id=domain_id
#openshift_cloudprovider_openstack_domain_name=domain_name
#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
#
# GCE
#openshift_cloudprovider_kind=gce
#
# vSphere
#openshift_cloudprovider_kind=vsphere
#openshift_cloudprovider_vsphere_username=username
#openshift_cloudprovider_vsphere_password=password
#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host
#openshift_cloudprovider_vsphere_datacenter=datacenter
#openshift_cloudprovider_vsphere_datastore=datastore
#openshift_cloudprovider_vsphere_folder=optional_folder_name
# Project Configuration
#osm_project_request_message=''
#osm_project_request_template=''
#osm_mcs_allocator_range='s0:/2'
#osm_mcs_labels_per_project=5
#osm_uid_allocator_range='1000000000-1999999999/10000'
# Configure additional projects
#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
# Enable cockpit
#osm_use_cockpit=true
#
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']
# Native high availability (default cluster method)
# If no lb group is defined, the installer assumes that a load balancer has
# been preconfigured. For installation the value of
# openshift_master_cluster_hostname must resolve to the load balancer
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}
# Configure api server arguments
#osm_api_server_args={'max-requests-inflight': ['400']}
# default subdomain to use for exposed routes
#openshift_master_default_subdomain=apps.test.example.com
# additional cors origins
#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
# default project node selector
#osm_default_node_selector='region=primary'
# Override the default pod eviction timeout
#openshift_master_pod_eviction_timeout=5m
# Override the default oauth tokenConfig settings:
# openshift_master_access_token_max_seconds=86400
# openshift_master_auth_token_max_seconds=500
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
# Override master and node servingInfo.minTLSVersion and .cipherSuites
# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
#openshift_master_min_tls_version=VersionTLS12
#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
#
#openshift_node_min_tls_version=VersionTLS12
#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
# OpenShift Router Options
#
# An OpenShift router will be created during install if there are
# nodes present with labels matching the default router selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Router selector (optional)
# Router will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_router_selector='region=infra'
#
# Router replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift router selector.
#openshift_hosted_router_replicas=2
#
# Router force subdomain (optional)
# A router path format to force on all routes used by this router
# (will ignore the route host value)
#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
#
# Router certificate (optional)
# Provide local certificate paths which will be configured as the
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
# Manage the OpenShift Router (optional)
#openshift_hosted_manage_router=true
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
#
#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
# OpenShift Registry Console Options
# Override the console image prefix:
# origin default is "cockpit/", enterprise default is "openshift3/"
#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
# origin default is "kubernetes", enterprise default is "registry-console"
#openshift_cockpit_deployer_basename=my-console
# Override image version, defaults to latest for origin, vX.Y product version for enterprise
#openshift_cockpit_deployer_version=1.4.1
# Openshift Registry Options
#
# An OpenShift registry will be created during install if there are
# nodes present with labels matching the default registry selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Registry selector (optional)
# Registry will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_registry_selector='region=infra'
#
# Registry replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift registry selector.
#openshift_hosted_registry_replicas=2
#
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
# Manage the OpenShift Registry (optional)
#openshift_hosted_manage_registry=true
# Registry Storage Options
#
# NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/registry". "exports" is
# the name of the export served by the nfs server. "registry" is
# the name of a directory inside of "/exports".
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
# nfs_directory must conform to a DNS-1123 subdomain: it must consist of lower-case
# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
#
# External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/registry". "exports" is
# the name of the export served by the nfs server. "registry" is
# the name of a directory inside of "/exports".
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=nfs.example.com
# nfs_directory must conform to a DNS-1123 subdomain: it must consist of lower-case
# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
#
# Openstack
# Volume must already exist.
#openshift_hosted_registry_storage_kind=openstack
#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_registry_storage_openstack_filesystem=ext4
#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
#openshift_hosted_registry_storage_volume_size=10Gi
#
# AWS S3
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_encrypt=false
#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Any S3 service (Minio, ExoScale, ...): Basically the same as above
# but with regionendpoint configured
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=access_key_id
#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Additional CloudFront Options. When using CloudFront all three
# of the following variables must be defined.
#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
# Metrics deployment
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
#openshift_metrics_install_metrics=true
#
# Storage Options
# If openshift_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
# manually after the fact and metrics scaled per the docs.
#
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics". "exports" is
# the name of the export served by the nfs server. "metrics" is
# the name of a directory inside of "/exports".
#openshift_metrics_storage_kind=nfs
#openshift_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_metrics_storage_nfs_directory=/exports
#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_metrics_storage_volume_name=metrics
#openshift_metrics_storage_volume_size=10Gi
#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics". "exports" is
# the name of the export served by the nfs server. "metrics" is
# the name of a directory inside of "/exports".
#openshift_metrics_storage_kind=nfs
#openshift_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_metrics_storage_host=nfs.example.com
#openshift_metrics_storage_nfs_directory=/exports
#openshift_metrics_storage_volume_name=metrics
#openshift_metrics_storage_volume_size=10Gi
#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
#
# Override metricsPublicURL in the master config for cluster metrics
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
# Configure the prefix and version for the component images
#openshift_metrics_image_prefix=docker.io/openshift/origin-
#openshift_metrics_image_version=v3.7
# when openshift_deployment_type=='openshift-enterprise'
#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
#openshift_metrics_image_version=v3.7
#
# StorageClass
# openshift_storageclass_name=gp2
# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
#
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
#openshift_logging_install_logging=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging". "exports" is
# the name of the export served by the nfs server. "logging" is
# the name of a directory inside of "/exports".
#openshift_logging_storage_kind=nfs
#openshift_logging_storage_access_modes=['ReadWriteOnce']
#openshift_logging_storage_nfs_directory=/exports
#openshift_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_logging_storage_volume_name=logging
#openshift_logging_storage_volume_size=10Gi
#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging". "exports" is
# the name of the export served by the nfs server. "logging" is
# the name of a directory inside of "/exports".
#openshift_logging_storage_kind=nfs
#openshift_logging_storage_access_modes=['ReadWriteOnce']
#openshift_logging_storage_host=nfs.example.com
#openshift_logging_storage_nfs_directory=/exports
#openshift_logging_storage_volume_name=logging
#openshift_logging_storage_volume_size=10Gi
#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
#
# Other Logging Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.{{ openshift_master_default_subdomain }}
#openshift_logging_kibana_hostname=logging.apps.example.com
# Configure the number of Elasticsearch nodes; unless you're using dynamic provisioning,
# this value must be 1
#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
#openshift_logging_image_prefix=docker.io/openshift/origin-
#openshift_logging_image_version=v3.7.0
# when openshift_deployment_type=='openshift-enterprise'
#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
#openshift_logging_image_version=v3.7.0
# Prometheus deployment
#
# Currently prometheus deployment is disabled by default, enable it by setting this
#openshift_hosted_prometheus_deploy=true
#
# Prometheus storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/prometheus"
#openshift_prometheus_storage_kind=nfs
#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
#openshift_prometheus_storage_nfs_directory=/exports
#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
#openshift_prometheus_storage_volume_name=prometheus
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/prometheus"
#openshift_prometheus_storage_kind=nfs
#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
#openshift_prometheus_storage_host=nfs.example.com
#openshift_prometheus_storage_nfs_directory=/exports
#openshift_prometheus_storage_volume_name=prometheus
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
#openshift_prometheus_alertmanager_storage_host=nfs.example.com
#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
# which are destroyed when pods are deleted
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
# Configure SDN cluster network and kubernetes service CIDR blocks. These
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Cannot be changed
# after deployment.
#
# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
# 172.17.0.0/16. Your installation will fail and/or your configuration change will
# cause the Pod SDN or Cluster SDN to fail.
#
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
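#
# A sketch of /etc/sysconfig/docker-network with that workaround applied:
#   DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'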
# When upgrading or scaling up the following must match what's in your master config!
# Inventory: master yaml field
# osm_cluster_network_cidr: clusterNetworkCIDR
# openshift_portal_net: serviceNetworkCIDR
# When installing osm_cluster_network_cidr and openshift_portal_net must be set.
# Sane examples are provided below.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
# is prefixed with !, IPs in that CIDR will be rejected. Rejections
# will be applied first, then the IP checked against one of the
# allowed CIDRs. You should ensure this range does not overlap with
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
# be assigned. It may contain a single CIDR that will be allocated from. For
# security reasons, you should ensure that this range does not overlap with
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
# Configure the number of bits to allocate to each host's subnet, e.g. 9
# would mean a /23 network on the host.
# When upgrading or scaling up the following must match what's in your master config!
# Inventory: master yaml field
# osm_host_subnet_length: hostSubnetLength
# When installing osm_host_subnet_length must be set. A sane example is provided below.
#osm_host_subnet_length=9
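#
# Worked example: osm_host_subnet_length=9 leaves 32-9=23 prefix bits, i.e. a
# /23 (512 addresses) per host; with osm_cluster_network_cidr=10.128.0.0/14
# that leaves room for 2^(23-14)=512 host subnets.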
# Configure master API and console ports.
#openshift_master_api_port=8443
#openshift_master_console_port=8443
# set exact RPM version (include - prefix)
#openshift_pkg_version=-3.6.0
# you may also specify version and release, e.g.:
#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7
# Configure custom ca certificate
#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
#
# NOTE: CA certificate will not be replaced on existing clusters.
# This option may only be specified when creating a new cluster or
# when redeploying cluster certificates with the redeploy-certificates
# playbook.
# Configure custom named certificates (SNI certificates)
#
# https://docs.openshift.org/latest/install_config/certificate_customization.html
# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
#
# NOTE: openshift_master_named_certificates is cached on masters and is an
# additive fact, meaning that each run with a different set of certificates
# will add the newly provided certificates to the cached set of certificates.
#
# An optional CA may be specified for each named certificate. CAs will
# be added to the OpenShift CA bundle which allows for the named
# certificate to be served for internal cluster communication.
#
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
#
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
# Session options
#openshift_master_session_name=ssn
#openshift_master_session_max_seconds=3600
# An authentication and encryption secret will be generated if secrets
# are not provided. If provided, openshift_master_session_auth_secrets
# and openshift_master_session_encryption_secrets must be of equal length.
#
# Signing secrets, used to authenticate sessions using
# HMAC. Recommended to use secrets with 32 or 64 bytes.
#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
#
# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
# characters long, to select AES-128, AES-192, or AES-256.
#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
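#
# For illustration, a 32-character secret can be generated with, e.g.:
#   openssl rand -base64 24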
# configure how often node iptables rules are refreshed
#openshift_node_iptables_sync_period=5s
# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
#openshift_set_node_ip=True
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# openshift-ansible will wait indefinitely for your input when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This mis-configuration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
# in versions >= 3.6
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
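#
# A minimal sketch of such a file (dnsmasq syntax; upstream server hypothetical):
#   strict-order
#   server=/example.com/10.1.2.3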
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
#
# Hosts in the openshift_no_proxy list will NOT use any globally
# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
#
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
# specify that domain above instead.
#
# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
# variable (above) and set this value to False
#openshift_generate_no_proxy_hosts=True
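#
# That example (hypothetical domain) would look like:
#openshift_no_proxy='.ex.com'
#openshift_generate_no_proxy_hosts=False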
#
# These options configure the BuildDefaults admission controller which injects
# configuration into Builds. Proxy related values will default to the global proxy
# config values. You only need to set these if they differ from the global proxy settings.
# See BuildDefaults documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_no_proxy=mycorp.com
#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
#openshift_builddefaults_resources_requests_cpu=100m
#openshift_builddefaults_resources_requests_memory=256Mi
#openshift_builddefaults_resources_limits_cpu=1000m
#openshift_builddefaults_resources_limits_memory=512Mi
# Or you may optionally define your own build defaults configuration serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
# These options configure the BuildOverrides admission controller which injects
# configuration into Builds.
# See BuildOverrides documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_buildoverrides_force_pull=true
#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
# Enable service catalog
#openshift_enable_service_catalog=true
# Enable template service broker (requires service catalog to be enabled, above)
#template_service_broker_install=true
# Force a specific prefix (i.e. the registry) to use when pulling the service catalog image
# NOTE: The registry all the way up to the start of the image name must be provided. Two examples
# are provided below.
#openshift_service_catalog_image_prefix=docker.io/openshift/origin-
#openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose-
# Force a specific image version to use when pulling the service catalog image
#openshift_service_catalog_image_version=v3.7
# Configure one or more namespaces whose templates will be served by the TSB
#openshift_template_service_broker_namespaces=['openshift']
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
# Admission plugin config
#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
# Configure usage of openshift_clock role.
#openshift_clock_enabled=true
# OpenShift Per-Service Environment Variables
# Environment variables are added to /etc/sysconfig files for
# each OpenShift service: node, master (api and controllers).
# API and controllers environment variables are merged in single
# master environments.
#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
# Enable API service auditing
#openshift_master_audit_config={"enabled": true}
#
# If you want a more advanced setup for the audit log, you can
# use the line below.
# The directory in "auditFilePath" will be created if it does not
# exist.
#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
# by openshift_deployment_type=origin
#openshift_enable_origin_repo=false
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
#openshift_ca_cert_expire_days=1825
#openshift_node_cert_expire_days=730
#openshift_master_cert_expire_days=730
# Validity of the auto-generated external etcd certificates in days.
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
#
# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
# openshift_master_saconfig_limitsecretreferences=false
# Upgrade Control
#
# By default nodes are upgraded serially, one at a time, and all failures
# are fatal. There is one set of variables for normal nodes and one set for
# nodes that are part of the control plane, as the number of hosts may differ
# between those two groups.
#openshift_upgrade_nodes_serial=1
#openshift_upgrade_nodes_max_fail_percentage=0
#openshift_upgrade_control_plane_nodes_serial=1
#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
#
# You can specify the number of nodes to upgrade at once. We do not currently
# attempt to verify that you have capacity to drain this many nodes at once
# so please be careful when specifying these values. You should also verify that
# the expected number of nodes are all schedulable and ready before starting an
# upgrade. If it's not possible to drain the requested nodes the upgrade will
# stall indefinitely until the drain is successful.
#
# If you're upgrading more than one node at a time you can specify the maximum
# percentage of failure within the batch before the upgrade is aborted. Any
# nodes that do fail are ignored for the rest of the playbook run and you should
# take care to investigate the failure and return the node to service so that
# your cluster returns to full capacity.
#
# The failure percentage must exceed the configured value before the upgrade
# aborts: two failures out of a batch of four is 50%, so this would fail on two failures
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
# whereas this would not
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
#
# A timeout to wait for nodes to drain pods can be specified to ensure that the
# upgrade continues even if nodes fail to drain pods in the allowed time. The
# default value of 0 will wait indefinitely allowing the admin to investigate
# the root cause and ensuring that disruption budgets are respected. If a
# timeout of 0 is used there will also be one attempt to re-try draining the
# node. If a non-zero timeout is specified there will be no attempt to retry.
#openshift_upgrade_nodes_drain_timeout=0
#
# Multiple data migrations take place and if they fail they will fail the upgrade.
# You may wish to disable these or make them non-fatal.
#
# openshift_upgrade_pre_storage_migration_enabled=true
# openshift_upgrade_pre_storage_migration_fatal=true
# openshift_upgrade_post_storage_migration_enabled=true
# openshift_upgrade_post_storage_migration_fatal=false
######################################################################
# CloudForms/ManageIQ (CFME/MIQ) Configuration
# See the readme for full descriptions and getting started
# instructions: ../../roles/openshift_management/README.md or go directly to
# their definitions: ../../roles/openshift_management/defaults/main.yml
# ../../roles/openshift_management/vars/main.yml
#
# Namespace for the CFME project
#openshift_management_project: openshift-management
# Namespace/project description
#openshift_management_project_description: CloudForms Management Engine
# Choose 'miq-template' for a podified database install
# Choose 'miq-template-ext-db' for an external database install
#
# If you are using the miq-template-ext-db template then you must add
# the required database parameters to the
# openshift_management_template_parameters variable.
#openshift_management_app_template: miq-template
# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
#openshift_management_storage_class: nfs
# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
# NetApp appliance, then you must set the hostname here. Leave the
# value as 'false' if you are not using external NFS.
#openshift_management_storage_nfs_external_hostname: false
# [OPTIONAL] - If you are using external NFS then you must set the base
# path to the exports location here.
#
# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
# that will back the application PV and optionally the database
# PV. Export path definitions are relative to
# {{ openshift_management_storage_nfs_base_dir }}
#
# LOCAL NFS NOTE:
#
# You may also change this value if you want to change the default
# path used for local NFS exports.
#openshift_management_storage_nfs_base_dir: /exports
# LOCAL NFS NOTE:
#
# You may override the automatically selected LOCAL NFS server by
# setting this variable. Useful for testing specific task files.
#openshift_management_storage_nfs_local_hostname: false
# These are the default values for the username and password of the
# management app. Changing these values in your inventory will not
# change your username or password. You should only need to change
# these values in your inventory if you already changed the actual
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
# playbooks/openshift-management/add_container_provider.yml
#openshift_management_username: admin
#openshift_management_password: smartvm
# A hash of parameters you want to override or set in the
# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
# your inventory file as a simple hash. Acceptable values are defined
# under the .parameters list in files/miq-template{-ext-db}.yaml
# Example:
#
# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
#openshift_management_template_parameters: {}
# Firewall configuration
# You can open additional firewall ports by defining them as a list of service
# names and ports/port ranges for either masters or nodes.
#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
# NOTE: Containerized load balancer hosts are not yet supported; if using a global
# containerized=true host variable, it must be set to false on these hosts.
[lb]
ose3-lb-ansible.test.example.com containerized=false
# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
# However, in order to ensure that your masters are not burdened with running pods, you should
# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
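# For example (illustrative; note that the sample [nodes] group below omits this flag):
# ose3-master[1:3]-ansible.test.example.com openshift_schedulable=False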
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
[nfs]
ose3-nfs-ansible.test.example.com