98 files changed, 1463 insertions, 422 deletions
diff --git a/.gitignore b/.gitignore index 8f46c269f..081659a94 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ gce.ini multi_ec2.yaml multi_inventory.yaml .vagrant +.tags* diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index bc2fab995..d176e08c5 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.38-1 ./ +3.0.40-1 ./ diff --git a/README_GCE.md b/README_GCE.md index ea673b44d..9439b569e 100644 --- a/README_GCE.md +++ b/README_GCE.md @@ -42,12 +42,17 @@ Create a gce.ini file for GCE Mandatory customization variables (check the values according to your tenant): * zone = europe-west1-d * network = default -* gce_machine_type = n1-standard-2 -* gce_machine_master_type = n1-standard-1 -* gce_machine_node_type = n1-standard-2 -* gce_machine_image = preinstalled-slave-50g-v5 -* gce_machine_master_image = preinstalled-slave-50g-v5 -* gce_machine_node_image = preinstalled-slave-50g-v5 + +Optional Variable Overrides: +* gce_ssh_user - ssh user, defaults to the current logged in user +* gce_machine_type = n1-standard-1 - default machine type +* gce_machine_etcd_type = n1-standard-1 - machine type for etcd hosts +* gce_machine_master_type = n1-standard-1 - machine type for master hosts +* gce_machine_node_type = n1-standard-1 - machine type for node hosts +* gce_machine_image = centos-7 - default image +* gce_machine_etcd_image = centos-7 - image for etcd hosts +* gce_machine_master_image = centos-7 - image for master hosts +* gce_machine_node_image = centos-7 - image for node hosts 1. vi ~/.gce/gce.ini @@ -62,9 +67,9 @@ network = default gce_machine_type = n1-standard-2 gce_machine_master_type = n1-standard-1 gce_machine_node_type = n1-standard-2 -gce_machine_image = preinstalled-slave-50g-v5 -gce_machine_master_image = preinstalled-slave-50g-v5 -gce_machine_node_image = preinstalled-slave-50g-v5 +gce_machine_image = centos-7 +gce_machine_master_image = centos-7 +gce_machine_node_image = centos-7 ``` 1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it @@ -92,10 +97,15 @@ argument will result in all gce instances being listed) Creating a cluster ------------------ -1. To create a cluster with one master and two nodes +1. To create a cluster with one master, one infra node, and two compute nodes ``` bin/cluster create gce <cluster-id> ``` +1. To create a cluster with 3 masters, 3 etcd hosts, 2 infra nodes and 10 +compute nodes +``` + bin/cluster create gce -m 3 -e 3 -i 2 -n 10 <cluster-id> +``` Updating a cluster --------------------- @@ -104,6 +114,16 @@ Updating a cluster bin/cluster update gce <cluster-id> ``` +Add additional nodes +--------------------- +1. To add additional infra nodes +``` + bin/cluster add-nodes gce -i <num nodes> <cluster-id> +``` +1. To add additional compute nodes +``` + bin/cluster add-nodes gce -n <num nodes> <cluster-id> +``` Terminating a cluster --------------------- 1. 
To terminate the cluster diff --git a/Vagrantfile b/Vagrantfile index 362e1ff48..a38378289 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -30,7 +30,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| libvirt.memory = 1024 libvirt.driver = 'kvm' case deployment_type - when "enterprise" + when "openshift-enterprise" + override.vm.box = "rhel-7" + when "atomic-enterprise" override.vm.box = "rhel-7" when "origin" override.vm.box = "centos/7" diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 2b39bb59e..dcda14c63 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -556,6 +556,147 @@ class FilterModule(object): except Exception as my_e: raise errors.AnsibleFilterError('Failed to convert: %s', my_e) + @staticmethod + def oo_openshift_env(hostvars): + ''' Return facts which begin with "openshift_" + Ex: hostvars = {'openshift_fact': 42, + 'theyre_taking_the_hobbits_to': 'isengard'} + returns = {'openshift_fact': 42} + ''' + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + + facts = {} + regex = re.compile('^openshift_.*') + for key in hostvars: + if regex.match(key): + facts[key] = hostvars[key] + return facts + + @staticmethod + # pylint: disable=too-many-branches + def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): + """ Generate list of persistent volumes based on oo_openshift_env + storage options set in host variables. + """ + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + if not issubclass(type(groups), dict): + raise errors.AnsibleFilterError("|failed expects groups is a dict") + if persistent_volumes != None and not issubclass(type(persistent_volumes), list): + raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list") + + if persistent_volumes == None: + persistent_volumes = [] + for component in hostvars['openshift']['hosted']: + kind = hostvars['openshift']['hosted'][component]['storage']['kind'] + create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] + if kind != None and create_pv: + if kind == 'nfs': + host = hostvars['openshift']['hosted'][component]['storage']['host'] + if host == None: + if len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = hostvars['openshift']['hosted'][component]['storage']['nfs']['directory'] + volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] + path = directory + '/' + volume + size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] + access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + persistent_volumes.append(persistent_volume) + else: + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + raise errors.AnsibleFilterError(msg) + return persistent_volumes + + @staticmethod + def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): + """ Generate list of persistent volume claims based on oo_openshift_env + storage options set in host variables. 
+ """ + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + if persistent_volume_claims != None and not issubclass(type(persistent_volume_claims), list): + raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list") + + if persistent_volume_claims == None: + persistent_volume_claims = [] + for component in hostvars['openshift']['hosted']: + kind = hostvars['openshift']['hosted'][component]['storage']['kind'] + create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] + if kind != None and create_pv: + volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] + size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] + access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + persistent_volume_claims.append(persistent_volume_claim) + return persistent_volume_claims + + @staticmethod + def oo_31_rpm_rename_conversion(rpms, openshift_version=None): + """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms + names with proper version (if provided) + + If 3.1 rpms are passed in they will only be augmented with the + correct version. This is important for hosts that are running both + Masters and Nodes. + """ + if not isinstance(rpms, list): + raise errors.AnsibleFilterError("failed expects to filter on a list") + if openshift_version is not None and not isinstance(openshift_version, basestring): + raise errors.AnsibleFilterError("failed expects openshift_version to be a string") + + rpms_31 = [] + for rpm in rpms: + if not 'atomic' in rpm: + rpm = rpm.replace("openshift", "atomic-openshift") + if openshift_version: + rpm = rpm + openshift_version + rpms_31.append(rpm) + + return rpms_31 + + @staticmethod + def oo_pods_match_component(pods, deployment_type, component): + """ Filters a list of Pods and returns the ones matching the deployment_type and component + """ + if not isinstance(pods, list): + raise errors.AnsibleFilterError("failed expects to filter on a list") + if not isinstance(deployment_type, basestring): + raise errors.AnsibleFilterError("failed expects deployment_type to be a string") + if not isinstance(component, basestring): + raise errors.AnsibleFilterError("failed expects component to be a string") + + image_prefix = 'openshift/origin-' + if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: + image_prefix = 'openshift3/ose-' + elif deployment_type == 'atomic-enterprise': + image_prefix = 'aep3_beta/aep-' + + matching_pods = [] + image_regex = image_prefix + component + r'.*' + for pod in pods: + for container in pod['spec']['containers']: + if re.search(image_regex, container['image']): + matching_pods.append(pod) + break # stop here, don't add a pod more than once + + return matching_pods + def filters(self): """ returns a mapping of filters to methods """ return { @@ -578,4 +719,9 @@ class FilterModule(object): "oo_generate_secret": self.oo_generate_secret, "to_padded_yaml": self.to_padded_yaml, "oo_nodes_with_label": self.oo_nodes_with_label, + "oo_openshift_env": self.oo_openshift_env, + "oo_persistent_volumes": self.oo_persistent_volumes, + "oo_persistent_volume_claims": self.oo_persistent_volume_claims, + "oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion, + "oo_pods_match_component": self.oo_pods_match_component, } diff --git 
a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py index 35a881a85..c41367f05 100644 --- a/filter_plugins/openshift_master.py +++ b/filter_plugins/openshift_master.py @@ -53,7 +53,20 @@ class IdentityProviderBase(object): self.challenge = ansible_bool(self._idp.pop('challenge', False)) self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind')) - self._required = [['mappingMethod', 'mapping_method']] + mm_keys = ('mappingMethod', 'mapping_method') + mapping_method = None + for key in mm_keys: + if key in self._idp: + mapping_method = self._idp[key] + if mapping_method is None: + mapping_method = self.get_default('mappingMethod') + self.mapping_method = mapping_method + + valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] + if self.mapping_method not in valid_mapping_methods: + raise errors.AnsibleFilterError("|failed unkown mapping method " + "for provider {0}".format(self.__class__.__name__)) + self._required = [] self._optional = [] self._allow_additional = True @@ -75,10 +88,7 @@ class IdentityProviderBase(object): def validate(self): ''' validate an instance of this idp class ''' - valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] - if self.provider['mappingMethod'] not in valid_mapping_methods: - raise errors.AnsibleFilterError("|failed unkown mapping method " - "for provider {0}".format(self.__class__.__name__)) + pass @staticmethod def get_default(key): @@ -121,7 +131,8 @@ class IdentityProviderBase(object): def to_dict(self): ''' translate this idp to a dictionary ''' return dict(name=self.name, challenge=self.challenge, - login=self.login, provider=self.provider) + login=self.login, mappingMethod=self.mapping_method, + provider=self.provider) class LDAPPasswordIdentityProvider(IdentityProviderBase): @@ -436,7 +447,9 @@ class GitHubIdentityProvider(IdentityProviderOauthBase): Raises: AnsibleFilterError: """ - pass + def __init__(self, api_version, idp): + IdentityProviderOauthBase.__init__(self, api_version, idp) + self._optional += [['organizations']] class FilterModule(object): diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example index d515ccdad..a2e1708d4 100644 --- a/inventory/byo/hosts.aep.example +++ b/inventory/byo/hosts.aep.example @@ -74,7 +74,7 @@ deployment_type=atomic-enterprise #openshift_additional_repos=[{'id': 'aep-devel', 'name': 'aep-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] # htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}] +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] # Allow all auth #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] @@ -95,8 +95,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# Native high availbility cluster method with optional load balancer. -# If no lb group is defined installer assumes that a load balancer has +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has # been preconfigured. 
For installation the value of # openshift_master_cluster_hostname must resolve to the load balancer # or to one or all of the masters defined in the inventory if no load @@ -139,7 +139,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default storage plugin dependencies to install, by default the ceph and # glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs'] +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] # default selectors for router and registry services # openshift_router_selector='region=infra' @@ -200,6 +200,40 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure dnsIP in the node config #openshift_dns_ip=172.30.0.1 +# Persistent Storage Options +# +## Registry Storage Options +## +## Storage Kind +## Specifies which storage kind will be used for the registry. +## "nfs" is the only supported kind at this time. +##openshift_hosted_registry_storage_kind=nfs +## +## Storage Host +## This variable can be used to identify a pre-existing storage host +## if a storage host group corresponding to the storage kind (such as +## [nfs]) is not specified, +##openshift_hosted_registry_storage_host=nfs.example.com +## +## NFS Export Options +##openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +## +## NFS Export Directory +## Specify the root exports directory. This directory will be created +## if specifying an [nfs] host group. +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_nfs_directory=/exports +## +## Registry Volume Name +## Specify the storage volume name. This directory will be created +## within openshift_hosted_registry_storage_nfs_directory if +## specifying an [nfs] group. Ex. /exports/registry +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_volume_name=registry +## +## Persistent Volume Access Mode +##openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] + # Configure node kubelet arguments #openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} @@ -207,6 +241,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://github.com/nickhammond/ansible-logrotate #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. 
+#openshift_override_hostname_check=true + # host group for masters [masters] aep3-master[1:3]-ansible.test.example.com diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 62af9aee4..de1d0f88b 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -79,7 +79,7 @@ deployment_type=origin #openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}] # htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}] +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] # Allow all auth #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] @@ -100,8 +100,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# Native high availbility cluster method with optional load balancer. -# If no lb group is defined installer assumes that a load balancer has +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has # been preconfigured. For installation the value of # openshift_master_cluster_hostname must resolve to the load balancer # or to one or all of the masters defined in the inventory if no load @@ -144,7 +144,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default storage plugin dependencies to install, by default the ceph and # glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs'] +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] # default selectors for router and registry services # openshift_router_selector='region=infra' @@ -205,10 +205,39 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure dnsIP in the node config #openshift_dns_ip=172.30.0.1 -# NFS Options -#openshift_nfs_exports_dir=/var/export -#openshift_nfs_registry_volume=regvol -#openshift_nfs_export_options='*(rw,sync,all_squash)' +# Persistent Storage Options +# +## Registry Storage Options +## +## Storage Kind +## Specifies which storage kind will be used for the registry. +## nfs is the only supported kind at this time. +##openshift_hosted_registry_storage_kind=nfs +## +## Storage Host +## This variable can be used to identify a pre-existing storage host +## if a storage host group corresponding to the storage kind (such as +## [nfs]) is not specified, +##openshift_hosted_registry_storage_host=nfs.example.com +## +## NFS Export Options +##openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +## +## NFS Export Directory +## Specify the root exports directory. This directory will be created +## if specifying an [nfs] host group. +## This variable must be supplied if using a pre-existing nfs server. 
+##openshift_hosted_registry_storage_nfs_directory=/exports +## +## Registry Volume Name +## Specify the storage volume name. This directory will be created +## within openshift_hosted_registry_storage_nfs_directory if +## specifying an [nfs] group. Ex: /exports/registry +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_volume_name=registry +## +## Persistent Volume Access Mode +##openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] # Configure node kubelet arguments #openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} @@ -217,6 +246,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://github.com/nickhammond/ansible-logrotate #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. +#openshift_override_hostname_check=true + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 4825c9de5..c80be574f 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -74,7 +74,7 @@ deployment_type=openshift-enterprise #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] # htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}] +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] # Allow all auth #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] @@ -95,8 +95,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# Native high availbility cluster method with optional load balancer. -# If no lb group is defined installer assumes that a load balancer has +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has # been preconfigured. 
For installation the value of # openshift_master_cluster_hostname must resolve to the load balancer # or to one or all of the masters defined in the inventory if no load @@ -200,6 +200,40 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure dnsIP in the node config #openshift_dns_ip=172.30.0.1 +# Persistent Storage Options +# +## Registry Storage Options +## +## Storage Kind +## Specifies which storage kind will be used for the registry. +## "nfs" is the only supported kind at this time. +##openshift_hosted_registry_storage_kind=nfs +## +## Storage Host +## This variable can be used to identify a pre-existing storage host +## if a storage host group corresponding to the storage kind (such as +## [nfs]) is not specified, +##openshift_hosted_registry_storage_host=nfs.example.com +## +## NFS Export Options +##openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +## +## NFS Export Directory +## Specify the root exports directory. This directory will be created +## if specifying an [nfs] host group. +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_nfs_directory=/exports +## +## Registry Volume Name +## Specify the storage volume name. This directory will be created +## within openshift_hosted_registry_storage_nfs_directory if +## specifying an [nfs] group Ex: /exports/registry +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_volume_name=registry +## +## Persistent Volume Access Mode +##openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] + # Configure node kubelet arguments #openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} @@ -207,6 +241,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # See: https://github.com/nickhammond/ansible-logrotate #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. 
+#openshift_override_hostname_check=true + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 218c714f5..7c7fb399c 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.38 +Version: 3.0.40 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,66 @@ Atomic OpenShift Utilities includes %changelog +* Thu Feb 11 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.40-1 +- Bug 1306665 - [metrics] update metrics-deployer template to use latest image + versions (bleanhar@redhat.com) +- Add organizations attribute to github identity provider (jdetiber@redhat.com) +- use correct dict key (jdiaz@redhat.com) +- handle being passed an empty group list (jdiaz@redhat.com) +- fix default value (jdetiber@redhat.com) +- removed notscheduleable trigger, it just makes noise in its current + incarnation (sten@redhat.com) +- trigger on two successive bad pid counts (jdiaz@redhat.com) +- added nodes not ready and nodes not schedulable triggers (sten@redhat.com) +- Enable selection of kubeproxy mode (vishal.patil@nuagenetworks.net) +- add default storage plugins to 'origin' deployment_type + (rvanveelen@tremorvideo.com) +- added nodes not ready and nodes not schedulable triggers (sten@redhat.com) +- Don't mask master service on atomic. (abutcher@redhat.com) +- update defaults and examples w/ iscsi plugin (rvanveelen@tremorvideo.com) +- add iscsi storage_plugin dependency (rvanveelen@tremorvideo.com) +- Add gte check for 3.2, update version checks to gte (jdetiber@redhat.com) +- Specify default namespace when creating router (pat2man@gmail.com) +- add missing connection:local (jdetiber@redhat.com) +- consolidate oo_first_master post-config a bit, fix some roles that use + openshift_facts without declaring a dependency (jdetiber@redhat.com) +- openshift_serviceaccounts updates (jdetiber@redhat.com) +- Fix infra_node deployment (jdetiber@redhat.com) +- changed registry checks to alert based on number of registries with problems + (sten@redhat.com) +- Fix a bug with existing CNAME records (rharriso@redhat.com) +- Fix HA typo in example AEP/OSE/Origin inventories (adellape@redhat.com) +- Updated the key for app create (kwoodson@redhat.com) +- Add missing atomic- and openshift-enterprise (pep@redhat.com) +- Fix enabling iptables for latest rhel versions (jdetiber@redhat.com) +- Make pod_eviction_timeout configurable from cli (jawed.khelil@amadeus.com) + +* Tue Feb 09 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.39-1 +- Bug 1304150 - Can't upgrade atomic-openshift to specified version + (bleanhar@redhat.com) +- Mask master service when using native ha (jdetiber@redhat.com) +- aoi: Safer check for master_routingconfig_subdomain (smunilla@redhat.com) +- Add a DNS server on OpenStack clusters (lhuard@amadeus.com) +- renamed /etc/openshift to /etc/origin (sten@redhat.com) +- gitignore : .tag* (atom editor tag files) (sdodson@redhat.com) +- Add an early check to ensure that node names resolve to an interface on the + host (sdodson@redhat.com) +- Allow compression option to be set to empty for non compressed QCow images + Support tgz and gzip compressed images (akram@free.fr) +- Replace status_changed bool (abutcher@redhat.com) +- Improve docs and consistency of setting the ssh_user (jdetiber@redhat.com) +- remove outdated comments (jdetiber@redhat.com) +- add etcd hosts for gce playbooks (jdetiber@redhat.com) +- GCE cloud 
provider updates (jdetiber@redhat.com) +- Remove extra nfs configuration. (abutcher@redhat.com) +- Do not apply the etcd_certificates role during node playbook. + (abutcher@redhat.com) +- Add g_new_node_hosts to cluster_hosts. (abutcher@redhat.com) +- Updating examples to use /etc/origin/master/htpasswd (jstuever@redhat.com) +- Refactor registry storage options. (abutcher@redhat.com) +- Additional overrides for cloud provider playbooks (jdetiber@redhat.com) +- Bring first etcd server up before others. (dgoodwin@redhat.com) + * Tue Feb 02 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.38-1 - aoi: Ask for osm_default_subdomain in interactive mode (smunilla@redhat.com) - add item to hold number of stray OVS rules found/removed (jdiaz@redhat.com) diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 347d9f574..81c1ee653 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -150,7 +150,7 @@ # We have to use the shell module because we can't set env vars with the command module. - name: "Place PV into oc" - shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}" + shell: "KUBECONFIG=/etc/origin/master/admin.kubeconfig oc create -f {{ pv_template | quote }}" register: oc_output - debug: var=oc_output diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 33fcf6af5..9fba856a2 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -10,7 +10,12 @@ openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_public_hostname: "{{ ec2_ip_address }}" + openshift_registry_selector: 'type=infra' openshift_router_selector: 'type=infra' openshift_infra_nodes: "{{ g_infra_hosts }}" openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}' openshift_master_cluster_method: 'native' + openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" + os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" + openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index c20f370bf..6dd5d8b62 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -18,7 +18,7 @@ hosts: oo_hosts_to_terminate roles: - role: rhel_unsubscribe - when: deployment_type == "enterprise" and + when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index e093b2580..10872e738 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -7,6 +7,8 @@ g_master_hosts: "{{ groups.masters | default([]) }}" g_node_hosts: "{{ groups.nodes | default([]) }}" +g_new_node_hosts: "{{ groups.new_nodes | default([]) }}" + g_nfs_hosts: "{{ groups.nfs | 
default([]) }}" g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index babdfb952..916dfd0a6 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -5,5 +5,6 @@ - openshift_facts tasks: - openshift_facts: + openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" register: result - debug: var=result diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index f564905ea..990ddd2f2 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -4,7 +4,7 @@ openshift_deployment_type: "{{ deployment_type }}" roles: - role: rhel_subscribe - when: deployment_type == "enterprise" and + when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 11e5b68f6..2cad4b362 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,6 +1,8 @@ --- - include: evaluate_groups.yml +- include: validate_hostnames.yml + - include: ../openshift-docker/config.yml - include: ../openshift-etcd/config.yml diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index db7105ed5..7917bfba5 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -47,7 +47,7 @@ # Use g_new_node_hosts if it exists otherwise g_node_hosts - set_fact: - g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([])) }}" + g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" - name: Evaluate oo_nodes_to_config add_host: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 8ec379109..3f5c37dde 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -74,6 +74,11 @@ - set_fact: g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}" + when: openshift_pkg_version is not defined + + - set_fact: + g_new_version: "{{ openshift_pkg_version | replace('-','') }}" + when: openshift_pkg_version is defined - fail: msg: This playbook requires Origin 1.0.6 or later @@ -229,12 +234,19 @@ hosts: oo_masters_to_config vars: openshift_version: "{{ openshift_pkg_version | default('') }}" + roles: + - openshift_facts tasks: - name: Upgrade to latest available kernel action: "{{ ansible_pkg_mgr}} name=kernel state=latest" - name: Upgrade master packages command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" + when: openshift_pkg_version is not defined + + - name: Upgrade packages + command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}" + when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise' - name: Ensure python-yaml 
present for config upgrade action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" @@ -251,7 +263,7 @@ openshift_master_certs_no_etcd: - admin.crt - master.kubelet-client.crt - - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" + - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}" - master.server.crt - openshift-master.crt - openshift-registry.crt @@ -415,6 +427,11 @@ tasks: - name: Upgrade node packages command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" + when: openshift_pkg_version is not defined + + - name: Upgrade packages + command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}" + when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise' - name: Restart node service service: name="{{ openshift.common.service_type }}-node" state=restarted @@ -555,46 +572,55 @@ - role: openshift_examples openshift_examples_import_command: replace pre_tasks: - - name: Check for default router + - name: Collect all routers command: > - {{ oc_cmd }} get -n default dc/router - register: _default_router + {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json + register: all_routers failed_when: false changed_when: false + - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + when: all_routers.rc == 0 + + - set_fact: haproxy_routers=[] + when: all_routers.rc != 0 + - name: Check for allowHostNetwork and allowHostPorts - when: _default_router.rc == 0 + when: all_routers.rc == 0 shell: > {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork register: _scc - name: Grant allowHostNetwork and allowHostPorts when: - - _default_router.rc == 0 + - all_routers.rc == 0 - "'false' in _scc.stdout" command: > {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1 - name: Update deployment config to 1.0.4/3.0.1 spec - when: _default_router.rc == 0 + when: all_routers.rc == 0 command: > - {{ oc_cmd }} patch dc/router -p + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}' --api-version=v1 + with_items: haproxy_routers - name: Switch to hostNetwork=true - when: _default_router.rc == 0 + when: all_routers.rc == 0 command: > - {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' --api-version=v1 + with_items: haproxy_routers - name: Update router image to current version - when: _default_router.rc == 0 + when: all_routers.rc == 0 command: > - {{ oc_cmd }} patch dc/router -p + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' --api-version=v1 + with_items: haproxy_routers - name: Check for default registry command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml index d8336fcae..d7a2ac405 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -20,13 +20,27 @@ - role: openshift_examples openshift_examples_import_command: replace pre_tasks: - - name: Check for default router + - name: Collect all routers command: > - {{ oc_cmd }} get -n default dc/router - register: _default_router + {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json + register: all_routers failed_when: false changed_when: false + - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + when: all_routers.rc == 0 + + - set_fact: haproxy_routers=[] + when: all_routers.rc != 0 + + - name: Update router image to current version + when: all_routers.rc == 0 + command: > + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + --api-version=v1 + with_items: haproxy_routers + - name: Check for default registry command: > {{ oc_cmd }} get -n default dc/docker-registry @@ -34,13 +48,6 @@ failed_when: false changed_when: false - - name: Update router image to current version - when: _default_router.rc == 0 - command: > - {{ oc_cmd }} patch dc/router -p - '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' - --api-version=v1 - - name: Update registry image to current version when: _default_registry.rc == 0 command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml index 91780de09..12b9c84d3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -41,7 +41,7 @@ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - name: Determine available versions - script: ../files/versions.sh {{ g_new_service_name }} openshift + script: ../files/versions.sh {{ g_new_service_name }} register: g_versions_result - set_fact: diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml new file mode 100644 index 000000000..047431b63 --- /dev/null +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -0,0 +1,26 @@ +--- +- include: evaluate_groups.yml + +- name: Gather and set facts for node hosts + hosts: oo_nodes_to_config + roles: + - openshift_facts + tasks: + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + - shell: + getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' + register: lookupip + changed_when: false + failed_when: false + - name: Warn user about bad openshift_hostname values + pause: + prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press CTRL-C to continue." 
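The `validate_hostnames.yml` play added above reduces to one comparison: resolve `openshift.common.hostname` and confirm the resulting address is bound to one of the host's own interfaces, pausing with a warning otherwise. A rough Python equivalent of that comparison, with a socket lookup standing in for the `getent ahostsv4` pipeline and the example hostname and addresses assumed:

```python
# Rough stand-in for the validate_hostnames.yml check: resolve the configured
# hostname and verify the address belongs to a local interface.
import socket

def hostname_owned_by_host(hostname, local_ipv4_addresses):
    """Return True if hostname resolves to one of this host's IPv4 addresses."""
    try:
        # analogous to `getent ahostsv4 <hostname> | head -n 1 | awk '{ print $1 }'`
        resolved = socket.gethostbyname(hostname)
    except socket.gaierror:
        return False
    return resolved in local_ipv4_addresses

# Example values (assumed) standing in for ansible_all_ipv4_addresses.
if not hostname_owned_by_host('node1.example.com', ['10.0.0.5', '192.168.122.10']):
    print("Warning: openshift_hostname does not resolve to an IP owned by this host; "
          "pods relying on hostPorts will fail liveness/readiness checks.")
```

Setting `openshift_override_hostname_check=true` in the inventory turns the indefinite pause into the short 10-second warning shown in the task that follows.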
+ seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" + when: lookupip.stdout not in ansible_all_ipv4_addresses diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 1a3f24371..77edbd1a6 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -18,6 +18,10 @@ - .config_managed - set_fact: + openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}" + when: openshift_master_pod_eviction_timeout is not defined + + - set_fact: openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" openshift_master_etcd_hosts: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] @@ -53,6 +57,11 @@ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" portal_net: "{{ openshift_master_portal_net | default(None) }}" + - openshift_facts: + role: hosted + openshift_env: + openshift_hosted_registry_storage_kind: 'nfs' + when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0 - name: Check status of external etcd certificatees stat: path: "{{ openshift.common.config_base }}/master/{{ item }}" @@ -141,7 +150,7 @@ openshift_master_certs_no_etcd: - admin.crt - master.kubelet-client.crt - - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" + - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}" - master.server.crt - openshift-master.crt - openshift-registry.crt @@ -165,10 +174,10 @@ master_cert_subdir: master-{{ openshift.common.hostname }} master_cert_config_dir: "{{ openshift.common.config_base }}/master" - set_fact: - openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) + openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) | oo_nodes_with_label('region', 'infra') | oo_collect('inventory_hostname') }}" - when: openshift_infra_nodes is not defined + when: openshift_infra_nodes is not defined and groups.oo_nodes_to_config | default([]) | length > 0 - name: Configure master certificates hosts: oo_first_master @@ -353,6 +362,8 @@ - name: Additional master configuration hosts: oo_first_master vars: + cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" + etcd_urls: "{{ openshift.master.etcd_urls }}" openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}" roles: @@ -364,30 +375,16 @@ when: openshift.common.use_cluster_metrics | bool - role: openshift_manageiq when: openshift.common.use_manageiq | bool - -- name: Enable cockpit - hosts: oo_first_master - vars: - cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" - roles: - role: cockpit when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit | bool or osm_use_cockpit is undefined ) - -- name: Configure flannel - hosts: oo_first_master - vars: - etcd_urls: "{{ openshift.master.etcd_urls }}" - roles: - role: flannel_register when: openshift.common.use_flannel | bool + - role: pods + when: openshift.common.deployment_type == 'online' + - role: 
os_env_extras + when: openshift.common.deployment_type == 'online' -# Additional instance config for online deployments -- name: Additional instance config - hosts: oo_masters_deployment_type_online - roles: - - pods - - os_env_extras - name: Delete temporary directory on localhost hosts: localhost @@ -398,26 +395,25 @@ - file: name={{ g_master_mktemp.stdout }} state=absent changed_when: False -- name: Configure service accounts - hosts: oo_first_master - - vars: - accounts: ["router", "registry"] - - roles: - - openshift_serviceaccounts - -- name: Create services +- name: Create persistent volumes and create hosted services hosts: oo_first_master vars: - attach_registry_volume: "{{ groups.oo_nfs_to_config | length > 0 }}" - pre_tasks: - - set_fact: - nfs_host: "{{ groups.oo_nfs_to_config.0 }}" - registry_volume_path: "{{ hostvars[groups.oo_nfs_to_config.0].openshift.nfs.exports_dir + '/' + hostvars[groups.oo_nfs_to_config.0].openshift.nfs.registry_volume }}" - when: attach_registry_volume | bool + attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" + deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" roles: + - role: openshift_persistent_volumes + when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 + - role: openshift_serviceaccounts + openshift_serviceaccounts_names: + - router + - registry + openshift_serviceaccounts_namespace: default + openshift_serviceaccounts_sccs: + - privileged - role: openshift_router - when: openshift.master.infra_nodes is defined + when: deploy_infra | bool - role: openshift_registry - when: openshift.master.infra_nodes is defined and attach_registry_volume | bool + when: deploy_infra | bool and attach_registry_volume | bool + diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index e3f5c17ca..ba7530ed7 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -2,4 +2,5 @@ - name: Configure nfs hosts hosts: oo_nfs_to_config roles: + - role: openshift_facts - role: openshift_storage_nfs diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 263da5455..56d30e9b9 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -80,6 +80,7 @@ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing roles: - role: etcd_certificates + when: openshift_use_flannel | default(false) | bool post_tasks: - name: Create a tarball of the etcd flannel certs command: > @@ -175,6 +176,7 @@ - name: Evaluate node groups hosts: localhost become: no + connection: local tasks: - name: Evaluate oo_containerized_master_nodes add_host: diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml new file mode 100644 index 000000000..765e03fdc --- /dev/null +++ b/playbooks/gce/openshift-cluster/add_nodes.yml @@ -0,0 +1,43 @@ +--- +- name: Launch instance(s) + hosts: localhost + connection: local + become: no + gather_facts: no + vars_files: + - vars.yml + vars: + oo_extend_env: True + tasks: + - fail: + msg: Deployment type not supported for gce provider yet + when: deployment_type == 'enterprise' + + - include: 
../../common/openshift-cluster/tasks/set_node_launch_facts.yml + vars: + type: "compute" + count: "{{ num_nodes }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" + gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" + + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml + vars: + type: "infra" + count: "{{ num_infra }}" + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "{{ sub_host_type }}" + gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" + gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" + +- include: scaleup.yml +- include: list.yml diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 8bc9b1e53..ba37a3a1f 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -1,6 +1,4 @@ --- -# TODO: fix firewall related bug with GCE and origin, since GCE is overriding -# /etc/sysconfig/iptables - include: ../../common/openshift-cluster/config.yml vars_files: - ../../gce/openshift-cluster/vars.yml @@ -13,6 +11,11 @@ openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" + openshift_registry_selector: 'type=infra' openshift_router_selector: 'type=infra' openshift_infra_nodes: "{{ g_infra_hosts }}" openshift_master_cluster_method: 'native' + openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" + os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" + openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml deleted file mode 100644 index 75343dffa..000000000 --- a/playbooks/gce/openshift-cluster/join_node.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ node_ip }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- name: Populate oo_masters_to_config host group - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ node_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: oo_nodes_to_config - - - name: Evaluate oo_first_master - add_host: - name: "{{ master_hosts | first }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ 
deployment_vars[deployment_type].sudo }}" - groups: oo_first_master - when: master_hosts is defined and master_hosts|length > 0 - -#- include: config.yml -- include: ../../common/openshift-node/config.yml - vars: - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 4 - openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ ansible_default_ipv4.address }}" - openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} " diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 562bf8d29..7532a678b 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -10,6 +10,17 @@ - fail: msg="Deployment type not supported for gce provider yet" when: deployment_type == 'enterprise' + - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ etcd_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + g_sub_host_type: "default" + gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}" + gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}" + + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: @@ -43,6 +54,8 @@ cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" + gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" + gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - add_host: name: "{{ master_names.0 }}" @@ -50,17 +63,5 @@ when: master_names is defined and master_names.0 is defined - include: update.yml -# -#- name: Deploy OpenShift Services -# hosts: service_master -# connection: ssh -# gather_facts: yes -# roles: -# - openshift_registry -# - openshift_router -# -#- include: ../../common/openshift-cluster/create_services.yml -# vars: -# g_svc_master: "{{ service_master }}" - include: list.yml diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index e67685912..f3004ede9 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -14,7 +14,7 @@ - add_host: name: "{{ item }}" groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) @@ -23,4 +23,4 @@ gather_facts: no tasks: - debug: - msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" + msg: "public ip: {{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml index 8925de4cb..914f38c1f 100644 --- a/playbooks/gce/openshift-cluster/service.yml +++ b/playbooks/gce/openshift-cluster/service.yml @@ -14,14 +14,14 @@ - add_host: name: "{{ item }}" groups: g_service_nodes - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" 
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - add_host: name: "{{ item }}" groups: g_service_masters - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 488b62eb9..8ebf71cd4 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -1,7 +1,4 @@ --- -# TODO: when we are ready to go to ansible 1.9+ support only, we can update to -# the gce task to use the disk_auto_delete parameter to avoid having to delete -# the disk as a separate step on termination - name: Launch instance(s) gce: instance_names: "{{ instances }}" @@ -41,7 +38,7 @@ add_host: hostname: "{{ item.name }}" ansible_ssh_host: "{{ item.public_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index faa46c0d6..d835c53ba 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) @@ -20,7 +20,7 @@ - vars.yml roles: - role: rhel_unsubscribe - when: deployment_type == "enterprise" and + when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index dadceae58..2dc540978 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -12,7 +12,7 @@ add_host: name: "{{ item }}" groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: "{{ g_all_hosts | default([]) }}" diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml index f004a9e6b..1ae73fd68 100644 --- a/playbooks/gce/openshift-cluster/vars.yml +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -4,14 +4,14 @@ debug_level: 2 
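The terminate playbooks now gate the `rhel_unsubscribe` role on every enterprise-style deployment type rather than only `enterprise`. A small sketch of how that `when:` condition evaluates; `skip_option` stands in for the `oo_option` lookup and `rhsub_skip` for the inventory default, both hypothetical values here.

```python
def should_unsubscribe(deployment_type, distribution, skip_option=None, rhsub_skip=None):
    """Sketch of the rhel_unsubscribe gate; the chained `or` mirrors the
    | default(rhsub_skip, True) | default('no', True) filters."""
    enterprise_types = ['atomic-enterprise', 'enterprise', 'openshift-enterprise']
    skip = skip_option or rhsub_skip or 'no'
    return (deployment_type in enterprise_types
            and distribution == 'RedHat'
            and str(skip).lower() in ['no', 'false'])

# Only RHEL hosts in enterprise-style deployments are unsubscribed on terminate
assert should_unsubscribe('openshift-enterprise', 'RedHat')
assert not should_unsubscribe('origin', 'RedHat')
```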
deployment_rhel7_ent_base: image: rhel-7 machine_type: n1-standard-1 - ssh_user: + ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}" sudo: yes deployment_vars: origin: - image: preinstalled-slave-50g-v5 + image: centos-7 machine_type: n1-standard-1 - ssh_user: root + ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}" sudo: yes online: image: libra-rhel7 diff --git a/playbooks/gce/openshift-cluster/wip.yml b/playbooks/gce/openshift-cluster/wip.yml deleted file mode 100644 index 0e3757546..000000000 --- a/playbooks/gce/openshift-cluster/wip.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: WIP - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Evaluate oo_masters_for_deploy - add_host: - name: "{{ item }}" - groups: oo_masters_for_deploy - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: "{{ g_master_hosts | default([]) }}" - -- name: Deploy OpenShift Services - hosts: oo_masters_for_deploy - connection: ssh - gather_facts: yes - user: root - vars_files: - - vars.yml - roles: - - openshift_registry - - openshift_router diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 9bd99c4fc..0e003ef67 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -13,6 +13,11 @@ openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" + openshift_registry_selector: 'type=infra' openshift_router_selector: 'type=infra' openshift_infra_nodes: "{{ g_infra_hosts }}" openshift_master_cluster_method: 'native' + openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" + os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" + openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index ff1cedc94..ebe9f0ad9 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -13,16 +13,28 @@ get_url: url: '{{ image_url }}' sha256sum: '{{ image_sha256 }}' - dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | reject("equalto", "") | join(".") }}' when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' register: downloaded_image -- name: Uncompress Base Cloud image +- name: Uncompress xz compressed base cloud image command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' args: creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' when: image_compression in ["xz"] and downloaded_image.changed +- name: Uncompress tgz compressed base cloud image + command: 'tar zxvf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + args: + creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + when: image_compression in ["tgz"] and downloaded_image.changed + +- name: 
Uncompress gzip compressed base cloud image + command: 'gunzip {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + args: + creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + when: image_compression in ["gz"] and downloaded_image.changed + - name: Create the cloud-init config drive path file: dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/' diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index 8d845c8f2..f4749c28d 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -23,7 +23,7 @@ - vars.yml roles: - role: rhel_unsubscribe - when: deployment_type == "enterprise" and + when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index da628786b..c78b52867 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -15,6 +15,7 @@ deployment_rhel7_ent_base: default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" sha256: "{{ lookup('oo_option', 'image_sha256') | default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}" + compression: "" ssh_user: openshift sudo: yes @@ -41,3 +42,5 @@ deployment_vars: enterprise: "{{ deployment_rhel7_ent_base }}" openshift-enterprise: "{{ deployment_rhel7_ent_base }}" atomic-enterprise: "{{ deployment_rhel7_ent_base }}" + + diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index 5128e767f..093beaf03 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -10,7 +10,11 @@ openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ ansible_default_ipv4.address }}" + openshift_registry_selector: 'type=infra' openshift_router_selector: 'type=infra' openshift_infra_nodes: "{{ g_infra_hosts }}" openshift_master_cluster_method: 'native' + openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" + os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" + openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml new file mode 100644 index 000000000..5e7671a48 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/dns.yml @@ -0,0 +1,47 @@ +- name: Populate oo_dns_hosts_to_update group + hosts: localhost + connection: local + become: no + gather_facts: no + vars_files: + - vars.yml + - cluster_hosts.yml + tasks: + - name: Evaluate oo_dns_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_dns_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: "{{ groups[cluster_id ~ '-dns'] }}" + + - name: Evaluate oo_hosts_to_add_in_dns + add_host: + name: "{{ item }}" + groups: oo_hosts_to_add_in_dns + ansible_ssh_user: "{{ 
deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: "{{ groups['tag_clusterid_' ~ cluster_id] }}" + +- name: Gather facts + hosts: oo_hosts_to_add_in_dns + vars_files: + - vars.yml + - cluster_hosts.yml + +- name: Configure the DNS + hosts: oo_dns_hosts_to_update + vars_files: + - vars.yml + - cluster_hosts.yml + roles: + - role: rhel_subscribe + when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + + - { role: dns, + dns_forwarders: "{{ openstack_network_dns }}", + dns_zones: [ novalocal, openstacklocal ], + dns_all_hosts: "{{ g_all_hosts }}" } diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 4f6a59a30..ac1612634 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -14,10 +14,10 @@ parameters: label: Cluster ID description: Identifier of the cluster - cidr: + subnet_24_prefix: type: string - label: CIDR - description: CIDR of the network of the cluster + label: subnet /24 prefix + description: /24 subnet prefix of the network of the cluster (dot separated number triplet) dns_nameservers: type: comma_delimited_list @@ -30,12 +30,6 @@ parameters: description: Name of the external network default: external - floating_ip_pool: - type: string - label: Floating IP pool - description: Floating IP pools - default: external - ssh_public_key: type: string label: SSH public key @@ -88,6 +82,11 @@ parameters: label: Infra image description: Name of the image for the infra node servers + dns_image: + type: string + label: DNS image + description: Name of the image for the DNS server + etcd_flavor: type: string label: Etcd flavor @@ -108,6 +107,11 @@ parameters: label: Infra flavor description: Flavor of the infra node servers + dns_flavor: + type: string + label: DNS flavor + description: Flavor of the DNS server + outputs: etcd_names: @@ -158,6 +162,26 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + - 1 + - addr + resources: net: @@ -178,8 +202,27 @@ resources: params: cluster_id: { get_param: cluster_id } network: { get_resource: net } - cidr: { get_param: cidr } - dns_nameservers: { get_param: dns_nameservers } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + dns_nameservers: + - str_replace: + template: subnet_24_prefix.2 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } router: type: OS::Neutron::Router @@ -337,6 +380,44 @@ resources: port_range_min: 443 port_range_max: 443 + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: 
openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_mode: remote_group_id + remote_group_id: { get_resource: etcd-secgrp } + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_mode: remote_group_id + remote_group_id: { get_resource: node-secgrp } + etcd: type: OS::Heat::ResourceGroup properties: @@ -360,13 +441,14 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: etcd-secgrp } - floating_network: { get_param: floating_ip_pool } + floating_network: { get_param: external_net } net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: { get_param: cluster_id } - depends_on: interface + depends_on: + - interface masters: type: OS::Heat::ResourceGroup @@ -391,13 +473,14 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: master-secgrp } - floating_network: { get_param: floating_ip_pool } + floating_network: { get_param: external_net } net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: { get_param: cluster_id } - depends_on: interface + depends_on: + - interface compute_nodes: type: OS::Heat::ResourceGroup @@ -424,13 +507,14 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: node-secgrp } - floating_network: { get_param: floating_ip_pool } + floating_network: { get_param: external_net } net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: { get_param: cluster_id } - depends_on: interface + depends_on: + - interface infra_nodes: type: OS::Heat::ResourceGroup @@ -458,10 +542,77 @@ resources: secgrp: - { get_resource: node-secgrp } - { get_resource: infra-secgrp } - floating_network: { get_param: floating_ip_pool } + floating_network: { get_param: external_net } net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: { get_param: cluster_id } - depends_on: interface + depends_on: + - interface + + dns: + type: OS::Nova::Server + properties: + name: + str_replace: + template: cluster_id-dns + params: + cluster_id: { get_param: cluster_id } + key_name: { get_resource: keypair } + image: { get_param: dns_image } + flavor: { get_param: dns_flavor } + networks: + - port: { get_resource: dns-port } + user_data: { get_resource: dns-config } + user_data_format: RAW + + dns-port: + type: OS::Neutron::Port + properties: + network: { get_resource: net } + fixed_ips: + - subnet: { get_resource: subnet } + ip_address: + str_replace: + template: subnet_24_prefix.2 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + security_groups: + - { get_resource: dns-secgrp } + + dns-floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: external_net } + port_id: { get_resource: dns-port } + + dns-config: + type: OS::Heat::MultipartMime + properties: + parts: + - config: { get_file: user-data } + - config: + str_replace: + template: | + #cloud-config + write_files: + - path: 
/etc/sysconfig/network-scripts/ifcfg-eth0 + content: | + DEVICE="eth0" + BOOTPROTO="dhcp" + DNS1="$dns1" + DNS2="$dns2" + PEERDNS="no" + ONBOOT="yes" + runcmd: + - [ "/usr/bin/systemctl", "restart", "network" ] + params: + $dns1: + get_param: + - dns_nameservers + - 0 + $dns2: + get_param: + - dns_nameservers + - 1 diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 76cc64a73..2187cefed 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -32,10 +32,9 @@ --timeout 3 --enable-rollback -P cluster_env={{ cluster_env }} -P cluster_id={{ cluster_id }} - -P cidr={{ openstack_network_cidr }} + -P subnet_24_prefix={{ openstack_subnet_24_prefix }} -P dns_nameservers={{ openstack_network_dns | join(",") }} -P external_net={{ openstack_network_external_net }} - -P floating_ip_pool={{ openstack_floating_ip_pool }} -P ssh_public_key="{{ openstack_ssh_public_key }}" -P ssh_incoming={{ openstack_ssh_access_from }} -P num_etcd={{ num_etcd }} @@ -46,10 +45,12 @@ -P master_image={{ deployment_vars[deployment_type].image }} -P node_image={{ deployment_vars[deployment_type].image }} -P infra_image={{ deployment_vars[deployment_type].image }} + -P dns_image={{ deployment_vars[deployment_type].image }} -P etcd_flavor={{ openstack_flavor["etcd"] }} -P master_flavor={{ openstack_flavor["master"] }} -P node_flavor={{ openstack_flavor["node"] }} -P infra_flavor={{ openstack_flavor["infra"] }} + -P dns_flavor=m1.small openshift-ansible-{{ cluster_id }}-stack' - name: Wait for OpenStack Stack readiness @@ -115,6 +116,14 @@ - parsed_outputs.infra_ips - parsed_outputs.infra_floating_ips + - name: Add DNS groups and variables + add_host: + hostname: '{{ parsed_outputs.dns_name }}' + ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}' + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: '{{ cluster_id }}-dns' + - name: Wait for ssh wait_for: host: '{{ item }}' @@ -123,6 +132,7 @@ - parsed_outputs.master_floating_ips - parsed_outputs.node_floating_ips - parsed_outputs.infra_floating_ips + - parsed_outputs.dns_floating_ip - name: Wait for user setup command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup' @@ -134,6 +144,7 @@ - parsed_outputs.master_floating_ips - parsed_outputs.node_floating_ips - parsed_outputs.infra_floating_ips + - parsed_outputs.dns_floating_ip - include: update.yml diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index 7a86b78c5..a1fb41b53 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -19,7 +19,7 @@ - vars.yml roles: - role: rhel_unsubscribe - when: deployment_type == "enterprise" and + when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index 2dc540978..16027b15c 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ 
b/playbooks/openstack/openshift-cluster/update.yml @@ -1,4 +1,6 @@ --- +- include: dns.yml + - name: Populate oo_hosts_to_update group hosts: localhost connection: local diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index 76cde1706..ee26d223e 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -2,12 +2,10 @@ debug_level: 2 openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | default('files/heat_stack.yaml', True) }}" -openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) | - default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}" +openstack_subnet_24_prefix: "{{ lookup('oo_option', 'subnet_24_prefix' ) | + default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}" openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) | default('external', True) }}" -openstack_floating_ip_pool: "{{ lookup('oo_option', 'floating_ip_pool' ) | - default('external', True) }}" openstack_network_dns: "{{ lookup('oo_option', 'dns' ) | default('8.8.8.8,8.8.4.4', True) | oo_split() }}" openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') | diff --git a/roles/dns/README.md b/roles/dns/README.md new file mode 100644 index 000000000..e238fb92e --- /dev/null +++ b/roles/dns/README.md @@ -0,0 +1,43 @@ +dns +=== + +Configure a DNS server serving IPs of all the nodes of the cluster + +Requirements +------------ + +None + +Role Variables +-------------- + +| Name | Mandatory / Optional | Description | +|------|----------------------|-------------| +| `dns_zones` | Mandatory | DNS zones in which we must find the hosts | +| `dns_forwarders` | If not set, the DNS will be a recursive non-forwarding DNS server | DNS forwarders to delegate the requests for hosts outside of `dns_zones` | +| `dns_all_hosts` | Mandatory | Exhaustive list of hosts | + +Dependencies +------------ + +None + +Example Playbook +---------------- + + - hosts: dns_hosts + roles: + - role: dns + dns_forwarders: [ '8.8.8.8', '8.8.4.4' ] + dns_zones: [ novalocal, openstacklocal ] + dns_all_hosts: "{{ g_all_hosts }}" + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift operations, Red Hat, Inc diff --git a/roles/dns/handlers/main.yml b/roles/dns/handlers/main.yml new file mode 100644 index 000000000..ef101785e --- /dev/null +++ b/roles/dns/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart bind + service: + name: named + state: restarted diff --git a/roles/dns/meta/main.yml b/roles/dns/meta/main.yml new file mode 100644 index 000000000..b6e9d9ad0 --- /dev/null +++ b/roles/dns/meta/main.yml @@ -0,0 +1,7 @@ +--- +galaxy_info: + author: Lénaïc Huard + description: Deploy and configure a DNS server + company: Amadeus SAS + license: ASL 2.0 +dependencies: [] diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml new file mode 100644 index 000000000..af728585d --- /dev/null +++ b/roles/dns/tasks/main.yml @@ -0,0 +1,22 @@ +- name: Install Bind + action: "{{ ansible_pkg_mgr }} name=bind" + +- name: Configure Bind + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + validate: "{{ item.validate }}" + with_items: + - src: openshift-cluster.zone + dest: /var/named/openshift-cluster.zone + validate: "named-checkzone {{ dns_zones[0] }} %s" + - src: named.conf + dest: /etc/named.conf + validate: "named-checkconf %s" + notify: restart bind + +- name: Enable Bind + service: + name: 
named + state: started + enabled: yes diff --git a/roles/dns/templates/named.conf b/roles/dns/templates/named.conf new file mode 100644 index 000000000..22c1ff935 --- /dev/null +++ b/roles/dns/templates/named.conf @@ -0,0 +1,23 @@ +options +{ + directory "/var/named"; + + allow-query { {{ ansible_default_ipv4.network }}/24; }; + + recursion yes; + +{% if dns_forwarders is defined %} + forwarders { + {% for dns in dns_forwarders %} + {{ dns }}; + {% endfor %} + }; +{% endif %} +}; +{% for zone in dns_zones %} + +zone "{{ zone }}" IN { + type master; + file "openshift-cluster.zone"; +}; +{% endfor %} diff --git a/roles/dns/templates/openshift-cluster.zone b/roles/dns/templates/openshift-cluster.zone new file mode 100644 index 000000000..03f5dc089 --- /dev/null +++ b/roles/dns/templates/openshift-cluster.zone @@ -0,0 +1,14 @@ +$TTL 1d +@ IN SOA {{ ansible_hostname }} openshift ( + {{ ansible_date_time.epoch }} ; Serial (To be fixed before 2039) + 12h ; Refresh + 3m ; Retry + 4w ; Expire + 3h ; TTL for negative replies + ) + + IN NS {{ ansible_hostname }} +{{ ansible_hostname }} IN A {{ ansible_default_ipv4.address }} +{% for host in dns_all_hosts %} +{{ hostvars[host].ansible_hostname }} IN A {{ hostvars[host]['ansible_default_ipv4'].address }} +{% endfor %} diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml index cd108495d..be75fdab2 100644 --- a/roles/etcd_common/tasks/main.yml +++ b/roles/etcd_common/tasks/main.yml @@ -5,9 +5,9 @@ - fail: msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}" when: "'etcd_interface' in item.value and 'interface' not in item.value" - with_dict: etcd_host_int_map + with_dict: etcd_host_int_map | default({}) - fail: msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }} when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4" - with_dict: etcd_host_int_map + with_dict: etcd_host_int_map | default({}) diff --git a/roles/fluentd_master/meta/main.yml b/roles/fluentd_master/meta/main.yml new file mode 100644 index 000000000..148bc377e --- /dev/null +++ b/roles/fluentd_master/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: OpenShift Red Hat + description: Fluentd Master + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - monitoring + dependencies: + - openshift_facts diff --git a/roles/lib_dyn/library/dyn_record.py b/roles/lib_dyn/library/dyn_record.py index 5e088a674..f2796ccf2 100644 --- a/roles/lib_dyn/library/dyn_record.py +++ b/roles/lib_dyn/library/dyn_record.py @@ -224,11 +224,13 @@ def main(): # First get a list of existing records for the node values = get_record_values(dyn_node_records) value_key = get_record_key(module.params['record_type']) + param_value = module.params['record_value'] # Check to see if the record is already in place before doing anything. if (dyn_node_records and dyn_node_records[value_key][0].ttl == module.params['record_ttl'] and - module.params['record_value'] in values[value_key]): + (param_value in values[value_key] or + param_value + '.' 
in values[value_key])): module.exit_json(changed=False) diff --git a/roles/lib_dyn/tasks/main.yml b/roles/lib_dyn/tasks/main.yml index 9b3b1b0b9..965962928 100644 --- a/roles/lib_dyn/tasks/main.yml +++ b/roles/lib_dyn/tasks/main.yml @@ -3,3 +3,5 @@ - name: Make sure python-dyn is installed yum: name=python-dyn state=present + tags: + - lib_dyn diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py index 2f9524556..e7edcde2f 100644 --- a/roles/lib_zabbix/library/zbx_action.py +++ b/roles/lib_zabbix/library/zbx_action.py @@ -120,8 +120,8 @@ def opmessage_usr_diff(zab_val, user_val): ''' Report whether there are differences between opmessage_usr on zabbix and opmessage_usr supplied by user ''' - zab_usr_ids = set([usr['usrid'] for usr in zab_val]) - usr_ids = set([usr['usrid'] for usr in user_val]) + zab_usr_ids = set([usr['userid'] for usr in zab_val]) + usr_ids = set([usr['userid'] for usr in user_val]) if usr_ids != zab_usr_ids: return True @@ -228,12 +228,12 @@ def get_user_groups(zapi, groups): '''get the mediatype id from the mediatype name''' user_groups = [] - content = zapi.get_content('usergroup', - 'get', - {'search': {'name': groups}}) - - for usr_grp in content['result']: - user_groups.append({'usrgrpid': usr_grp['usrgrpid']}) + for group in groups: + content = zapi.get_content('usergroup', + 'get', + {'search': {'name': group}}) + for result in content['result']: + user_groups.append({'usrgrpid': result['usrgrpid']}) return user_groups diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml index 9b7735e54..d45f62eca 100644 --- a/roles/openshift_cluster_metrics/tasks/main.yml +++ b/roles/openshift_cluster_metrics/tasks/main.yml @@ -3,12 +3,12 @@ - name: Install cluster metrics templates copy: src: cluster-metrics - dest: /etc/openshift/ + dest: /etc/origin/ - name: Create InfluxDB Services command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/influxdb.yaml + /etc/origin/cluster-metrics/influxdb.yaml register: oex_influxdb_services failed_when: "'already exists' not in oex_influxdb_services.stderr and oex_influxdb_services.rc != 0" changed_when: false @@ -16,7 +16,7 @@ - name: Create Heapster Service Account command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/heapster-serviceaccount.yaml + /etc/origin/cluster-metrics/heapster-serviceaccount.yaml register: oex_heapster_serviceaccount failed_when: "'already exists' not in oex_heapster_serviceaccount.stderr and oex_heapster_serviceaccount.rc != 0" changed_when: false @@ -35,7 +35,7 @@ - name: Create Heapster Services command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/heapster.yaml + /etc/origin/cluster-metrics/heapster.yaml register: oex_heapster_services failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0" changed_when: false @@ -43,7 +43,7 @@ - name: Create Grafana Services command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/grafana.yaml + /etc/origin/cluster-metrics/grafana.yaml register: oex_grafana_services failed_when: "'already exists' not in oex_grafana_services.stderr and oex_grafana_services.rc != 0" changed_when: false diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 83af04798..a2fcff507 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ 
-8,7 +8,7 @@ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_nuage | default(false) | bool - fail: - msg: Nuage sdn can not be used with flannel + msg: Nuage sdn can not be used with flannel when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - fail: @@ -38,13 +38,17 @@ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present" when: not openshift.common.is_containerized | bool -- name: Set version facts +# This invocation also updates the version facts which are necessary +# for setting the hostname below. +- name: openshift_facts openshift_facts: + role: hosted + openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" - # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the - # hostname by default. +# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the +# hostname by default. - set_fact: - set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}" + set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}" - name: Set hostname command: > diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 6b9964aec..5e955590e 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -6,7 +6,7 @@ openshift_examples_load_db_templates: true openshift_examples_load_xpaas: "{{ openshift_deployment_type != 'origin' }}" openshift_examples_load_quickstarts: true -content_version: "{{ 'v1.1' if openshift.common.version_greater_than_3_1_or_1_1 else 'v1.0' }}" +content_version: "{{ 'v1.1' if openshift.common.version_gte_3_1_or_1_1 else 'v1.0' }}" examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized else '/usr/share/openshift' }}/examples" image_streams_base: "{{ examples_base }}/image-streams" diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml index ddd9f2f75..be999bd1a 100644 --- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml @@ -85,7 +85,7 @@ parameters: - description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"' name: IMAGE_VERSION - value: "3.1.0" + value: "3.1.1" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index fc36825e8..20f6cc5c8 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -694,8 +694,8 @@ def set_deployment_facts_if_unset(facts): if 'node' in facts: deployment_type = facts['common']['deployment_type'] if 'storage_plugin_deps' not in facts['node']: - if deployment_type in ['openshift-enterprise', 'atomic-enterprise']: - facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs'] + if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']: + facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] else: facts['node']['storage_plugin_deps'] = [] @@ -703,7 +703,7 @@ def set_deployment_facts_if_unset(facts): def set_version_facts_if_unset(facts): """ Set version facts. This currently includes common.version and - common.version_greater_than_3_1_or_1_1. + common.version_gte_3_1_or_1_1. Args: facts (dict): existing facts @@ -715,16 +715,20 @@ def set_version_facts_if_unset(facts): facts['common']['version'] = version = get_openshift_version() if version is not None: if deployment_type == 'origin': - version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') - version_gt_3_1_1_or_1_1_1 = LooseVersion(version) > LooseVersion('1.1.1') + version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0') + version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1') + version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.1.2') else: - version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') - version_gt_3_1_1_or_1_1_1 = LooseVersion(version) > LooseVersion('3.1.1') + version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.1.0') + version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1') + version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.2.0') else: - version_gt_3_1_or_1_1 = True - version_gt_3_1_1_or_1_1_1 = True - facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 - facts['common']['version_greater_than_3_1_1_or_1_1_1'] = version_gt_3_1_1_or_1_1_1 + version_gte_3_1_or_1_1 = True + version_gte_3_1_1_or_1_1_1 = True + version_gte_3_2_or_1_2 = True + facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 + facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 + facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 return facts @@ -739,12 +743,12 @@ def set_manageiq_facts_if_unset(facts): OpenShiftFactsInternalError: """ if 'common' not in facts: - if 'version_greater_than_3_1_or_1_1' not in facts['common']: + if 'version_gte_3_1_or_1_1' not in facts['common']: raise OpenShiftFactsInternalError( "Invalid invocation: The required facts are not set" ) if 'use_manageiq' not in facts['common']: - facts['common']['use_manageiq'] = facts['common']['version_greater_than_3_1_or_1_1'] + facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1'] return facts @@ -940,12 +944,12 @@ def merge_facts(orig, new, additive_facts_to_overwrite): # Fact is additive so we'll combine orig and new. 
if isinstance(value, list) and isinstance(new[key], list): new_fact = [] - for item in copy.deepcopy(value) + copy.copy(new[key]): + for item in copy.deepcopy(value) + copy.deepcopy(new[key]): if item not in new_fact: new_fact.append(item) facts[key] = new_fact else: - facts[key] = copy.copy(new[key]) + facts[key] = copy.deepcopy(new[key]) else: facts[key] = copy.deepcopy(value) new_keys = set(new.keys()) - set(orig.keys()) @@ -1053,6 +1057,28 @@ def set_container_facts_if_unset(facts): return facts +def set_installed_variant_rpm_facts(facts): + """ Set RPM facts of installed variant + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with installed_variant_rpms + """ + installed_rpms = [] + for base_rpm in ['openshift', 'atomic-openshift', 'origin']: + optional_rpms = ['master', 'node', 'clients', 'sdn-ovs'] + variant_rpms = [base_rpm] + \ + ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \ + ['tuned-profiles-%s-node' % base_rpm] + for rpm in variant_rpms: + exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) + if exit_code == 0: + installed_rpms.append(rpm) + + facts['common']['installed_variant_rpms'] = installed_rpms + return facts + + class OpenShiftFactsInternalError(Exception): """Origin Facts Error""" @@ -1091,9 +1117,11 @@ class OpenShiftFacts(object): Raises: OpenShiftFactsUnsupportedRoleError: """ - known_roles = ['common', 'master', 'node', 'etcd', 'nfs'] + known_roles = ['common', 'master', 'node', 'etcd', 'hosted'] - def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False): + # Disabling too-many-arguments, this should be cleaned up as a TODO item. + # pylint: disable=too-many-arguments + def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False, openshift_env=None): self.changed = False self.filename = filename if role not in self.known_roles: @@ -1102,9 +1130,9 @@ class OpenShiftFacts(object): ) self.role = role self.system_facts = ansible_facts(module) - self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite) + self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite, openshift_env) - def generate_facts(self, local_facts, additive_facts_to_overwrite): + def generate_facts(self, local_facts, additive_facts_to_overwrite, openshift_env): """ Generate facts Args: @@ -1116,7 +1144,7 @@ class OpenShiftFacts(object): Returns: dict: The generated facts """ - local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite) + local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite, openshift_env) roles = local_facts.keys() defaults = self.get_defaults(roles) @@ -1139,6 +1167,8 @@ class OpenShiftFacts(object): facts = set_aggregate_facts(facts) facts = set_etcd_facts_if_unset(facts) facts = set_container_facts_if_unset(facts) + if not facts['common']['is_containerized']: + facts = set_installed_variant_rpm_facts(facts) return dict(openshift=facts) def get_defaults(self, roles): @@ -1187,10 +1217,23 @@ class OpenShiftFacts(object): iptables_sync_period='5s', set_node_ip=False) defaults['node'] = node - if 'nfs' in roles: - nfs = dict(exports_dir='/var/export', registry_volume='regvol', - export_options='*(rw,sync,all_squash)') - defaults['nfs'] = nfs + defaults['hosted'] = dict( + registry=dict( + storage=dict( + kind=None, + volume=dict( + name='registry', + size='5Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)'), + host=None, + access_modes=['ReadWriteMany'], + create_pv=True + ) + ) + ) return 
defaults @@ -1269,7 +1312,9 @@ class OpenShiftFacts(object): ) return provider_facts - def init_local_facts(self, facts=None, additive_facts_to_overwrite=False): + # Disabling too-many-branches. This should be cleaned up as a TODO item. + #pylint: disable=too-many-branches + def init_local_facts(self, facts=None, additive_facts_to_overwrite=False, openshift_env=None): """ Initialize the provider facts Args: @@ -1282,10 +1327,27 @@ class OpenShiftFacts(object): local facts """ changed = False - facts_to_set = {self.role: dict()} + + facts_to_set = dict() + if facts is not None: facts_to_set[self.role] = facts + if openshift_env != {} and openshift_env != None: + for fact, value in openshift_env.iteritems(): + oo_env_facts = dict() + current_level = oo_env_facts + keys = fact.split('_')[1:] + if keys[0] != self.role: + continue + for key in keys: + if key == keys[-1]: + current_level[key] = value + elif key not in current_level: + current_level[key] = dict() + current_level = current_level[key] + facts_to_set = merge_facts(facts_to_set, oo_env_facts, []) + local_facts = get_local_facts_from_file(self.filename) for arg in ['labels', 'annotations']: @@ -1296,11 +1358,12 @@ class OpenShiftFacts(object): new_local_facts = merge_facts(local_facts, facts_to_set, additive_facts_to_overwrite) for facts in new_local_facts.values(): keys_to_delete = [] - for fact, value in facts.iteritems(): - if value == "" or value is None: - keys_to_delete.append(fact) - for key in keys_to_delete: - del facts[key] + if isinstance(facts, dict): + for fact, value in facts.iteritems(): + if value == "" or value is None: + keys_to_delete.append(fact) + for key in keys_to_delete: + del facts[key] if new_local_facts != local_facts: self.validate_local_facts(new_local_facts) @@ -1388,6 +1451,7 @@ def main(): choices=OpenShiftFacts.known_roles), local_facts=dict(default=None, type='dict', required=False), additive_facts_to_overwrite=dict(default=[], type='list', required=False), + openshift_env=dict(default={}, type='dict', required=False) ), supports_check_mode=True, add_file_common_args=True, @@ -1396,9 +1460,15 @@ def main(): role = module.params['role'] local_facts = module.params['local_facts'] additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] + openshift_env = module.params['openshift_env'] + fact_file = '/etc/ansible/facts.d/openshift.fact' - openshift_facts = OpenShiftFacts(role, fact_file, local_facts, additive_facts_to_overwrite) + openshift_facts = OpenShiftFacts(role, + fact_file, + local_facts, + additive_facts_to_overwrite, + openshift_env) file_params = module.params.copy() file_params['path'] = fact_file diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 0357fc85a..d2ff1b4b7 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -1,7 +1,7 @@ --- - fail: msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." 
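With this change, `init_local_facts()` accepts an `openshift_env` dict and expands each `openshift_<role>_...` key into nested facts before merging. A sketch of that expansion; the example variable name is hypothetical but follows the `openshift_` prefix convention used by the new `openshift_env` module argument.

```python
def nest_openshift_env(openshift_env, role):
    """Sketch of the openshift_env expansion in init_local_facts():
    keys are split on '_', the leading 'openshift' is dropped, and only
    entries whose next component matches the module's role are kept."""
    facts = {}
    for fact, value in openshift_env.items():
        keys = fact.split('_')[1:]
        if keys[0] != role:
            continue
        level = facts
        for key in keys[:-1]:
            level = level.setdefault(key, {})
        level[keys[-1]] = value
    return facts

# Hypothetical inventory variable -> nested fact for the 'hosted' role
print(nest_openshift_env({'openshift_hosted_registry_storage_kind': 'nfs'}, 'hosted'))
# {'hosted': {'registry': {'storage': {'kind': 'nfs'}}}}
```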
- when: not openshift.common.version_greater_than_3_1_or_1_1 | bool + when: not openshift.common.version_gte_3_1_or_1_1 | bool - name: Copy Configuration to temporary conf command: > @@ -9,7 +9,7 @@ changed_when: false - name: Add Managment Infrastructure project - command: > + command: > {{ openshift.common.admin_binary }} new-project management-infra --description="Management Infrastructure" @@ -20,9 +20,9 @@ - name: Create Service Account shell: > - echo {{ manageiq_service_account | to_json | quote }} | - {{ openshift.common.client_binary }} create - -n management-infra + echo {{ manageiq_service_account | to_json | quote }} | + {{ openshift.common.client_binary }} create + -n management-infra --config={{manage_iq_tmp_conf}} -f - register: osmiq_create_service_account @@ -32,7 +32,7 @@ - name: Create Cluster Role shell: > echo {{ manageiq_cluster_role | to_json | quote }} | - {{ openshift.common.client_binary }} create + {{ openshift.common.client_binary }} create --config={{manage_iq_tmp_conf}} -f - register: osmiq_create_cluster_role diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index aa5e593b6..3613808b6 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -15,7 +15,7 @@ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"])) - fail: msg: "'native' high availability is not supported for the requested OpenShift version" - when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool - fail: msg: "openshift_master_cluster_password must be set for multi-master installations" when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password) @@ -285,6 +285,10 @@ master_service_status_changed: "{{ start_result | changed }}" when: not openshift_master_ha | bool +- name: Mask master service + command: systemctl mask {{ openshift.common.service_type }}-master + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and not openshift.common.is_atomic | bool + - name: Start and enable master api service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 1eeab46fe..813a58d60 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -1,5 +1,5 @@ apiLevels: -{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %} +{% if not openshift.common.version_gte_3_1_or_1_1 | bool %} - v1beta3 {% endif %} - v1 @@ -91,7 +91,7 @@ kubeletClientInfo: port: 10250 {% if openshift.master.embedded_kube | bool %} kubernetesMasterConfig: -{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %} +{% if not openshift.common.version_gte_3_1_or_1_1 | bool %} apiLevels: - v1beta3 - v1 diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 
b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index e6e97b24f..b393bb9ff 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -7,7 +7,7 @@ Before={{ openshift.common.service_type }}-node.service Requires=network.target [Service] -{% if openshift.common.version_greater_than_3_1_1_or_1_1_1 | bool %} +{% if openshift.common.version_gte_3_1_1_or_1_1_1 | bool %} Type=notify {% else %} Type=simple diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 36d953111..7c58e943a 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -39,7 +39,7 @@ state: hard with_nested: - masters_needing_certs - - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}" + - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_gte_3_1_or_1_1 | bool else master_certificates }}" - name: Create the master certificates if they do not already exist command: > diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 9035248f9..acf2f74e3 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -17,7 +17,7 @@ # TODO: Replace this with a lookup or filter plugin. dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip - | default(None if openshift.common.version_greater_than_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}" + | default(None if openshift.common.version_gte_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}" - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" @@ -33,6 +33,7 @@ set_node_ip: "{{ openshift_set_node_ip | default(None) }}" node_image: "{{ osn_image | default(None) }}" ovs_image: "{{ osn_ovs_image | default(None) }}" + proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
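The node role's `dns_ip` default chain above is easy to misread; roughly, it resolves as sketched below, with keyword arguments standing in for the corresponding inventory variables.

```python
def resolve_node_dns_ip(openshift_dns_ip=None, master_cluster_vip=None,
                        version_gte_3_1_or_1_1=True, first_master_ip=None):
    """Sketch of the nested default() chain that sets the node dns_ip fact."""
    if openshift_dns_ip:
        return openshift_dns_ip
    if master_cluster_vip:
        return master_cluster_vip
    # Only pre-3.1 / pre-1.1 installs fall back to the first master's IP
    if not version_gte_3_1_or_1_1:
        return first_master_ip
    return None

# A 3.1+/1.1+ cluster with neither an explicit DNS IP nor a cluster VIP
# leaves dns_ip unset rather than pointing at the first master.
assert resolve_node_dns_ip(first_master_ip='192.0.2.10') is None
```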
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml new file mode 100644 index 000000000..d6684b34a --- /dev/null +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -0,0 +1,4 @@ +--- +- name: Install iSCSI storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=iscsi-initiator-utils state=present" + when: not openshift.common.is_atomic | bool diff --git a/roles/openshift_node/tasks/storage_plugins/main.yml b/roles/openshift_node/tasks/storage_plugins/main.yml index 39c7b9390..fe638718d 100644 --- a/roles/openshift_node/tasks/storage_plugins/main.yml +++ b/roles/openshift_node/tasks/storage_plugins/main.yml @@ -11,3 +11,7 @@ - name: Ceph storage plugin configuration include: ceph.yml when: "'ceph' in openshift.node.storage_plugin_deps" + +- name: iSCSI storage plugin configuration + include: iscsi.yml + when: "'iscsi' in openshift.node.storage_plugin_deps" diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 44065f4bd..67975d372 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -35,3 +35,6 @@ servingInfo: keyFile: server.key volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes {% include 'partials/kubeletArguments.j2' %} +proxyArguments: + proxy-mode: + - {{ openshift.node.proxy_mode }} diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md new file mode 100644 index 000000000..34ae89536 --- /dev/null +++ b/roles/openshift_persistent_volumes/README.md @@ -0,0 +1,60 @@ +OpenShift NFS Server +==================== + +OpenShift Persistent Volumes + +Requirements +------------ + +Role Variables +-------------- + +From this role: +| Name | Default value | | +|--------------------------|---------------|-------------------------------------------------------------------------------------| +| persistent_volumes | [] | List of persistent volume dictionaries, keys: name, capacity, access_modes, storage | +| persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes | + + +From openshift_common: +| Name | Default Value | | +|-------------------------------|----------------|----------------------------------------| +| openshift_debug_level | 2 | Global openshift debug log verbosity | + + +Dependencies +------------ + + +Example Playbook +---------------- + +- name: Create persistent volumes/claims + hosts: oo_first_master + vars: + persistent_volumes: + - name: "registry-volume" + capacity: "5Gi" + access_modes: + - "ReadWriteMany" + storage: + nfs: + server: "nfs.example.com" + path: "/var/exports/registry" + persistent_volume_claims: + - name: "registry-claim" + capacity: "5Gi" + access_modes: + - "ReadWriteMany" + roles: + - role: openshift_persistent_volumes + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Andrew Butcher (abutcher@redhat.com) diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml new file mode 100644 index 000000000..d9f6fc01a --- /dev/null +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift Persistent Volumes + company: Red Hat, Inc. 
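The `persistent_volumes` entries documented in the README above are rendered into API objects by the `persistent-volume.yml.j2` template added below; a rough Python equivalent for a single entry, using the NFS sample values from the README's example playbook:

```python
def to_persistent_volume(volume):
    """Sketch of the manifest emitted for one persistent_volumes entry
    (name, capacity, access_modes, storage with a single backend key)."""
    backend = list(volume['storage'].keys())[0]   # e.g. 'nfs'
    return {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {'name': volume['name']},
        'spec': {
            'capacity': {'storage': volume['capacity']},
            'accessModes': volume['access_modes'],
            backend: volume['storage'][backend],
        },
    }

registry_pv = to_persistent_volume({
    'name': 'registry-volume',
    'capacity': '5Gi',
    'access_modes': ['ReadWriteMany'],
    'storage': {'nfs': {'server': 'nfs.example.com',
                        'path': '/var/exports/registry'}},
})
```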
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 +dependencies: +- { role: openshift_common } diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml new file mode 100644 index 000000000..2455fc792 --- /dev/null +++ b/roles/openshift_persistent_volumes/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: Create temp directory for volume definitions + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: mktemp + changed_when: False + +- name: Copy the admin client config(s) + command: > + cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: False + +- name: Deploy PersistentVolume definitions + template: + dest: "{{ mktemp.stdout }}/persistent-volumes.yml" + src: persistent-volume.yml.j2 + when: persistent_volumes | length > 0 + changed_when: False + +- name: Create PersistentVolumes + command: > + {{ openshift.common.client_binary }} create + -f {{ mktemp.stdout }}/persistent-volumes.yml + --config={{ mktemp.stdout }}/admin.kubeconfig + register: pv_create_output + when: persistent_volumes | length > 0 + failed_when: ('already exists' not in pv_create_output.stderr if pv_create_output.stderr else False) or ('created' not in pv_create_output.stdout if pv_create_output.stdout else False) + changed_when: ('created' in pv_create_output.stdout) + +- name: Deploy PersistentVolumeClaim definitions + template: + dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml" + src: persistent-volume-claim.yml.j2 + when: persistent_volume_claims | length > 0 + changed_when: False + +- name: Create PersistentVolumeClaims + command: > + {{ openshift.common.client_binary }} create + -f {{ mktemp.stdout }}/persistent-volume-claims.yml + --config={{ mktemp.stdout }}/admin.kubeconfig + register: pvc_create_output + when: persistent_volume_claims | length > 0 + failed_when: ('already exists' not in pvc_create_output.stderr if pvc_create_output.stderr else False) or ('created' not in pvc_create_output.stdout if pvc_create_output.stdout else False) + changed_when: ('created' in pvc_create_output.stdout) + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 new file mode 100644 index 000000000..d40417a9a --- /dev/null +++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: "v1" +kind: "List" +items: +{% for claim in persistent_volume_claims %} +- apiVersion: "v1" + kind: "PersistentVolumeClaim" + metadata: + name: "{{ claim.name }}" + spec: + accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }} + resources: + requests: + storage: "{{ claim.capacity }}" +{% endfor %} diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 new file mode 100644 index 000000000..877e88002 --- /dev/null +++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: List +items: +{% for volume in persistent_volumes %} +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: "{{ volume.name }}" + spec: + capacity: + storage: "{{ volume.capacity }}" + accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }} + {{ 
volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }} +{% endfor %} diff --git a/roles/openshift_persistent_volumes/vars/main.yml b/roles/openshift_persistent_volumes/vars/main.yml new file mode 100644 index 000000000..9967e26f4 --- /dev/null +++ b/roles/openshift_persistent_volumes/vars/main.yml @@ -0,0 +1,2 @@ +--- +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_registry/README.md b/roles/openshift_registry/README.md index 8e66c483b..247272668 100644 --- a/roles/openshift_registry/README.md +++ b/roles/openshift_registry/README.md @@ -17,12 +17,6 @@ From this role: |--------------------|-------------------------------------------------------|---------------------| | | | | -From openshift_common: - -| Name | Default value | | -|-----------------------|---------------|--------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | - Dependencies ------------ diff --git a/roles/openshift_registry/defaults/main.yml b/roles/openshift_registry/defaults/main.yml new file mode 100644 index 000000000..17a0d5301 --- /dev/null +++ b/roles/openshift_registry/defaults/main.yml @@ -0,0 +1,2 @@ +--- +registry_volume_claim: 'registry-claim' diff --git a/roles/openshift_registry/meta/main.yml b/roles/openshift_registry/meta/main.yml index 93b6797d1..b220a020e 100644 --- a/roles/openshift_registry/meta/main.yml +++ b/roles/openshift_registry/meta/main.yml @@ -4,10 +4,12 @@ galaxy_info: description: OpenShift Embedded Docker Registry company: Red Hat, Inc. license: Apache License, Version 2.0 - min_ansible_version: 1.7 + min_ansible_version: 1.9 platforms: - name: EL versions: - 7 categories: - cloud + dependencies: + - openshift_facts diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml index 2804e8f2e..1eeec2fbb 100644 --- a/roles/openshift_registry/tasks/main.yml +++ b/roles/openshift_registry/tasks/main.yml @@ -1,28 +1,24 @@ --- -- set_fact: _oreg_images="--images='{{ openshift.master.registry_url }}'" - -- set_fact: _oreg_selector="--selector='{{ openshift.master.registry_selector }}'" - - name: Deploy OpenShift Registry command: > {{ openshift.common.admin_binary }} registry - --create --service-account=registry {{ _oreg_selector }} - --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images }} - register: _oreg_results - changed_when: "'service exists' not in _oreg_results.stdout" + --create --replicas={{ openshift.master.infra_nodes | length }} + --service-account=registry {{ oreg_selector }} + --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ oreg_images }} + register: oreg_results + changed_when: "'service exists' not in oreg_results.stdout" -- name: Determine if nfs volume is already attached +- name: Determine if volume is already attached to dc/docker-registry command: "{{ openshift.common.client_binary }} get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\}" + changed_when: false register: registry_volumes_output - when: attach_registry_volume | bool - set_fact: - volume_already_attached: "{{ 'server:' + nfs_host in registry_volumes_output.stdout and 'path:' + registry_volume_path in registry_volumes_output.stdout }}" - when: attach_registry_volume | bool + volume_attached: "{{ registry_volume_claim in registry_volumes_output.stdout }}" -- name: Add nfs volume to dc/docker-registry +- name: Add volume to 
dc/docker-registry command: > {{ openshift.common.client_binary }} volume dc/docker-registry - --add --overwrite --name=registry-storage --mount-path=/registry - --source='{"nfs": {"server": "{{ nfs_host }}", "path": "{{ registry_volume_path }}"}}' - when: attach_registry_volume | bool and not volume_already_attached | bool + --add --overwrite -t persistentVolumeClaim --claim-name={{ registry_volume_claim }} + --name=registry-storage + when: not volume_attached | bool diff --git a/roles/openshift_registry/vars/main.yml b/roles/openshift_registry/vars/main.yml index 9967e26f4..306350a5a 100644 --- a/roles/openshift_registry/vars/main.yml +++ b/roles/openshift_registry/vars/main.yml @@ -1,2 +1,4 @@ --- openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +oreg_images: "--images='{{ openshift.master.registry_url }}'" +oreg_selector: "--selector='{{ openshift.master.registry_selector }}'" diff --git a/roles/openshift_router/README.md b/roles/openshift_router/README.md index 836efc443..d490e1038 100644 --- a/roles/openshift_router/README.md +++ b/roles/openshift_router/README.md @@ -16,11 +16,6 @@ From this role: |--------------------|-------------------------------------------------------|---------------------| | | | | -From openshift_common: -| Name | Default value | | -|-----------------------|---------------|--------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | - Dependencies ------------ diff --git a/roles/openshift_router/meta/main.yml b/roles/openshift_router/meta/main.yml index 0471e5e14..c2b0777b5 100644 --- a/roles/openshift_router/meta/main.yml +++ b/roles/openshift_router/meta/main.yml @@ -4,10 +4,12 @@ galaxy_info: description: OpenShift Embedded Router company: Red Hat, Inc. license: Apache License, Version 2.0 - min_ansible_version: 1.7 + min_ansible_version: 1.9 platforms: - name: EL versions: - 7 categories: - cloud + dependencies: + - openshift_facts diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml index 355cbf84b..40365d04d 100644 --- a/roles/openshift_router/tasks/main.yml +++ b/roles/openshift_router/tasks/main.yml @@ -3,6 +3,7 @@ command: > {{ openshift.common.admin_binary }} router --create --replicas={{ openshift.master.infra_nodes | length }} + --namespace=default --service-account=router {{ ortr_selector }} --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ ortr_images }} register: ortr_results diff --git a/roles/openshift_serviceaccounts/meta/main.yml b/roles/openshift_serviceaccounts/meta/main.yml new file mode 100644 index 000000000..a2c9fee70 --- /dev/null +++ b/roles/openshift_serviceaccounts/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: OpenShift Operations + description: OpenShift Service Accounts + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index 4c7faa6fe..5fe7d28f3 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -1,36 +1,33 @@ -- name: tmp dir for openshift - file: - path: /tmp/openshift - state: directory - owner: root - mode: 700 - -- name: Create service account configs - template: - src: serviceaccount.j2 - dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml" - with_items: accounts - -- name: Create {{ item }} service account +- name: test if service accounts exists command: > - {{ openshift.common.client_binary }} create -f "/tmp/openshift/{{ item }}-serviceaccount.yaml" - with_items: accounts - register: _sa_result - failed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc != 0" - changed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc == 0" + {{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }} + with_items: openshift_serviceaccounts_names + failed_when: false + changed_when: false + register: account_test -- name: Get current security context constraints +- name: create the service account shell: > - {{ openshift.common.client_binary }} get scc privileged -o yaml - --output-version=v1 > /tmp/openshift/scc.yaml - changed_when: false + echo {{ lookup('template', '../templates/serviceaccount.j2') + | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }} create -f - + when: item.1.rc != 0 + with_together: + - openshift_serviceaccounts_names + - account_test.results -- name: Add security context constraint for {{ item }} - lineinfile: - dest: /tmp/openshift/scc.yaml - line: "- system:serviceaccount:default:{{ item }}" - insertafter: "^users:$" - with_items: accounts +- name: test if scc needs to be updated + command: > + {{ openshift.common.client_binary }} get scc {{ item }} -o yaml + changed_when: false + failed_when: false + register: scc_test + with_items: openshift_serviceaccounts_sccs -- name: Apply new scc rules for service accounts - command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1" +- name: Grant the user access to the privileged scc + command: > + {{ openshift.common.admin_binary }} policy add-scc-to-user + privileged system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }} + when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}" + with_nested: + - openshift_serviceaccounts_names + - scc_test.results diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 index 931e249f9..c5f12421f 100644 --- a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 +++ b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 @@ -1,4 +1,4 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ item }} + name: {{ item.0 }} diff --git a/roles/openshift_storage_nfs/README.md b/roles/openshift_storage_nfs/README.md index 548e146cb..dd988b849 100644 --- a/roles/openshift_storage_nfs/README.md +++ b/roles/openshift_storage_nfs/README.md @@ -15,11 +15,11 @@ Role Variables 
-------------- From this role: -| Name | Default value | | -|-------------------------------|-----------------------|--------------------------------------------------| -| openshift_nfs_exports_dir | /var/export | Root export directory. | -| openshift_nfs_registry_volume | regvol | Registry volume within openshift_nfs_exports_dir | -| openshift_nfs_export_options | *(rw,sync,all_squash) | NFS options for configured exports. | +| Name | Default value | | +|-------------------------------------------------|-----------------------|-------------------------------------------------------------| +| openshift_hosted_registry_storage_nfs_directory | /exports | Root export directory. | +| openshift_hosted_registry_storage_volume_name | registry | Registry volume within openshift_hosted_registry_volume_dir | +| openshift_hosted_registry_storage_nfs_options | *(rw,root_squash) | NFS options for configured exports. | From openshift_common: @@ -31,8 +31,6 @@ From openshift_common: Dependencies ------------ - - Example Playbook ---------------- diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml index e25062c00..5f6893129 100644 --- a/roles/openshift_storage_nfs/defaults/main.yml +++ b/roles/openshift_storage_nfs/defaults/main.yml @@ -1,7 +1,13 @@ --- -exports_dir: /var/export -registry_volume: regvol -export_options: '*(rw,sync,all_squash)' +openshift: + hosted: + registry: + storage: + nfs: + directory: "/exports" + options: "*(rw,root_squash)" + volume: + name: "registry" os_firewall_use_firewalld: False os_firewall_allow: - service: nfs diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 64b121ade..fdd7bd3f1 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -1,31 +1,34 @@ --- -- name: Set nfs facts - openshift_facts: - role: nfs - local_facts: - exports_dir: "{{ openshift_nfs_exports_dir | default(None) }}" - export_options: "{{ openshift_nfs_export_options | default(None) }}" - registry_volume: "{{ openshift_nfs_registry_volume | default(None) }}" - - name: Install nfs-utils yum: pkg: nfs-utils state: present +- name: Configure NFS + lineinfile: + dest: /etc/sysconfig/nfs + regexp: '^RPCNFSDARGS=.*$' + line: 'RPCNFSDARGS="-N 2 -N 3"' + register: nfs_config + +- name: Restart nfs-config + service: name=nfs-config state=restarted + when: nfs_config | changed + - name: Ensure exports directory exists file: - path: "{{ openshift.nfs.exports_dir }}" + path: "{{ openshift.hosted.registry.storage.nfs.directory }}" state: directory - name: Ensure export directories exist file: - path: "{{ openshift.nfs.exports_dir }}/{{ item }}" + path: "{{ openshift.hosted.registry.storage.nfs.directory }}/{{ item }}" state: directory mode: 0777 owner: nfsnobody group: nfsnobody with_items: - - "{{ openshift.nfs.registry_volume }}" + - "{{ openshift.hosted.registry.storage.volume.name }}" - name: Configure exports template: @@ -44,6 +47,4 @@ - nfs-server - set_fact: - nfs_service_status_changed: "{{ True in (start_result.results - | map(attribute='changed') - | list) }}" + nfs_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 702473040..c1e1994b0 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1 +1 @@ -{{ openshift.nfs.exports_dir }}/{{ 
openshift.nfs.registry_volume }} {{ openshift.nfs.export_options }} +{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }} diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 5cf4bf7af..3b584f8eb 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -1,12 +1,4 @@ --- -- name: Install iptables packages - action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" - with_items: - - iptables - - iptables-services - register: install_result - when: not openshift.common.is_atomic | bool - - name: Check if firewalld is installed command: rpm -q firewalld register: pkg_check @@ -20,6 +12,22 @@ enabled: no when: pkg_check.rc == 0 +# TODO: submit PR upstream to add mask/unmask to service module +- name: Mask firewalld service + command: systemctl mask firewalld + register: result + changed_when: "'firewalld' in result.stdout" + when: pkg_check.rc == 0 + ignore_errors: yes + +- name: Install iptables packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - iptables + - iptables-services + register: install_result + when: not openshift.common.is_atomic | bool + - name: Reload systemd units command: systemctl daemon-reload when: install_result | changed @@ -35,14 +43,6 @@ pause: seconds=10 when: result | changed -# TODO: submit PR upstream to add mask/unmask to service module -- name: Mask firewalld service - command: systemctl mask firewalld - register: result - changed_when: "'firewalld' in result.stdout" - when: pkg_check.rc == 0 - ignore_errors: yes - - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml index 9d20eb012..1824d7881 100644 --- a/roles/os_zabbix/vars/template_openshift_master.yml +++ b/roles/os_zabbix/vars/template_openshift_master.yml @@ -2,13 +2,13 @@ g_template_openshift_master: name: Template Openshift Master zitems: - - name: create_app + - name: openshift.master.app.create applications: - Openshift Master - key: create_app + key: openshift.master.app.create - - key: openshift.master.registry.healthz - description: "Shows the health status of the cluster's docker registry" + - key: openshift.master.registry.healthy_pct + description: "Shows the percentage of healthy registries in the cluster" type: int applications: - Openshift Master @@ -201,6 +201,18 @@ g_template_openshift_master: applications: - Openshift Master Metrics + - key: openshift.master.nodesnotready.count + description: "This check shows how many nodes in a cluster are in NotReady state." + type: int + applications: + - Openshift Master + + - key: openshift.master.nodesnotschedulable.count + description: "This check shows how many nodes in a cluster are not schedulable." + type: int + applications: + - Openshift Master + - key: openshift.master.apiserver.latency.summary.pods.quantile.list.5 description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the pod operations have taken to completed." 
type: int @@ -288,14 +300,14 @@ g_template_openshift_master: # Put triggers that depend on other triggers here (deps must be created first) - name: 'Application creation has failed on {HOST.NAME}' - expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1' + expression: '{Template Openshift Master:openshift.master.app.create.last(#1)}=1 and {Template Openshift Master:openshift.master.app.create.last(#2)}=1' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' priority: avg - name: 'Application creation has failed multiple times in the last hour on {HOST.NAME}' - expression: '{Template Openshift Master:create_app.sum(1h)}>3' + expression: '{Template Openshift Master:openshift.master.app.create.sum(1h)}>3' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' @@ -333,9 +345,16 @@ g_template_openshift_master: - 'Openshift Master process not running on {HOST.NAME}' priority: avg - - name: 'Docker Registry check failed on {HOST.NAME}' - expression: '{Template Openshift Master:openshift.master.registry.healthz.max(#2)}<1' - url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' + - name: 'One or more Docker Registries is unhealthy according to {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.registry.healthy_pct.last(#2)}<100 and {Template Openshift Master:openshift.master.registry.healthy_pct.max(#2)}>50' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc' + dependencies: + - 'Openshift Master process not running on {HOST.NAME}' + priority: avg + + - name: 'Multiple Docker Registries are unhealthy according to {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.registry.healthy_pct.last(#2)}<51' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' priority: high @@ -354,6 +373,13 @@ g_template_openshift_master: - 'Openshift Master API health check is failing on {HOST.NAME}' priority: high + - name: 'Hosts not ready according to {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.nodesnotready.count.last(#2)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc' + dependencies: + - 'Openshift Master process not running on {HOST.NAME}' + priority: high + zgraphs: - name: Openshift Master API Server Latency Pods LIST Quantiles width: 900 diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml index ff65ef158..b0488656d 100644 --- a/roles/os_zabbix/vars/template_openshift_node.yml +++ b/roles/os_zabbix/vars/template_openshift_node.yml @@ -37,8 +37,8 @@ g_template_openshift_node: url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc' priority: high - - name: 'OVS may not be running on {HOST.NAME}' - expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last()}<>4' + - name: '[HEAL] OVS may not be running on {HOST.NAME}' + expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last(#1)}<>4 and {Template Openshift Node:openshift.node.ovs.pids.count.last(#2)}<>4' url: 
'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc' priority: high
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 index ac950b4e5..453a9a3b4 100644 --- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 +++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 @@ -58,10 +58,10 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} -v /var/run/docker.sock:/var/run/docker.sock \ -v /var/run/openvswitch:/var/run/openvswitch \ {% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %} - -v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \ - -v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \ - -v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \ - -v /etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \ + -v /etc/origin/master/admin.kubeconfig:/etc/origin/master/admin.kubeconfig \ + -v /etc/origin/master/master.etcd-client.crt:/etc/origin/master/master.etcd-client.crt \ + -v /etc/origin/master/master.etcd-client.key:/etc/origin/master/master.etcd-client.key \ + -v /etc/origin/master/master-config.yaml:/etc/origin/master/master-config.yaml \ {% endif %} {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index cbb6f33e1..042ce1023 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -114,7 +114,7 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy): base_inventory.write('openshift_master_cluster_method=native\n') base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname)) base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname)) - if CFG.settings['master_routingconfig_subdomain']: + if CFG.settings.get('master_routingconfig_subdomain', False): base_inventory.write("osm_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
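For reference (illustration only, not part of the patch): assuming the `to_padded_yaml` filter indents nested values the way its arguments suggest, the new persistent-volume.yml.j2 and persistent-volume-claim.yml.j2 templates would render roughly the following for the example `persistent_volumes` and `persistent_volume_claims` variables shown in the openshift_persistent_volumes README above. This is a hedged sketch of the expected objects, not output captured from a run.

```yaml
# Approximate render of persistent-volume.yml.j2 for the README example
---
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: "registry-volume"
  spec:
    capacity:
      storage: "5Gi"
    accessModes:
    - "ReadWriteMany"
    nfs:
      server: "nfs.example.com"
      path: "/var/exports/registry"

# Approximate render of persistent-volume-claim.yml.j2 for the README example
---
apiVersion: "v1"
kind: "List"
items:
- apiVersion: "v1"
  kind: "PersistentVolumeClaim"
  metadata:
    name: "registry-claim"
  spec:
    accessModes:
    - "ReadWriteMany"
    resources:
      requests:
        storage: "5Gi"
```

With the updated openshift_registry role, the claim is then attached by the task shown above, which amounts to something like `oc volume dc/docker-registry --add --overwrite -t persistentVolumeClaim --claim-name=registry-claim --name=registry-storage` (the default `registry_volume_claim` is `registry-claim`). Similarly, with the new openshift_storage_nfs defaults the exports.j2 template would emit an export line such as `/exports/registry *(rw,root_squash)`.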