author | Kenny Woodson <kwoodson@redhat.com> | 2017-02-12 14:22:46 -0500
---|---|---
committer | Kenny Woodson <kwoodson@redhat.com> | 2017-02-20 16:13:40 -0500
commit | 0e6d708c0278a2363fdf4161b949b944d29ea9d3
tree | 7ae0a590566456a5696fe2efb26f51b36b0665aa /roles/lib_openshift/src
parent | c9563d87c6de11503c5e8fe29a794b8c2846afcc
Adding router and registry to lib_openshift.
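
For readers skimming the diff, this is how the new modules are meant to be called once the role is assembled. The following is a minimal playbook sketch lifted from the EXAMPLES block in doc/router further down (the selector, certificate paths, and replica count are simply the illustrative values used there):

```yaml
- name: create an haproxy router on the infra nodes
  oadm_router:
    name: router
    namespace: default
    service_account: router
    replicas: 2
    selector: type=infra
    cert_file: /etc/origin/master/named_certificates/router.crt
    key_file: /etc/origin/master/named_certificates/router.key
    cacert_file: /etc/origin/master/named_certificates/router.ca
  register: router_out
  run_once: true
```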
Diffstat (limited to 'roles/lib_openshift/src')
-rw-r--r-- | roles/lib_openshift/src/ansible/oadm_registry.py | 48
-rw-r--r-- | roles/lib_openshift/src/ansible/oadm_router.py | 63
-rw-r--r-- | roles/lib_openshift/src/class/oadm_registry.py | 394
-rw-r--r-- | roles/lib_openshift/src/class/oadm_router.py | 437
-rw-r--r-- | roles/lib_openshift/src/doc/registry | 190
-rw-r--r-- | roles/lib_openshift/src/doc/router | 198
-rw-r--r-- | roles/lib_openshift/src/lib/rolebinding.py | 276
-rw-r--r-- | roles/lib_openshift/src/lib/volume.py | 36
-rw-r--r-- | roles/lib_openshift/src/sources.yml | 30
9 files changed, 1672 insertions, 0 deletions
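
Both ansible/ entry points added below follow the same shape: build an AnsibleModule, hand module.params and module.check_mode to the corresponding class, and map the returned dict onto exit_json/fail_json. The sketch below is a self-contained illustration of that pattern, not the committed code; FakeRouter stands in for the real Registry/Router classes that the sources.yml concatenation supplies, and the argument spec is trimmed to the shared options.

```python
# Minimal illustration of the entry-point pattern used by both
# ansible/oadm_registry.py and ansible/oadm_router.py in this commit.
# FakeRouter is a stand-in; see the diff below for the full argument
# specs and the real run_ansible implementations.
from ansible.module_utils.basic import AnsibleModule


class FakeRouter(object):
    @staticmethod
    def run_ansible(params, check_mode):
        # The real classes return dicts such as:
        #   {'changed': bool, 'results': ..., 'state': params['state']}
        #   {'changed': True, 'msg': 'CHECK_MODE: ...'}
        #   {'failed': True, 'msg': ...}
        return {'changed': False, 'state': params['state']}


def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent']),
            # module-specific options omitted; see the diff below
        ),
        supports_check_mode=True,
    )

    results = FakeRouter.run_ansible(module.params, module.check_mode)
    if 'failed' in results:
        module.fail_json(**results)
    module.exit_json(**results)


if __name__ == '__main__':
    main()
```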
diff --git a/roles/lib_openshift/src/ansible/oadm_registry.py b/roles/lib_openshift/src/ansible/oadm_registry.py new file mode 100644 index 000000000..53c1dab5a --- /dev/null +++ b/roles/lib_openshift/src/ansible/oadm_registry.py @@ -0,0 +1,48 @@ +# pylint: skip-file +# flake8: noqa + +def main(): + ''' + ansible oc module for registry + ''' + + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', type='str', + choices=['present', 'absent']), + debug=dict(default=False, type='bool'), + namespace=dict(default='default', type='str'), + name=dict(default=None, required=True, type='str'), + + kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), + credentials=dict(default='/etc/origin/master/openshift-registry.kubeconfig', type='str'), + images=dict(default=None, type='str'), + latest_images=dict(default=False, type='bool'), + labels=dict(default=None, type='list'), + ports=dict(default=['5000'], type='list'), + replicas=dict(default=1, type='int'), + selector=dict(default=None, type='str'), + service_account=dict(default='registry', type='str'), + mount_host=dict(default=None, type='str'), + registry_type=dict(default='docker-registry', type='str'), + template=dict(default=None, type='str'), + volume=dict(default='/registry', type='str'), + env_vars=dict(default=None, type='dict'), + volume_mounts=dict(default=None, type='list'), + edits=dict(default=None, type='list'), + force=dict(default=False, type='bool'), + ), + mutually_exclusive=[["registry_type", "images"]], + + supports_check_mode=True, + ) + + results = Registry.run_ansible(module.params, module.check_mode) + if 'failed' in results: + module.fail_json(**results) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_openshift/src/ansible/oadm_router.py b/roles/lib_openshift/src/ansible/oadm_router.py new file mode 100644 index 000000000..131f0c1ed --- /dev/null +++ b/roles/lib_openshift/src/ansible/oadm_router.py @@ -0,0 +1,63 @@ +# pylint: skip-file +# flake8: noqa + + +def main(): + ''' + ansible oc module for router + ''' + + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', type='str', + choices=['present', 'absent']), + debug=dict(default=False, type='bool'), + namespace=dict(default='default', type='str'), + name=dict(default='router', type='str'), + + kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), + cert_file=dict(default=None, type='str'), + key_file=dict(default=None, type='str'), + images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}' + latest_images=dict(default=False, type='bool'), + labels=dict(default=None, type='list'), + ports=dict(default=['80:80', '443:443'], type='list'), + replicas=dict(default=1, type='int'), + selector=dict(default=None, type='str'), + service_account=dict(default='router', type='str'), + router_type=dict(default='haproxy-router', type='str'), + host_network=dict(default=True, type='bool'), + # external host options + external_host=dict(default=None, type='str'), + external_host_vserver=dict(default=None, type='str'), + external_host_insecure=dict(default=False, type='bool'), + external_host_partition_path=dict(default=None, type='str'), + external_host_username=dict(default=None, type='str'), + external_host_password=dict(default=None, type='str'), + external_host_private_key=dict(default=None, type='str'), + # Metrics + expose_metrics=dict(default=False, type='bool'), + metrics_image=dict(default=None, type='str'), + # 
Stats + stats_user=dict(default=None, type='str'), + stats_password=dict(default=None, type='str'), + stats_port=dict(default=1936, type='int'), + # extra + cacert_file=dict(default=None, type='str'), + # edits + edits=dict(default=[], type='list'), + ), + mutually_exclusive=[["router_type", "images"]], + + supports_check_mode=True, + ) + results = Router.run_ansible(module.params, module.check_mode) + + if 'failed' in results: + module.fail_json(**results) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_openshift/src/class/oadm_registry.py b/roles/lib_openshift/src/class/oadm_registry.py new file mode 100644 index 000000000..b42410926 --- /dev/null +++ b/roles/lib_openshift/src/class/oadm_registry.py @@ -0,0 +1,394 @@ +# pylint: skip-file +# flake8: noqa + +class RegistryException(Exception): + ''' Registry Exception Class ''' + pass + + +class RegistryConfig(OpenShiftCLIConfig): + ''' RegistryConfig is a DTO for the registry. ''' + def __init__(self, rname, namespace, kubeconfig, registry_options): + super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options) + + +class Registry(OpenShiftCLI): + ''' Class to wrap the oc command line tools ''' + + volume_mount_path = 'spec.template.spec.containers[0].volumeMounts' + volume_path = 'spec.template.spec.volumes' + env_path = 'spec.template.spec.containers[0].env' + + def __init__(self, + registry_config, + verbose=False): + ''' Constructor for Registry + + a registry consists of 3 or more parts + - dc/docker-registry + - svc/docker-registry + + Parameters: + :registry_config: + :verbose: + ''' + super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose) + self.version = OCVersion(registry_config.kubeconfig, verbose) + self.svc_ip = None + self.portal_ip = None + self.config = registry_config + self.verbose = verbose + self.registry_parts = [{'kind': 'dc', 'name': self.config.name}, + {'kind': 'svc', 'name': self.config.name}, + ] + + self.__registry_prep = None + self.volume_mounts = [] + self.volumes = [] + if self.config.config_options['volume_mounts']['value']: + for volume in self.config.config_options['volume_mounts']['value']: + volume_info = {'secret_name': volume.get('secret_name', None), + 'name': volume.get('name', None), + 'type': volume.get('type', None), + 'path': volume.get('path', None), + 'claimName': volume.get('claim_name', None), + 'claimSize': volume.get('claim_size', None), + } + + vol, vol_mount = Volume.create_volume_structure(volume_info) + self.volumes.append(vol) + self.volume_mounts.append(vol_mount) + + self.dconfig = None + self.svc = None + + @property + def deploymentconfig(self): + ''' deploymentconfig property ''' + return self.dconfig + + @deploymentconfig.setter + def deploymentconfig(self, config): + ''' setter for deploymentconfig property ''' + self.dconfig = config + + @property + def service(self): + ''' service property ''' + return self.svc + + @service.setter + def service(self, config): + ''' setter for service property ''' + self.svc = config + + @property + def registry_prep(self): + ''' registry_prep property ''' + if not self.__registry_prep: + results = self.prep_registry() + if not results: + raise RegistryException('Could not perform registry preparation.') + self.__registry_prep = results + + return self.__registry_prep + + @registry_prep.setter + def registry_prep(self, data): + ''' setter method for registry_prep attribute ''' + self.__registry_prep = data + + def 
force_registry_prep(self): + '''force a registry prep''' + self.registry_prep = None + + def get(self): + ''' return the self.registry_parts ''' + self.deploymentconfig = None + self.service = None + + for part in self.registry_parts: + result = self._get(part['kind'], rname=part['name']) + if result['returncode'] == 0 and part['kind'] == 'dc': + self.deploymentconfig = DeploymentConfig(result['results'][0]) + elif result['returncode'] == 0 and part['kind'] == 'svc': + self.service = Yedit(content=result['results'][0]) + + return (self.deploymentconfig, self.service) + + def exists(self): + '''does the object exist?''' + self.get() + if self.deploymentconfig or self.service: + return True + + return False + + def delete(self, complete=True): + '''return all pods ''' + parts = [] + for part in self.registry_parts: + if not complete and part['kind'] == 'svc': + continue + parts.append(self._delete(part['kind'], part['name'])) + + return parts + + def prep_registry(self): + ''' prepare a registry for instantiation ''' + # In <= 3.4 credentials are used + # In >= 3.5 credentials are removed + versions = self.version.get() + if '3.5' in versions['oc']: + self.config.config_options['credentials']['include'] = False + + options = self.config.to_option_list() + + cmd = ['registry', '-n', self.config.namespace] + cmd.extend(options) + cmd.extend(['--dry-run=True', '-o', 'json']) + + results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json') + # probably need to parse this + # pylint thinks results is a string + # pylint: disable=no-member + if results['returncode'] != 0 and results['results'].has_key('items'): + return results + + service = None + deploymentconfig = None + # pylint: disable=invalid-sequence-index + for res in results['results']['items']: + if res['kind'] == 'DeploymentConfig': + deploymentconfig = DeploymentConfig(res) + elif res['kind'] == 'Service': + service = Service(res) + + # Verify we got a service and a deploymentconfig + if not service or not deploymentconfig: + return results + + # results will need to get parsed here and modifications added + deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig)) + + # modify service ip + if self.svc_ip: + service.put('spec.clusterIP', self.svc_ip) + if self.portal_ip: + service.put('spec.portalIP', self.portal_ip) + + # need to create the service and the deploymentconfig + service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) + deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict) + + return {"service": service, "service_file": service_file, + "deployment": deploymentconfig, "deployment_file": deployment_file} + + def create(self): + '''Create a registry''' + results = [] + for config_file in ['deployment_file', 'service_file']: + results.append(self._create(self.registry_prep[config_file])) + + # Clean up returned results + rval = 0 + for result in results: + if result['returncode'] != 0: + rval = result['returncode'] + + + return {'returncode': rval, 'results': results} + + def update(self): + '''run update for the registry. 
This performs a delete and then create ''' + # Store the current service IP + self.force_registry_prep() + + self.get() + if self.service: + svcip = self.service.get('spec.clusterIP') + if svcip: + self.svc_ip = svcip + portip = self.service.get('spec.portalIP') + if portip: + self.portal_ip = portip + + parts = self.delete(complete=False) + for part in parts: + if part['returncode'] != 0: + if part.has_key('stderr') and 'not found' in part['stderr']: + # the object is not there, continue + continue + # something went wrong + return parts + + # Ugly built in sleep here. + #time.sleep(10) + + results = [] + results.append(self._create(self.registry_prep['deployment_file'])) + results.append(self._replace(self.registry_prep['service_file'])) + + # Clean up returned results + rval = 0 + for result in results: + if result['returncode'] != 0: + rval = result['returncode'] + + return {'returncode': rval, 'results': results} + + def add_modifications(self, deploymentconfig): + ''' update a deployment config with changes ''' + # Currently we know that our deployment of a registry requires a few extra modifications + # Modification 1 + # we need specific environment variables to be set + for key, value in self.config.config_options['env_vars']['value'].items(): + if not deploymentconfig.exists_env_key(key): + deploymentconfig.add_env_value(key, value) + else: + deploymentconfig.update_env_var(key, value) + + # Modification 2 + # we need specific volume variables to be set + for volume in self.volumes: + deploymentconfig.update_volume(volume) + + for vol_mount in self.volume_mounts: + deploymentconfig.update_volume_mount(vol_mount) + + # Modification 3 + # Edits + edit_results = [] + for edit in self.config.config_options['edits'].get('value', []): + if edit['action'] == 'put': + edit_results.append(deploymentconfig.put(edit['key'], + edit['value'])) + if edit['action'] == 'update': + edit_results.append(deploymentconfig.update(edit['key'], + edit['value'], + edit.get('index', None), + edit.get('curr_value', None))) + if edit['action'] == 'append': + edit_results.append(deploymentconfig.append(edit['key'], + edit['value'])) + + if edit_results and not any([res[0] for res in edit_results]): + return None + + return deploymentconfig.yaml_dict + + def needs_update(self, verbose=False): + ''' check to see if we need to update ''' + if not self.service or not self.deploymentconfig: + return True + + exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol'] + if not Utils.check_def_equal(self.registry_prep['service'].yaml_dict, + self.service.yaml_dict, + exclude_list, + verbose): + return True + + exclude_list = ['dnsPolicy', + 'terminationGracePeriodSeconds', + 'restartPolicy', 'timeoutSeconds', + 'livenessProbe', 'readinessProbe', + 'terminationMessagePath', + 'rollingParams', + 'securityContext', + 'imagePullPolicy', + 'protocol', # ports.portocol: TCP + 'type', # strategy: {'type': 'rolling'} + 'defaultMode', # added on secrets + 'activeDeadlineSeconds', # added in 1.5 for timeouts + ] + + if not Utils.check_def_equal(self.registry_prep['deployment'].yaml_dict, + self.deploymentconfig.yaml_dict, + exclude_list, + verbose): + return True + + return False + + + @staticmethod + def run_ansible(params, check_mode): + '''run idempotent ansible code''' + + rconfig = RegistryConfig(params['name'], + params['namespace'], + params['kubeconfig'], + {'credentials': {'value': params['credentials'], 'include': True}, + 'default_cert': {'value': None, 'include': True}, + 'images': {'value': params['images'], 
'include': True}, + 'latest_images': {'value': params['latest_images'], 'include': True}, + 'labels': {'value': params['labels'], 'include': True}, + 'ports': {'value': ','.join(params['ports']), 'include': True}, + 'replicas': {'value': params['replicas'], 'include': True}, + 'selector': {'value': params['selector'], 'include': True}, + 'service_account': {'value': params['service_account'], 'include': True}, + 'registry_type': {'value': params['registry_type'], 'include': False}, + 'mount_host': {'value': params['mount_host'], 'include': True}, + 'volume': {'value': params['mount_host'], 'include': True}, + 'template': {'value': params['template'], 'include': True}, + 'env_vars': {'value': params['env_vars'], 'include': False}, + 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, + 'edits': {'value': params['edits'], 'include': False}, + }) + + + ocregistry = Registry(rconfig) + + state = params['state'] + + ######## + # Delete + ######## + if state == 'absent': + if not ocregistry.exists(): + return {'changed': False, 'state': state} + + if check_mode: + return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'} + + api_rval = ocregistry.delete() + + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} + + if state == 'present': + ######## + # Create + ######## + if not ocregistry.exists(): + + if check_mode: + return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'} + + api_rval = ocregistry.create() + + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} + + ######## + # Update + ######## + if not params['force'] and not ocregistry.needs_update(): + return {'changed': False, 'state': state} + + if check_mode: + return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} + + api_rval = ocregistry.update() + + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} + + return {'failed': True, 'msg': 'Unknown state passed. %s' % state} diff --git a/roles/lib_openshift/src/class/oadm_router.py b/roles/lib_openshift/src/class/oadm_router.py new file mode 100644 index 000000000..8b4efcc3f --- /dev/null +++ b/roles/lib_openshift/src/class/oadm_router.py @@ -0,0 +1,437 @@ +# pylint: skip-file + +import time + +class RouterException(Exception): + ''' Router exception''' + pass + +class RouterConfig(OpenShiftCLIConfig): + ''' RouterConfig is a DTO for the router. 
''' + def __init__(self, rname, namespace, kubeconfig, router_options): + super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options) + +class Router(OpenShiftCLI): + ''' Class to wrap the oc command line tools ''' + def __init__(self, + router_config, + verbose=False): + ''' Constructor for OpenshiftOC + + a router consists of 3 or more parts + - dc/router + - svc/router + - endpoint/router + ''' + super(Router, self).__init__('default', router_config.kubeconfig, verbose) + self.config = router_config + self.verbose = verbose + self.router_parts = [{'kind': 'dc', 'name': self.config.name}, + {'kind': 'svc', 'name': self.config.name}, + {'kind': 'sa', 'name': self.config.config_options['service_account']['value']}, + {'kind': 'secret', 'name': self.config.name + '-certs'}, + {'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'}, + #{'kind': 'endpoints', 'name': self.config.name}, + ] + + self.__router_prep = None + self.dconfig = None + self.svc = None + self._secret = None + self._serviceaccount = None + self._rolebinding = None + self.get() + + @property + def router_prep(self): + ''' property deploymentconfig''' + if self.__router_prep == None: + results = self.prepare_router() + if not results: + raise RouterException('Could not perform router preparation') + self.__router_prep = results + + return self.__router_prep + + @router_prep.setter + def router_prep(self, obj): + '''set the router prep property''' + self.__router_prep = obj + + @property + def deploymentconfig(self): + ''' property deploymentconfig''' + return self.dconfig + + @deploymentconfig.setter + def deploymentconfig(self, config): + ''' setter for property deploymentconfig ''' + self.dconfig = config + + @property + def service(self): + ''' property service ''' + return self.svc + + @service.setter + def service(self, config): + ''' setter for property service ''' + self.svc = config + + @property + def secret(self): + ''' property secret ''' + return self._secret + + @secret.setter + def secret(self, config): + ''' setter for property secret ''' + self._secret = config + + @property + def serviceaccount(self): + ''' property secret ''' + return self._serviceaccount + + @serviceaccount.setter + def serviceaccount(self, config): + ''' setter for property secret ''' + self._serviceaccount = config + + @property + def rolebinding(self): + ''' property rolebinding ''' + return self._rolebinding + + @rolebinding.setter + def rolebinding(self, config): + ''' setter for property rolebinding ''' + self._rolebinding = config + + def get(self): + ''' return the self.router_parts ''' + self.service = None + self.deploymentconfig = None + self.serviceaccount = None + self.secret = None + self.rolebinding = None + for part in self.router_parts: + result = self._get(part['kind'], rname=part['name']) + if result['returncode'] == 0 and part['kind'] == 'dc': + self.deploymentconfig = DeploymentConfig(result['results'][0]) + elif result['returncode'] == 0 and part['kind'] == 'svc': + self.service = Service(content=result['results'][0]) + elif result['returncode'] == 0 and part['kind'] == 'sa': + self.serviceaccount = ServiceAccount(content=result['results'][0]) + elif result['returncode'] == 0 and part['kind'] == 'secret': + self.secret = Secret(content=result['results'][0]) + elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding': + self.rolebinding = RoleBinding(content=result['results'][0]) + + return {'deploymentconfig': self.deploymentconfig, + 'service': self.service, 
+ 'serviceaccount': self.serviceaccount, + 'secret': self.secret, + 'clusterrolebinding': self.rolebinding, + } + + def exists(self): + '''return a whether svc or dc exists ''' + if self.deploymentconfig and self.service and self.secret and self.serviceaccount: + return True + + return False + + def delete(self): + '''return all pods ''' + parts = [] + for part in self.router_parts: + parts.append(self._delete(part['kind'], part['name'])) + + return parts + + def add_modifications(self, deploymentconfig): + '''modify the deployment config''' + # We want modifications in the form of edits coming in from the module. + # Let's apply these here + edit_results = [] + for edit in self.config.config_options['edits'].get('value', []): + if edit['action'] == 'put': + edit_results.append(deploymentconfig.put(edit['key'], + edit['value'])) + if edit['action'] == 'update': + edit_results.append(deploymentconfig.update(edit['key'], + edit['value'], + edit.get('index', None), + edit.get('curr_value', None))) + if edit['action'] == 'append': + edit_results.append(deploymentconfig.append(edit['key'], + edit['value'])) + + if edit_results and not any([res[0] for res in edit_results]): + return None + + return deploymentconfig + + def prepare_router(self): + '''prepare router for instantiation''' + # We need to create the pem file + router_pem = '/tmp/router.pem' + with open(router_pem, 'w') as rfd: + rfd.write(open(self.config.config_options['cert_file']['value']).read()) + rfd.write(open(self.config.config_options['key_file']['value']).read()) + if self.config.config_options['cacert_file']['value'] and \ + os.path.exists(self.config.config_options['cacert_file']['value']): + rfd.write(open(self.config.config_options['cacert_file']['value']).read()) + + atexit.register(Utils.cleanup, [router_pem]) + self.config.config_options['default_cert']['value'] = router_pem + + options = self.config.to_option_list() + + cmd = ['router', self.config.name, '-n', self.config.namespace] + cmd.extend(options) + cmd.extend(['--dry-run=True', '-o', 'json']) + + results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json') + + # pylint: disable=no-member + if results['returncode'] != 0 and results['results'].has_key('items'): + return results + + oc_objects = {'DeploymentConfig': {'obj': None, 'path': None}, + 'Secret': {'obj': None, 'path': None}, + 'ServiceAccount': {'obj': None, 'path': None}, + 'ClusterRoleBinding': {'obj': None, 'path': None}, + 'Service': {'obj': None, 'path': None}, + } + # pylint: disable=invalid-sequence-index + for res in results['results']['items']: + if res['kind'] == 'DeploymentConfig': + oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res) + elif res['kind'] == 'Service': + oc_objects['Service']['obj'] = Service(res) + elif res['kind'] == 'ServiceAccount': + oc_objects['ServiceAccount']['obj'] = ServiceAccount(res) + elif res['kind'] == 'Secret': + oc_objects['Secret']['obj'] = Secret(res) + elif res['kind'] == 'ClusterRoleBinding': + oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res) + + # Currently only deploymentconfig needs updating + # Verify we got a deploymentconfig + if not oc_objects['DeploymentConfig']['obj']: + return results + + # results will need to get parsed here and modifications added + oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj']) + + for oc_type in oc_objects.keys(): + oc_objects[oc_type]['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_objects[oc_type]['obj'].yaml_dict) + + 
return oc_objects + + def create(self): + '''Create a deploymentconfig ''' + # generate the objects and prepare for instantiation + self.prepare_router() + + results = [] + for _, oc_data in self.router_prep.items(): + results.append(self._create(oc_data['path'])) + + rval = 0 + for result in results: + if result['returncode'] != 0 and not 'already exist' in result['stderr']: + rval = result['returncode'] + + return {'returncode': rval, 'results': results} + + def update(self): + '''run update for the router. This performs a delete and then create ''' + parts = self.delete() + for part in parts: + if part['returncode'] != 0: + if part.has_key('stderr') and 'not found' in part['stderr']: + # the object is not there, continue + continue + + # something went wrong + return parts + + # Ugly built in sleep here. + time.sleep(15) + + return self.create() + + # pylint: disable=too-many-return-statements,too-many-branches + def needs_update(self): + ''' check to see if we need to update ''' + if not self.deploymentconfig or not self.service or not self.serviceaccount or not self.secret: + return True + + oc_objects_prep = self.prepare_router() + + # Since the output from oadm_router is returned as raw + # we need to parse it. The first line is the stats_password in 3.1 + # Inside of 3.2, it is just json + + # ServiceAccount: + # Need to determine the pregenerated ones from the original + # Since these are auto generated, we can skip + skip = ['secrets', 'imagePullSecrets'] + if not Utils.check_def_equal(oc_objects_prep['ServiceAccount']['obj'].yaml_dict, + self.serviceaccount.yaml_dict, + skip_keys=skip, + debug=self.verbose): + return True + + # Secret: + # In 3.2 oadm router generates a secret volume for certificates + # See if one was generated from our dry-run and verify it if needed + if oc_objects_prep['Secret']['obj']: + if not self.secret: + return True + if not Utils.check_def_equal(oc_objects_prep['Secret']['obj'].yaml_dict, + self.secret.yaml_dict, + skip_keys=skip, + debug=self.verbose): + return True + + # Service: + # Fix the ports to have protocol=TCP + for port in oc_objects_prep['Service']['obj'].get('spec.ports'): + port['protocol'] = 'TCP' + + skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type'] + if not Utils.check_def_equal(oc_objects_prep['Service']['obj'].yaml_dict, + self.service.yaml_dict, + skip_keys=skip, + debug=self.verbose): + return True + + # DeploymentConfig: + # Router needs some exceptions. + # We do not want to check the autogenerated password for stats admin + if not self.config.config_options['stats_password']['value']: + for idx, env_var in enumerate(oc_objects_prep['DeploymentConfig']['obj'].get(\ + 'spec.template.spec.containers[0].env') or []): + if env_var['name'] == 'STATS_PASSWORD': + env_var['value'] = \ + self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx) + break + + # dry-run doesn't add the protocol to the ports section. We will manually do that. 
+ for idx, port in enumerate(oc_objects_prep['DeploymentConfig']['obj'].get(\ + 'spec.template.spec.containers[0].ports') or []): + if not port.has_key('protocol'): + port['protocol'] = 'TCP' + + # These are different when generating + skip = ['dnsPolicy', + 'terminationGracePeriodSeconds', + 'restartPolicy', 'timeoutSeconds', + 'livenessProbe', 'readinessProbe', + 'terminationMessagePath', 'hostPort', + 'defaultMode', + ] + + return not Utils.check_def_equal(oc_objects_prep['DeploymentConfig']['obj'].yaml_dict, + self.deploymentconfig.yaml_dict, + skip_keys=skip, + debug=self.verbose) + + + @staticmethod + def run_ansible(params, check_mode): + '''run ansible idempotent code''' + + rconfig = RouterConfig(params['name'], + params['namespace'], + params['kubeconfig'], + {'default_cert': {'value': None, 'include': True}, + 'cert_file': {'value': params['cert_file'], 'include': False}, + 'key_file': {'value': params['key_file'], 'include': False}, + 'images': {'value': params['images'], 'include': True}, + 'latest_images': {'value': params['latest_images'], 'include': True}, + 'labels': {'value': params['labels'], 'include': True}, + 'ports': {'value': ','.join(params['ports']), 'include': True}, + 'replicas': {'value': params['replicas'], 'include': True}, + 'selector': {'value': params['selector'], 'include': True}, + 'service_account': {'value': params['service_account'], 'include': True}, + 'router_type': {'value': params['router_type'], 'include': False}, + 'host_network': {'value': params['host_network'], 'include': True}, + 'external_host': {'value': params['external_host'], 'include': True}, + 'external_host_vserver': {'value': params['external_host_vserver'], + 'include': True}, + 'external_host_insecure': {'value': params['external_host_insecure'], + 'include': True}, + 'external_host_partition_path': {'value': params['external_host_partition_path'], + 'include': True}, + 'external_host_username': {'value': params['external_host_username'], + 'include': True}, + 'external_host_password': {'value': params['external_host_password'], + 'include': True}, + 'external_host_private_key': {'value': params['external_host_private_key'], + 'include': True}, + 'expose_metrics': {'value': params['expose_metrics'], 'include': True}, + 'metrics_image': {'value': params['metrics_image'], 'include': True}, + 'stats_user': {'value': params['stats_user'], 'include': True}, + 'stats_password': {'value': params['stats_password'], 'include': True}, + 'stats_port': {'value': params['stats_port'], 'include': True}, + # extra + 'cacert_file': {'value': params['cacert_file'], 'include': False}, + # edits + 'edits': {'value': params['edits'], 'include': False}, + }) + + + ocrouter = Router(rconfig) + + state = params['state'] + + ######## + # Delete + ######## + if state == 'absent': + if not ocrouter.exists(): + return {'changed': False, 'state': state} + + if check_mode: + return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'} + + api_rval = ocrouter.delete() + + return {'changed': True, 'results': api_rval, 'state': state} + + if state == 'present': + ######## + # Create + ######## + if not ocrouter.exists(): + + if check_mode: + return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} + + api_rval = ocrouter.create() + + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} + + ######## + # Update + ######## + if not ocrouter.needs_update(): + return {'changed': False, 
'state': state} + + if check_mode: + return {'changed': False, 'msg': 'CHECK_MODE: Would have performed an update.'} + + api_rval = ocrouter.update() + + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} diff --git a/roles/lib_openshift/src/doc/registry b/roles/lib_openshift/src/doc/registry new file mode 100644 index 000000000..953e8d90d --- /dev/null +++ b/roles/lib_openshift/src/doc/registry @@ -0,0 +1,190 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oadm_manage_node +short_description: Module to manage openshift nodes +description: + - Manage openshift nodes programmatically. +options: + kubeconfig: + description: + - The path for the kubeconfig file to use for authentication + required: false + default: /etc/origin/master/admin.kubeconfig + aliases: [] + debug: + description: + - Turn on debug output. + required: false + default: False + aliases: [] + name: + description: + - The name of the registry + required: false + default: None + aliases: [] + namespace: + description: + - The selector when filtering on node labels + required: false + default: None + aliases: [] + credentials: + description: + - Path to a .kubeconfig file that will contain the credentials the registry should use to contact the master. + required: false + default: None + aliases: [] + images: + description: + - The image to base this registry on - ${component} will be replaced with --type + required: 'openshift3/ose-${component}:${version}' + default: None + aliases: [] + latest_images: + description: + - If true, attempt to use the latest image for the registry instead of the latest release. + required: false + default: False + aliases: [] + labels: + description: + - A set of labels to uniquely identify the registry and its components. + required: false + default: None + aliases: [] + enforce_quota: + description: + - If set, the registry will refuse to write blobs if they exceed quota limits + required: False + default: False + aliases: [] + mount_host: + description: + - If set, the registry volume will be created as a host-mount at this path. + required: False + default: False + aliases: [] + ports: + description: + - A comma delimited list of ports or port pairs to expose on the registry pod. The default is set for 5000. + required: False + default: [5000] + aliases: [] + replicas: + description: + - The replication factor of the registry; commonly 2 when high availability is desired. + required: False + default: 1 + aliases: [] + selector: + description: + - Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes. + required: False + default: None + aliases: [] + service_account: + description: + - Name of the service account to use to run the registry pod. + required: False + default: 'registry' + aliases: [] + tls_certificate: + description: + - An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS + required: false + default: None + aliases: [] + tls_key: + description: + - An optional path to a PEM encoded private key for serving over TLS + required: false + default: None + aliases: [] + registry_type: + description: + - The registry image to use - if you specify --images this flag may be ignored. 
+ required: false + default: 'docker-registry' + aliases: [] + volume: + description: + - The volume path to use for registry storage; defaults to /registry which is the default for origin-docker-registry. + required: false + default: '/registry' + aliases: [] + volume_mounts: + description: + - The volume mounts for the registry. + required: false + default: None + aliases: [] + daemonset: + description: + - Use a daemonset instead of a deployment config. + required: false + default: None + aliases: [] + edits: + description: + - A list of modifications to make on the deploymentconfig + required: false + default: None + aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create a secure registry + oadm_registry: + credentials: /etc/origin/master/openshift-registry.kubeconfig + name: docker-registry + service_account: registry + replicas: 2 + namespace: default + selector: type=infra + images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}" + env_vars: + REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml + REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt + REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key + REGISTRY_HTTP_SECRET: supersecret + volume_mounts: + - path: /etc/secrets + name: dockercerts + type: secret + secret_name: registry-secret + - path: /etc/registryconfig + name: dockersecrets + type: secret + secret_name: docker-registry-config + edits: + - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme + value: HTTPS + action: put + - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme + value: HTTPS + action: put + - key: spec.strategy.rollingParams + value: + intervalSeconds: 1 + maxSurge: 50% + maxUnavailable: 50% + timeoutSeconds: 600 + updatePeriodSeconds: 1 + action: put + - key: spec.template.spec.containers[0].resources.limits.memory + value: 2G + action: update + - key: spec.template.spec.containers[0].resources.requests.memory + value: 1G + action: update + + register: registryout + +''' diff --git a/roles/lib_openshift/src/doc/router b/roles/lib_openshift/src/doc/router new file mode 100644 index 000000000..7ba40d253 --- /dev/null +++ b/roles/lib_openshift/src/doc/router @@ -0,0 +1,198 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oadm_router +short_description: Module to manage openshift router +description: + - Manage openshift router programmatically. +options: + state: + description: + - Whether to create or delete the router + - present - create the router + - absent - remove the router + required: false + default: present + choices: + - present + - absent + aliases: [] + kubeconfig: + description: + - The path for the kubeconfig file to use for authentication + required: false + default: /etc/origin/master/admin.kubeconfig + aliases: [] + debug: + description: + - Turn on debug output. + required: false + default: False + aliases: [] + name: + description: + - The name of the router + required: false + default: router + aliases: [] + namespace: + description: + - The namespace where to manage the router. + required: false + default: default + aliases: [] + credentials: + description: + - Path to a .kubeconfig file that will contain the credentials the registry should use to contact the master. 
+ required: false + default: None + aliases: [] + images: + description: + - The image to base this router on - ${component} will be replaced with --type + required: 'openshift3/ose-${component}:${version}' + default: None + aliases: [] + latest_images: + description: + - If true, attempt to use the latest image for the registry instead of the latest release. + required: false + default: False + aliases: [] + labels: + description: + - A set of labels to uniquely identify the registry and its components. + required: false + default: None + aliases: [] + ports: + description: + - A list of strings in the 'port:port' format + required: False + default: + - 80:80 + - 443:443 + aliases: [] + replicas: + description: + - The replication factor of the registry; commonly 2 when high availability is desired. + required: False + default: 1 + aliases: [] + selector: + description: + - Selector used to filter nodes on deployment. Used to run routers on a specific set of nodes. + required: False + default: None + aliases: [] + service_account: + description: + - Name of the service account to use to run the router pod. + required: False + default: router + aliases: [] + router_type: + description: + - The router image to use - if you specify --images this flag may be ignored. + required: false + default: haproxy-router + aliases: [] + external_host: + description: + - If the underlying router implementation connects with an external host, this is the external host's hostname. + required: false + default: None + aliases: [] + external_host_vserver: + description: + - If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTP connections. + required: false + default: None + aliases: [] + external_host_insecure: + description: + - If the underlying router implementation connects with an external host + - over a secure connection, this causes the router to skip strict certificate verification with the external host. + required: false + default: False + aliases: [] + external_host_partition_path: + description: + - If the underlying router implementation uses partitions for control boundaries, this is the path to use for that partition. + required: false + default: None + aliases: [] + external_host_username: + description: + - If the underlying router implementation connects with an external host, this is the username for authenticating with the external host. + required: false + default: None + aliases: [] + external_host_password: + description: + - If the underlying router implementation connects with an external host, this is the password for authenticating with the external host. + required: false + default: None + aliases: [] + external_host_private_key: + description: + - If the underlying router implementation requires an SSH private key, this is the path to the private key file. + required: false + default: None + aliases: [] + expose_metrics: + description: + - This is a hint to run an extra container in the pod to expose metrics - the image + - will either be set depending on the router implementation or provided with --metrics-image. + required: false + default: False + aliases: [] + metrics_image: + description: + - If expose_metrics is specified this is the image to use to run a sidecar container + - in the pod exposing metrics. If not set and --expose-metrics is true the image will + - depend on router implementation. 
+ required: false + default: None + aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create routers + oadm_router: + name: router + service_account: router + replicas: 2 + namespace: default + selector: type=infra + cert_file: /etc/origin/master/named_certificates/router.crt + key_file: /etc/origin/master/named_certificates/router.key + cacert_file: /etc/origin/master/named_certificates/router.ca + edits: + - key: spec.strategy.rollingParams + value: + intervalSeconds: 1 + maxSurge: 50% + maxUnavailable: 50% + timeoutSeconds: 600 + updatePeriodSeconds: 1 + action: put + - key: spec.template.spec.containers[0].resources.limits.memory + value: 2G + action: update + - key: spec.template.spec.containers[0].resources.requests.memory + value: 1G + action: update + - key: spec.template.spec.containers[0].env + value: + name: EXTENDED_VALIDATION + value: 'false' + action: update + register: router_out + run_once: True +''' diff --git a/roles/lib_openshift/src/lib/rolebinding.py b/roles/lib_openshift/src/lib/rolebinding.py new file mode 100644 index 000000000..bbc1bb956 --- /dev/null +++ b/roles/lib_openshift/src/lib/rolebinding.py @@ -0,0 +1,276 @@ +# pylint: skip-file + +# pylint: disable=too-many-instance-attributes +class RoleBindingConfig(object): + ''' Handle route options ''' + # pylint: disable=too-many-arguments + def __init__(self, + sname, + namespace, + kubeconfig, + group_names=None, + role_ref=None, + subjects=None, + usernames=None): + ''' constructor for handling route options ''' + self.kubeconfig = kubeconfig + self.name = sname + self.namespace = namespace + self.group_names = group_names + self.role_ref = role_ref + self.subjects = subjects + self.usernames = usernames + self.data = {} + + self.create_dict() + + def create_dict(self): + ''' return a service as a dict ''' + self.data['apiVersion'] = 'v1' + self.data['kind'] = 'RoleBinding' + self.data['groupNames'] = self.group_names + self.data['metadata']['name'] = self.name + self.data['metadata']['namespace'] = self.namespace + + self.data['roleRef'] = self.role_ref + self.data['subjects'] = self.subjects + self.data['userNames'] = self.usernames + + +# pylint: disable=too-many-instance-attributes,too-many-public-methods +class RoleBinding(Yedit): + ''' Class to wrap the oc command line tools ''' + group_names_path = "groupNames" + role_ref_path = "roleRef" + subjects_path = "subjects" + user_names_path = "userNames" + + kind = 'RoleBinding' + + def __init__(self, content): + '''RoleBinding constructor''' + super(RoleBinding, self).__init__(content=content) + self._subjects = None + self._role_ref = None + self._group_names = None + self._user_names = None + + @property + def subjects(self): + ''' subjects property ''' + if self._subjects == None: + self._subjects = self.get_subjects() + return self._subjects + + @subjects.setter + def subjects(self, data): + ''' subjects property setter''' + self._subjects = data + + @property + def role_ref(self): + ''' role_ref property ''' + if self._role_ref == None: + self._role_ref = self.get_role_ref() + return self._role_ref + + @role_ref.setter + def role_ref(self, data): + ''' role_ref property setter''' + self._role_ref = data + + @property + def group_names(self): + ''' group_names property ''' + if self._group_names == None: + self._group_names = self.get_group_names() + return self._group_names + + @group_names.setter + def group_names(self, data): + ''' group_names property setter''' + 
self._group_names = data + + @property + def user_names(self): + ''' user_names property ''' + if self._user_names == None: + self._user_names = self.get_user_names() + return self._user_names + + @user_names.setter + def user_names(self, data): + ''' user_names property setter''' + self._user_names = data + + def get_group_names(self): + ''' return groupNames ''' + return self.get(RoleBinding.group_names_path) or [] + + def get_user_names(self): + ''' return usernames ''' + return self.get(RoleBinding.user_names_path) or [] + + def get_role_ref(self): + ''' return role_ref ''' + return self.get(RoleBinding.role_ref_path) or {} + + def get_subjects(self): + ''' return subjects ''' + return self.get(RoleBinding.subjects_path) or [] + + #### ADD ##### + def add_subject(self, inc_subject): + ''' add a subject ''' + if self.subjects: + self.subjects.append(inc_subject) + else: + self.put(RoleBinding.subjects_path, [inc_subject]) + + return True + + def add_role_ref(self, inc_role_ref): + ''' add a role_ref ''' + if not self.role_ref: + self.put(RoleBinding.role_ref_path, {"name": inc_role_ref}) + return True + + return False + + def add_group_names(self, inc_group_names): + ''' add a group_names ''' + if self.group_names: + self.group_names.append(inc_group_names) + else: + self.put(RoleBinding.group_names_path, [inc_group_names]) + + return True + + def add_user_name(self, inc_user_name): + ''' add a username ''' + if self.user_names: + self.user_names.append(inc_user_name) + else: + self.put(RoleBinding.user_names_path, [inc_user_name]) + + return True + + #### /ADD ##### + + #### Remove ##### + def remove_subject(self, inc_subject): + ''' remove a subject ''' + try: + self.subjects.remove(inc_subject) + except ValueError as _: + return False + + return True + + def remove_role_ref(self, inc_role_ref): + ''' remove a role_ref ''' + if self.role_ref and self.role_ref['name'] == inc_role_ref: + del self.role_ref['name'] + return True + + return False + + def remove_group_name(self, inc_group_name): + ''' remove a groupname ''' + try: + self.group_names.remove(inc_group_name) + except ValueError as _: + return False + + return True + + def remove_user_name(self, inc_user_name): + ''' remove a username ''' + try: + self.user_names.remove(inc_user_name) + except ValueError as _: + return False + + return True + + #### /REMOVE ##### + + #### UPDATE ##### + def update_subject(self, inc_subject): + ''' update a subject ''' + try: + index = self.subjects.index(inc_subject) + except ValueError as _: + return self.add_subject(inc_subject) + + self.subjects[index] = inc_subject + + return True + + def update_group_name(self, inc_group_name): + ''' update a groupname ''' + try: + index = self.group_names.index(inc_group_name) + except ValueError as _: + return self.add_group_names(inc_group_name) + + self.group_names[index] = inc_group_name + + return True + + def update_user_name(self, inc_user_name): + ''' update a username ''' + try: + index = self.user_names.index(inc_user_name) + except ValueError as _: + return self.add_user_name(inc_user_name) + + self.user_names[index] = inc_user_name + + return True + + def update_role_ref(self, inc_role_ref): + ''' update a role_ref ''' + self.role_ref['name'] = inc_role_ref + + return True + + #### /UPDATE ##### + + #### FIND #### + def find_subject(self, inc_subject): + ''' find a subject ''' + index = None + try: + index = self.subjects.index(inc_subject) + except ValueError as _: + return index + + return index + + def find_group_name(self, inc_group_name): 
+ ''' find a group_name ''' + index = None + try: + index = self.group_names.index(inc_group_name) + except ValueError as _: + return index + + return index + + def find_user_name(self, inc_user_name): + ''' find a user_name ''' + index = None + try: + index = self.user_names.index(inc_user_name) + except ValueError as _: + return index + + return index + + def find_role_ref(self, inc_role_ref): + ''' find a user_name ''' + if self.role_ref and self.role_ref['name'] == inc_role_ref['name']: + return self.role_ref + + return None diff --git a/roles/lib_openshift/src/lib/volume.py b/roles/lib_openshift/src/lib/volume.py new file mode 100644 index 000000000..dc07d3ce1 --- /dev/null +++ b/roles/lib_openshift/src/lib/volume.py @@ -0,0 +1,36 @@ +# pylint: skip-file + +class Volume(object): + ''' Class to wrap the oc command line tools ''' + volume_mounts_path = {"pod": "spec.containers[0].volumeMounts", + "dc": "spec.template.spec.containers[0].volumeMounts", + "rc": "spec.template.spec.containers[0].volumeMounts", + } + volumes_path = {"pod": "spec.volumes", + "dc": "spec.template.spec.volumes", + "rc": "spec.template.spec.volumes", + } + + @staticmethod + def create_volume_structure(volume_info): + ''' return a properly structured volume ''' + volume_mount = None + volume = {'name': volume_info['name']} + if volume_info['type'] == 'secret': + volume['secret'] = {} + volume[volume_info['type']] = {'secretName': volume_info['secret_name']} + volume_mount = {'mountPath': volume_info['path'], + 'name': volume_info['name']} + elif volume_info['type'] == 'emptydir': + volume['emptyDir'] = {} + volume_mount = {'mountPath': volume_info['path'], + 'name': volume_info['name']} + elif volume_info['type'] == 'pvc': + volume['persistentVolumeClaim'] = {} + volume['persistentVolumeClaim']['claimName'] = volume_info['claimName'] + volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize'] + elif volume_info['type'] == 'hostpath': + volume['hostPath'] = {} + volume['hostPath']['path'] = volume_info['path'] + + return (volume, volume_mount) diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml index 091aaef2e..a18842ed1 100644 --- a/roles/lib_openshift/src/sources.yml +++ b/roles/lib_openshift/src/sources.yml @@ -9,6 +9,36 @@ oadm_manage_node.py: - class/oadm_manage_node.py - ansible/oadm_manage_node.py +oadm_registry.py: +- doc/generated +- doc/license +- lib/import.py +- doc/registry +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- lib/deploymentconfig.py +- lib/secret.py +- lib/service.py +- lib/volume.py +- class/oc_version.py +- class/oadm_registry.py +- ansible/oadm_registry.py + +oadm_router.py: +- doc/generated +- doc/license +- lib/import.py +- doc/router +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- lib/service.py +- lib/deploymentconfig.py +- lib/serviceaccount.py +- lib/secret.py +- lib/rolebinding.py +- class/oadm_router.py +- ansible/oadm_router.py + oc_edit.py: - doc/generated - doc/license |
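
The sources.yml entries above only declare which fragments make up each generated module; the generator itself is not part of this diff. As a rough idea of what that assembly implies, here is a hypothetical concatenation sketch — the output directory, the use of PyYAML, and the assumption that fragment paths are relative to src/ are assumptions, not the role's actual tooling:

```python
# Hypothetical sketch of the fragment concatenation that sources.yml describes.
# The role's real generator is not shown in this commit; paths are assumptions.
import os
import yaml

SRC_DIR = 'roles/lib_openshift/src'       # assumed: run from the repository root
OUT_DIR = 'roles/lib_openshift/library'   # assumed destination for built modules

with open(os.path.join(SRC_DIR, 'sources.yml')) as handle:
    sources = yaml.safe_load(handle)

for module_name, fragments in sources.items():
    with open(os.path.join(OUT_DIR, module_name), 'w') as out:
        for fragment in fragments:
            # Fragment paths are relative to src/, e.g. 'class/oadm_router.py'
            # or '../../lib_utils/src/class/yedit.py'.
            with open(os.path.join(SRC_DIR, fragment)) as frag:
                out.write(frag.read())
```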