diff --git a/atomicapp/constants.py b/atomicapp/constants.py
index e3a6fd7a..60f4e94c 100644
--- a/atomicapp/constants.py
+++ b/atomicapp/constants.py
@@ -76,6 +76,7 @@
PROVIDER_CA_KEY = "provider-cafile"
K8S_DEFAULT_API = "http://localhost:8080"
+OC_DEFAULT_API = "http://localhost:8443"
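+# OC_DEFAULT_API is used by the OpenShift provider as a last-resort endpoint
+# when no provider config file, provider params or .kube/config are available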
# Persistent Storage Formats
PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"]
diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py
index 92c2ed2e..a2109ba8 100644
--- a/atomicapp/providers/kubernetes.py
+++ b/atomicapp/providers/kubernetes.py
@@ -153,11 +153,13 @@ def _from_required_params(self):
self._check_required_params(exception=True)
paramdict = self._build_param_dict()
+ logger.debug("Building from required params")
# Generate the configuration from the parameters
config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY],
auth=paramdict[PROVIDER_AUTH_KEY],
ca=paramdict[PROVIDER_CA_KEY],
verify=paramdict[PROVIDER_TLS_VERIFY_KEY])
+ logger.debug("Passed configuration for .kube/config %s" % config)
return config
def _check_namespaces(self):
diff --git a/atomicapp/providers/lib/kubeshift/client.py b/atomicapp/providers/lib/kubeshift/client.py
index ff59b8b2..2ce47865 100644
--- a/atomicapp/providers/lib/kubeshift/client.py
+++ b/atomicapp/providers/lib/kubeshift/client.py
@@ -18,6 +18,7 @@
"""
from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient
+from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient
from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError
from atomicapp.constants import LOGGER_DEFAULT
import logging
@@ -41,6 +42,9 @@ def __init__(self, config, provider):
if provider is "kubernetes":
self.connection = KubeKubernetesClient(config)
logger.debug("Using Kubernetes Provider KubeClient library")
+ elif provider == "openshift":
+ self.connection = KubeOpenshiftClient(config)
+ logger.debug("Using OpenShift Provider KubeClient library")
else:
raise KubeClientError("No provider by that name.")
diff --git a/atomicapp/providers/lib/kubeshift/kubebase.py b/atomicapp/providers/lib/kubeshift/kubebase.py
index 2eee9f5f..081354ea 100644
--- a/atomicapp/providers/lib/kubeshift/kubebase.py
+++ b/atomicapp/providers/lib/kubeshift/kubebase.py
@@ -178,7 +178,7 @@ def get_resources(self, url):
def test_connection(self, url):
self.api.request("get", url)
- logger.debug("Connection successfully tested")
+ logger.debug("Connection successfully tested on URL %s" % url)
@staticmethod
def cert_file(data, key):
@@ -337,7 +337,7 @@ def _request_method(self, method, url, data):
data (object): object of the data that is being passed (will be converted to json)
'''
if method.lower() == "get":
- res = self.api.get(url)
+ res = self.api.get(url, json=data)
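+ # requests forwards an optional JSON body on GET as well; when callers
+ # pass no data this stays None and no body is sent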
elif method.lower() == "post":
res = self.api.post(url, json=data)
elif method.lower() == "put":
diff --git a/atomicapp/providers/lib/kubeshift/kubeconfig.py b/atomicapp/providers/lib/kubeshift/kubeconfig.py
index 778d00d8..8a4b1beb 100644
--- a/atomicapp/providers/lib/kubeshift/kubeconfig.py
+++ b/atomicapp/providers/lib/kubeshift/kubeconfig.py
@@ -75,6 +75,9 @@ def from_params(api=None, auth=None, ca=None, verify=True):
if ca:
config['clusters'][0]['cluster']['certificate-authority'] = ca
+
+ if verify is False:
+ config['clusters'][0]['cluster']['insecure-skip-tls-verify'] = 'true'
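+
+ # Illustrative example (hypothetical values):
+ #   KubeConfig.from_params(api="https://localhost:8443", auth="token",
+ #                          ca=None, verify=False)
+ # yields a config whose cluster entry includes
+ #   {'server': 'https://localhost:8443', 'insecure-skip-tls-verify': 'true'}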
return config
@staticmethod
diff --git a/atomicapp/providers/lib/kubeshift/kubernetes.py b/atomicapp/providers/lib/kubeshift/kubernetes.py
index 6364ce15..5bbbc7a2 100644
--- a/atomicapp/providers/lib/kubeshift/kubernetes.py
+++ b/atomicapp/providers/lib/kubeshift/kubernetes.py
@@ -17,11 +17,11 @@
along with Atomic App. If not, see <http://www.gnu.org/licenses/>.
"""
-from urlparse import urljoin
-from urllib import urlencode
import logging
import re
+from urlparse import urljoin
+from urllib import urlencode
from atomicapp.constants import LOGGER_DEFAULT
from atomicapp.providers.lib.kubeshift.kubebase import KubeBase
from atomicapp.providers.lib.kubeshift.exceptions import (KubeKubernetesError)
@@ -39,7 +39,7 @@ def __init__(self, config):
'''
- # Pass in the configuration data (.kube/config object) to the KubeBase
+ # The configuration data passed in will be .kube/config data, so process it accordingly.
self.api = KubeBase(config)
# Check the API url
@@ -75,7 +75,9 @@ def create(self, obj, namespace):
'''
name = self._get_metadata_name(obj)
kind, url = self._generate_kurl(obj, namespace)
+
self.api.request("post", url, data=obj)
+
logger.info("%s '%s' successfully created", kind.capitalize(), name)
def delete(self, obj, namespace):
@@ -99,8 +101,8 @@ def delete(self, obj, namespace):
if kind in ['rcs', 'replicationcontrollers']:
self.scale(obj, namespace)
-
self.api.request("delete", url)
+
logger.info("%s '%s' successfully deleted", kind.capitalize(), name)
def scale(self, obj, namespace, replicas=0):
diff --git a/atomicapp/providers/lib/kubeshift/openshift.py b/atomicapp/providers/lib/kubeshift/openshift.py
new file mode 100644
index 00000000..1ee722e7
--- /dev/null
+++ b/atomicapp/providers/lib/kubeshift/openshift.py
@@ -0,0 +1,386 @@
+"""
+ Copyright 2014-2016 Red Hat, Inc.
+
+ This file is part of Atomic App.
+
+ Atomic App is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Atomic App is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with Atomic App. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import datetime
+import time
+import os
+import tarfile
+import logging
+import re
+
+from urlparse import urljoin
+from urllib import urlencode
+from atomicapp.utils import Utils
+from atomicapp.constants import LOGGER_DEFAULT
+from atomicapp.providers.lib.kubeshift.kubebase import KubeBase
+from atomicapp.providers.lib.kubeshift.exceptions import KubeOpenshiftError
+
+logger = logging.getLogger(LOGGER_DEFAULT)
+
+
+class KubeOpenshiftClient(object):
+
+ def __init__(self, config):
+ '''
+
+ Args:
+ config (obj): Object of the configuration data
+
+ '''
+
+ # The configuration data passed in will be .kube/config data, so process it accordingly.
+ self.api = KubeBase(config)
+
+ # Check the API url
+ url = self.api.cluster['server']
+ if not re.match('(?:http|https)://', url):
+ raise KubeOpenshiftError("OpenShift API URL does not include HTTP or HTTPS")
+
+ # Gather what end-points we will be using
+ self.k8s_api = urljoin(url, "api/v1/")
+ self.oc_api = urljoin(url, "oapi/v1/")
+
+ # Test the connection before proceeding
+ self.api.test_connection(self.k8s_api)
+ self.api.test_connection(self.oc_api)
+
+ # Gather the resource names which will be used for the 'kind' API calls
+ self.oc_api_resources = self.api.get_resources(self.oc_api)
+
+ # Gather what API groups are available
+ # TODO: refactor this (create function in kubebase.py)
+ self.k8s_api_resources = {}
+ self.k8s_api_resources['v1'] = self.api.get_resources(self.k8s_api)
+ self.k8s_apis = urljoin(url, "apis/")
+
+ # Gather the group names from which resource names will be derived
+ self.k8s_api_groups = self.api.get_groups(self.k8s_apis)
+
+ for (name, versions) in self.k8s_api_groups:
+ for version in versions:
+ api = "%s/%s" % (name, version)
+ url = urljoin(self.k8s_apis, api)
+ self.k8s_api_resources[api] = self.api.get_resources(url)
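+
+ # After this loop self.k8s_api_resources maps each API version string to
+ # its resource names, e.g. (illustrative, based on a typical cluster):
+ #   {'v1': [...], 'autoscaling/v1': [...], 'batch/v1': [...]}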
+
+ def create(self, obj, namespace):
+ '''
+ Create an object on the OpenShift cluster
+ '''
+ name = self._get_metadata_name(obj)
+ kind, url = self._generate_kurl(obj, namespace)
+
+ # Must process through each object if kind is a 'template'
+ if kind in ("template", "templates"):
+ self._process_template(obj, namespace, "create")
+ else:
+ self.api.request("post", url, data=obj)
+
+ logger.info("%s '%s' successfully created", kind.capitalize(), name)
+
+ def delete(self, obj, namespace):
+ '''
+ Delete an object from the OpenShift cluster
+
+ Args:
+ obj (object): Object of the artifact being modified
+ namespace (str): Namespace of the OpenShift cluster to be used
+
+ *Note*
+ Replication controllers must scale to 0 in order to delete pods.
+ Kubernetes 1.3 will implement server-side cascading deletion, but
+ until then, it's mandatory to scale to 0
+ https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/garbage-collection.md
+
+ '''
+ name = self._get_metadata_name(obj)
+ kind, url = self._generate_kurl(obj, namespace, name)
+
+ # Must process through each object if kind is a 'template'
+ if kind in ("template", "templates"):
+ self._process_template(obj, namespace, "delete")
+ else:
+ if kind in ['rcs', 'replicationcontrollers']:
+ self.scale(obj, namespace)
+ self.api.request("delete", url)
+
+ logger.info("%s '%s' successfully deleted", kind.capitalize(), name)
+
+ def scale(self, obj, namespace, replicas=0):
+ '''
+ By default we scale down to 0. This function takes an object and scales
+ it to the specified number of replicas on the cluster.
+
+ Args:
+ obj (object): Object of the artifact being modified
+ namespace (str): Namespace of the OpenShift cluster to be used
+ replicas (int): Default 0, number of replicas to scale to
+ '''
+ patch = [{"op": "replace",
+ "path": "/spec/replicas",
+ "value": replicas}]
+ name = self._get_metadata_name(obj)
+ _, url = self._generate_kurl(obj, namespace, name)
+ self.api.request("patch", url, data=patch)
+ logger.info("'%s' successfully scaled to %s", name, replicas)
+
+ def namespaces(self):
+ '''
+ Gathers a list of namespaces on the Kubernetes cluster
+ '''
+ url = urljoin(self.k8s_api, "namespaces")
+ ns = self.api.request("get", url)
+ return ns['items']
+
+ def _generate_kurl(self, obj, namespace, name=None, params=None):
+ '''
+ Generate the required URL by extracting the 'kind' from the
+ object as well as the namespace.
+
+ Args:
+ obj (obj): Object of the data being passed
+ namespace (str): k8s namespace
+ name (str): Name of the object being passed
+ params (arr): Extra params passed such as timeout=300
+
+ Returns:
+ resource (str): The resource name (plural form of the kind)
+ url (str): The URL to be used / artifact URL
+ '''
+ if 'apiVersion' not in obj.keys():
+ raise KubeOpenshiftError("Error processing object. There is no apiVersion")
+
+ if 'kind' not in obj.keys():
+ raise KubeOpenshiftError("Error processing object. There is no kind")
+
+ api_version = obj['apiVersion']
+
+ kind = obj['kind']
+
+ resource = KubeBase.kind_to_resource_name(kind)
+
+ if resource in self.oc_api_resources:
+ url = self.oc_api
+ elif resource in self.k8s_api_resources[api_version]:
+ if api_version == 'v1':
+ url = self.k8s_api
+ else:
+ url = urljoin(self.k8s_apis, "%s/" % api_version)
+ else:
+ raise KubeOpenshiftError("No kind by that name: %s" % kind)
+
+ url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource))
+
+ if name:
+ url = urljoin(url, name)
+
+ if params:
+ url = urljoin(url, "?%s" % urlencode(params))
+
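+ # Illustrative result for kind=Pod, namespace=foo, name=bar:
+ #   https://<server>/api/v1/namespaces/foo/pods/bar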
+ return (resource, url)
+
+ @staticmethod
+ def _get_metadata_name(obj):
+ '''
+ This looks at the object and grabs the metadata name of said object
+
+ Args:
+ obj (object): Object file of the artifact
+
+ Returns:
+ name (str): Returns the metadata name of the object
+ '''
+ if "metadata" in obj and \
+ "name" in obj["metadata"]:
+ name = obj["metadata"]["name"]
+ else:
+ raise KubeOpenshiftError("Cannot undeploy. There is no"
+ " name in object metadata "
+ "object=%s" % obj)
+ return name
+
+ # OPENSHIFT-SPECIFIC FUNCTIONS
+
+ def extract(self, image, src, dest, namespace, update=True):
+ """
+ Extract contents of a container image from 'src' in container
+ to 'dest' in host.
+
+ Args:
+ image (str): Name of container image
+ src (str): Source path in container
+ dest (str): Destination path in host
+ namespace (str): Namespace to run the temporary pod in
+ update (bool): Update existing destination, if True
+ """
+ if os.path.exists(dest) and not update:
+ return
+ cleaned_image_name = Utils.sanitizeName(image)
+ pod_name = '{}-{}'.format(cleaned_image_name, Utils.getUniqueUUID())
+ container_name = cleaned_image_name
+
+ # Pull (if needed) image and bring up a container from it
+ # with 'sleep 3600' entrypoint, just to extract content from it
+ artifact = {
+ 'apiVersion': 'v1',
+ 'kind': 'Pod',
+ 'metadata': {
+ 'name': pod_name
+ },
+ 'spec': {
+ 'containers': [
+ {
+ 'image': image,
+ 'command': [
+ 'sleep',
+ '3600'
+ ],
+ 'imagePullPolicy': 'IfNotPresent',
+ 'name': container_name
+ }
+ ],
+ 'restartPolicy': 'Always'
+ }
+ }
+
+ self.create(artifact, namespace)
+ try:
+ self._wait_till_pod_runs(namespace, pod_name, timeout=300)
+
+ # Archive content from the container and dump it to tmpfile
+ tmpfile = '/tmp/atomicapp-{pod}.tar.gz'.format(pod=pod_name)
+
+ self._execute(
+ namespace, pod_name, container_name,
+ 'tar -cz --directory {} ./'.format('/' + src),
+ outfile=tmpfile
+ )
+ finally:
+ # Delete created pod
+ self.delete(artifact, namespace)
+
+ # Extract archive data
+ tar = tarfile.open(tmpfile, 'r:gz')
+ tar.extractall(dest)
+
+ def _execute(self, namespace, pod, container, command,
+ outfile=None):
+ """
+ Execute a command in a container in an Openshift pod.
+
+ Args:
+ namespace (str): Namespace
+ pod (str): Pod name
+ container (str): Container name inside pod
+ command (str): Command to execute
+ outfile (str): Path to output file where results should be dumped
+
+ Returns:
+ Command output (str) or None in case results dumped to output file
+ """
+ args = {
+ 'token': self.api.token,
+ 'namespace': namespace,
+ 'pod': pod,
+ 'container': container,
+ 'command': ''.join(['command={}&'.format(word) for word in command.split()])
+ }
+ url = urljoin(
+ self.k8s_api,
+ 'namespaces/{namespace}/pods/{pod}/exec?'
+ 'access_token={token}&container={container}&'
+ '{command}stdout=1&stdin=0&tty=0'.format(**args))
+
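+ # The exec endpoint requires an upgraded (SPDY/WebSocket) connection;
+ # KubeBase.websocket_request is expected to rewrite the scheme to wss and
+ # stream the output. Illustrative URL shape:
+ #   wss://<server>/api/v1/namespaces/<ns>/pods/<pod>/exec?access_token=...
+ #       &container=<c>&command=tar&command=-cz&...&stdout=1&stdin=0&tty=0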
+ return self.api.websocket_request(url, outfile)
+
+ def _process_template(self, obj, namespace, method):
+ _, url = self._generate_kurl(obj, namespace)
+ data = self.api.request("post", url, data=obj)
+
+ if method == "create":
+ for o in data[0]['objects']:
+ name = self._get_metadata_name(o)
+ _, object_url = self._generate_kurl(o, namespace)
+ self.api.request("post", object_url, data=o)
+ logger.debug("Created template object: %s" % name)
+ elif method == "delete":
+ for o in data[0]['objects']:
+ name = self._get_metadata_name(o)
+ _, object_url = self._generate_kurl(o, namespace, name)
+ self.api.request("delete", object_url)
+ logger.debug("Deleted template object: %s" % name)
+ else:
+ raise KubeOpenshiftError("No method by that name to process template")
+
+ logger.debug("Processed object template successfully")
+
+ def _get_pod_status(self, namespace, pod):
+ """
+ Get pod status.
+
+ Args:
+ namespace (str): Openshift namespace
+ pod (str): Pod name
+
+ Returns:
+ Status of pod (str)
+
+ Raises:
+ KubeConnectionError when unable to fetch Pod status.
+ """
+ args = {
+ 'namespace': namespace,
+ 'pod': pod,
+ 'access_token': self.api.token
+ }
+ url = urljoin(
+ self.k8s_api,
+ 'namespaces/{namespace}/pods/{pod}?'
+ 'access_token={access_token}'.format(**args))
+ data = self.api.request("get", url)
+
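+ # Pod phases arrive capitalized ('Pending', 'Running', 'Failed', ...);
+ # lowercase them so _wait_till_pod_runs can compare consistently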
+ return data['status']['phase'].lower()
+
+ def _wait_till_pod_runs(self, namespace, pod, timeout=300):
+ """
+ Wait till pod runs, with a timeout.
+
+ Args:
+ namespace (str): Openshift namespace
+ pod (str): Pod name
+ timeout (int): Timeout in seconds.
+
+ Raises:
+ KubeOpenshiftError on timeout or when the pod goes to
+ failed state.
+ """
+ now = datetime.datetime.now()
+ timeout_delta = datetime.timedelta(seconds=timeout)
+ while datetime.datetime.now() - now < timeout_delta:
+ status = self._get_pod_status(namespace, pod)
+ if status == 'running':
+ break
+ elif status == 'failed':
+ raise KubeOpenshiftError(
+ 'Unable to run pod for extracting content: '
+ '{namespace}/{pod}'.format(namespace=namespace,
+ pod=pod))
+ time.sleep(1)
+ if status != 'running':
+ raise KubeOpenshiftError(
+ 'Timed out extracting content from pod: '
+ '{namespace}/{pod}'.format(namespace=namespace,
+ pod=pod))
diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py
index bac6f3a4..d9e9fe27 100644
--- a/atomicapp/providers/openshift.py
+++ b/atomicapp/providers/openshift.py
@@ -17,473 +17,188 @@
along with Atomic App. If not, see <http://www.gnu.org/licenses/>.
"""
-import datetime
-import os
import anymarkup
-import ssl
-import tarfile
-import time
-from urlparse import urljoin
-from urllib import urlencode
-from collections import OrderedDict
-import websocket
+import logging
+import os
-from atomicapp.utils import Utils
-from atomicapp.plugin import Provider, ProviderFailedException
from atomicapp.constants import (PROVIDER_AUTH_KEY,
ANSWERS_FILE,
DEFAULT_NAMESPACE,
LOGGER_DEFAULT,
- NAMESPACE_KEY,
PROVIDER_API_KEY,
- PROVIDER_TLS_VERIFY_KEY,
PROVIDER_CA_KEY,
- OPENSHIFT_POD_CA_FILE)
+ PROVIDER_TLS_VERIFY_KEY,
+ LOGGER_COCKPIT,
+ OC_DEFAULT_API)
+from atomicapp.plugin import Provider, ProviderFailedException
+
from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig
-from requests.exceptions import SSLError
-import logging
+from atomicapp.providers.lib.kubeshift.client import Client
+from atomicapp.utils import Utils
+cockpit_logger = logging.getLogger(LOGGER_COCKPIT)
logger = logging.getLogger(LOGGER_DEFAULT)
-class OpenshiftClient(object):
-
- def __init__(self, providerapi, access_token,
- provider_tls_verify, provider_ca):
- self.providerapi = providerapi
- self.access_token = access_token
- self.provider_tls_verify = provider_tls_verify
- self.provider_ca = provider_ca
-
- # construct full urls for api endpoints
- self.kubernetes_api = urljoin(self.providerapi, "api/v1/")
- self.openshift_api = urljoin(self.providerapi, "oapi/v1/")
-
- logger.debug("kubernetes_api = %s", self.kubernetes_api)
- logger.debug("openshift_api = %s", self.openshift_api)
-
- def test_connection(self):
- """
- Test connection to OpenShift server
-
- Raises:
- ProviderFailedException - Invalid SSL/TLS certificate
- """
- logger.debug("Testing connection to OpenShift server")
-
- if self.provider_ca and not os.path.exists(self.provider_ca):
- raise ProviderFailedException("Unable to find CA path %s"
- % self.provider_ca)
-
- try:
- (status_code, return_data) = \
- Utils.make_rest_request("get",
- self.openshift_api,
- verify=self._requests_tls_verify(),
- headers={'Authorization': "Bearer %s" % self.access_token})
- except SSLError as e:
- if self.provider_tls_verify:
- msg = "SSL/TLS ERROR: invalid certificate. " \
- "Add certificate of correct Certificate Authority providing" \
- " `%s` or you can disable SSL/TLS verification by `%s=False`" \
- % (PROVIDER_CA_KEY, PROVIDER_TLS_VERIFY_KEY)
- raise ProviderFailedException(msg)
- else:
- # this shouldn't happen
- raise ProviderFailedException(e.message)
-
- def get_oapi_resources(self):
- """
- Get Openshift API resources
- """
- # get list of supported resources for each api
- (status_code, return_data) = \
- Utils.make_rest_request("get",
- self.openshift_api,
- verify=self._requests_tls_verify(),
- headers={'Authorization': "Bearer %s" % self.access_token})
- if status_code == 200:
- oapi_resources = return_data["resources"]
- else:
- raise ProviderFailedException("Cannot get OpenShift resource list")
-
- # convert resources list of dicts to list of names
- oapi_resources = [res['name'] for res in oapi_resources]
-
- logger.debug("Openshift resources %s", oapi_resources)
-
- return oapi_resources
-
- def get_kapi_resources(self):
- """
- Get kubernetes API resources
- """
- # get list of supported resources for each api
- (status_code, return_data) = \
- Utils.make_rest_request("get",
- self.kubernetes_api,
- verify=self._requests_tls_verify(),
- headers={'Authorization': "Bearer %s" % self.access_token})
- if status_code == 200:
- kapi_resources = return_data["resources"]
- else:
- raise ProviderFailedException("Cannot get Kubernetes resource list")
-
- # convert resources list of dicts to list of names
- kapi_resources = [res['name'] for res in kapi_resources]
-
- logger.debug("Kubernetes resources %s", kapi_resources)
-
- return kapi_resources
-
- def deploy(self, url, artifact):
- (status_code, return_data) = \
- Utils.make_rest_request("post",
- url,
- verify=self._requests_tls_verify(),
- data=artifact,
- headers={'Authorization': "Bearer %s" % self.access_token})
- if status_code == 201:
- logger.info("Object %s successfully deployed.",
- artifact['metadata']['name'])
- else:
- msg = "%s %s" % (status_code, return_data)
- logger.error(msg)
- # TODO: remove running components (issue: #428)
- raise ProviderFailedException(msg)
-
- def delete(self, url):
- """
- Delete object on given url
-
- Args:
- url (str): full url for artifact
-
- Raises:
- ProviderFailedException: error when calling remote api
- """
- (status_code, return_data) = \
- Utils.make_rest_request("delete",
- url,
- verify=self._requests_tls_verify(),
- headers={'Authorization': "Bearer %s" % self.access_token})
- if status_code == 200:
- logger.info("Successfully deleted.")
- else:
- msg = "%s %s" % (status_code, return_data)
- logger.error(msg)
- raise ProviderFailedException(msg)
-
- def scale(self, url, replicas):
- """
- Scale ReplicationControllers or DeploymentConfig
-
- Args:
- url (str): full url for artifact
- replicas (int): number of replicas scale to
- """
- patch = [{"op": "replace",
- "path": "/spec/replicas",
- "value": replicas}]
-
- (status_code, return_data) = \
- Utils.make_rest_request("patch",
- url,
- data=patch,
- verify=self._requests_tls_verify(),
- headers={'Authorization': "Bearer %s" % self.access_token})
- if status_code == 200:
- logger.info("Successfully scaled to %s replicas", replicas)
- else:
- msg = "%s %s" % (status_code, return_data)
- logger.error(msg)
- raise ProviderFailedException(msg)
-
- def process_template(self, url, template):
- (status_code, return_data) = \
- Utils.make_rest_request("post",
- url,
- verify=self._requests_tls_verify(),
- data=template,
- headers={'Authorization': "Bearer %s" % self.access_token})
- if status_code == 201:
- logger.info("template processed %s", template['metadata']['name'])
- logger.debug("processed template %s", return_data)
- return return_data['objects']
- else:
- msg = "%s %s" % (status_code, return_data)
- logger.error(msg)
- raise ProviderFailedException(msg)
-
- def _requests_tls_verify(self):
- """
- Return verify parameter for function Utils.make_rest_request
- in format that is used by requests library.
- see: http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification
- """
- if self.provider_ca and self.provider_tls_verify:
- return self.provider_ca
- else:
- return self.provider_tls_verify
-
- def execute(self, namespace, pod, container, command,
- outfile=None):
- """
- Execute a command in a container in an Openshift pod.
-
- Args:
- namespace (str): Namespace
- pod (str): Pod name
- container (str): Container name inside pod
- command (str): Command to execute
- outfile (str): Path to output file where results should be dumped
-
- Returns:
- Command output (str) or None in case results dumped to output file
- """
- args = {
- 'token': self.access_token,
- 'namespace': namespace,
- 'pod': pod,
- 'container': container,
- 'command': ''.join(['command={}&'.format(word) for word in command.split()])
- }
- url = urljoin(
- self.kubernetes_api,
- 'namespaces/{namespace}/pods/{pod}/exec?'
- 'access_token={token}&container={container}&'
- '{command}stdout=1&stdin=0&tty=0'.format(**args))
-
- # The above endpoint needs the request to be upgraded to SPDY,
- # which python-requests does not yet support. However, the same
- # endpoint works over websockets, so we are using websocket client.
-
- # Convert url from http(s) protocol to wss protocol
- url = 'wss://' + url.split('://', 1)[-1]
- logger.debug('url: {}'.format(url))
-
- results = []
-
- ws = websocket.WebSocketApp(
- url,
- on_message=lambda ws, message: self._handle_exec_reply(ws, message, results, outfile))
-
- ws.run_forever(sslopt={
- 'ca_certs': self.provider_ca,
- 'cert_reqs': ssl.CERT_REQUIRED if self.provider_tls_verify else ssl.CERT_NONE})
-
- if not outfile:
- return ''.join(results)
-
- def _handle_exec_reply(self, ws, message, results, outfile=None):
- """
- Handle reply message for exec call
- """
- # FIXME: For some reason, we do not know why, we need to ignore the
- # 1st char of the message, to generate a meaningful result
- cleaned_msg = message[1:]
- if outfile:
- with open(outfile, 'ab') as f:
- f.write(cleaned_msg)
- else:
- results.append(cleaned_msg)
-
- def get_pod_status(self, namespace, pod):
- """
- Get pod status.
-
- Args:
- namespace (str): Openshift namespace
- pod (str): Pod name
-
- Returns:
- Status of pod (str)
-
- Raises:
- ProviderFailedException when unable to fetch Pod status.
- """
- args = {
- 'namespace': namespace,
- 'pod': pod,
- 'access_token': self.access_token
- }
- url = urljoin(
- self.kubernetes_api,
- 'namespaces/{namespace}/pods/{pod}?'
- 'access_token={access_token}'.format(**args))
- (status_code, return_data) = \
- Utils.make_rest_request("get",
- url,
- verify=self._requests_tls_verify(),
- headers={'Authorization': "Bearer %s" % self.access_token})
-
- if status_code != 200:
- raise ProviderFailedException(
- 'Could not fetch status for pod: {namespace}/{pod}'.format(
- namespace=namespace, pod=pod))
- return return_data['status']['phase'].lower()
+class OpenshiftProvider(Provider):
+ """Operations for OpenShift provider is implemented in this class.
+ This class implements deploy, stop and undeploy of an atomicapp on
+ OpenShift provider.
+ """
-class OpenshiftProvider(Provider):
+ # Class variables
key = "openshift"
- cli_str = "oc"
- cli = None
- config_file = None
- template_data = None
- providerapi = "https://127.0.0.1:8443"
- openshift_api = None
- kubernetes_api = None
- access_token = None
namespace = DEFAULT_NAMESPACE
+ oc_artifacts = {}
+
+ # From the provider configuration
+ config_file = None
- # verify tls/ssl connection
- provider_tls_verify = True
- # path to file or dir with CA certificates
+ # Essential provider parameters
+ provider_api = None
+ provider_auth = None
+ provider_tls_verify = None
provider_ca = None
def init(self):
- # Parsed artifacts. Key is kind of artifacts. Value is list of artifacts.
- self.openshift_artifacts = OrderedDict()
+ self.oc_artifacts = {}
- self._set_config_values()
+ logger.debug("Given config: %s", self.config)
+ if self.config.get("namespace"):
+ self.namespace = self.config.get("namespace")
- self.oc = OpenshiftClient(self.providerapi,
- self.access_token,
- self.provider_tls_verify,
- self.provider_ca)
- self.openshift_api = self.oc.openshift_api
- self.kubernetes_api = self.oc.kubernetes_api
-
- # test connection to openshift server
- self.oc.test_connection()
-
- self.oapi_resources = self.oc.get_oapi_resources()
- self.kapi_resources = self.oc.get_kapi_resources()
+ logger.info("Using namespace %s", self.namespace)
self._process_artifacts()
- def _get_namespace(self, artifact):
- """
- Return namespace for artifact. If namespace is specified inside
- artifact use that, if not return default namespace (as specfied in
- answers.conf)
+ if self.dryrun:
+ return
- Args:
- artifact (dict): OpenShift/Kubernetes object
+ '''
+ Config_file:
+ If a config_file has been provided (--provider-config), use the
+ configuration from that file.
- Returns:
- namespace (str)
- """
- if "metadata" in artifact and "namespace" in artifact["metadata"]:
- return artifact["metadata"]["namespace"]
- return self.namespace
+ Params:
+ If any provider-specific parameters have been provided,
+ build the configuration from answers.conf / CLI parameters.
- def run(self):
- logger.debug("Deploying to OpenShift")
- # TODO: remove running components if one component fails issue:#428
- for kind, objects in self.openshift_artifacts.iteritems():
- for artifact in objects:
- namespace = self._get_namespace(artifact)
- url = self._get_url(namespace, kind)
+ .kube/config:
+ If no config file or params are provided by user then try to find and
+ use a config file at the default location.
- if self.dryrun:
- logger.info("DRY-RUN: %s", url)
- continue
- self.oc.deploy(url, artifact)
+ no config at all:
+ If no .kube/config file can be found then try to connect to the default
+ unauthenticated OC_DEFAULT_API end-point (http://localhost:8443).
+ '''
- def stop(self):
- """
- Undeploy application.
-
- Cascade the deletion of the resources managed other resource
- (e.g. ReplicationControllers created by a DeploymentConfig and
- Pods created by a ReplicationController).
- When using command line client this is done automatically
- by `oc` command.
- When using API calls we have to cascade deletion manually.
- """
- logger.debug("Starting undeploy")
- delete_artifacts = []
- for kind, objects in self.openshift_artifacts.iteritems():
- # Add deployment configs to beginning of the list so they are deleted first.
- # Do deployment config first because if you do replication controller
- # before deployment config then the deployment config will re-spawn
- # the replication controller before the deployment config is deleted.
- if kind == "deploymentconfig":
- delete_artifacts = objects + delete_artifacts
- else:
- delete_artifacts = delete_artifacts + objects
-
- for artifact in delete_artifacts:
- kind = artifact["kind"].lower()
- namespace = self._get_namespace(artifact)
-
- # Get name from metadata so we know which object to delete.
- if "metadata" in artifact and \
- "name" in artifact["metadata"]:
- name = artifact["metadata"]["name"]
- else:
- raise ProviderFailedException("Cannot undeploy. There is no"
- " name in artifacts metadata "
- "artifact=%s" % artifact)
-
- logger.info("Undeploying artifact name=%s kind=%s" % (name, kind))
-
- # If this is a deployment config we need to delete all
- # replication controllers that were created by this.
- # Find the replication controller that was created by this deployment
- # config by querying for all replication controllers and filtering based
- # on automatically created label openshift.io/deployment-config.name
- if kind.lower() == "deploymentconfig":
- params = {"labelSelector":
- "openshift.io/deployment-config.name=%s" % name}
- url = self._get_url(namespace,
- "replicationcontroller",
- params=params)
- (status_code, return_data) = \
- Utils.make_rest_request("get", url, verify=self.oc._requests_tls_verify())
- if status_code != 200:
- raise ProviderFailedException("Cannot get Replication"
- "Controllers for Deployment"
- "Config %s (status code %s)" %
- (name, status_code))
- # kind of returned data is ReplicationControllerList
- # https://docs.openshift.com/enterprise/3.1/rest_api/kubernetes_v1.html#v1-replicationcontrollerlist
- # we need modify items to get valid ReplicationController
- items = return_data["items"]
- for item in items:
- item["kind"] = "ReplicationController"
- item["apiVersion"] = return_data["apiVersion"]
- # add items to list of artifact to be deleted
- delete_artifacts.extend(items)
-
- url = self._get_url(namespace, kind, name)
-
- # Scale down replication controller to 0 replicas before deleting.
- # This should take care of all pods created by this replication
- # controller and we can safely delete it.
- if kind.lower() == "replicationcontroller":
- if self.dryrun:
- logger.info("DRY-RUN: SCALE %s down to 0", url)
- else:
- self.oc.scale(url, 0)
+ default_config_loc = os.path.join(
+ Utils.getRoot(), Utils.getUserHome().strip('/'), '.kube/config')
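+ # Utils.getRoot()/getUserHome() account for Atomic App running inside a
+ # container; on a plain host this resolves to ~/.kube/config (illustrative)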
- if self.dryrun:
- logger.info("DRY-RUN: DELETE %s", url)
- else:
- self.oc.delete(url)
+ if self.config_file:
+ logger.debug("Provider configuration provided")
+ self.api = Client(KubeConfig.from_file(self.config_file), "openshift")
+ elif self._check_required_params():
+ logger.debug("Generating .kube/config from given parameters")
+ self.api = Client(self._from_required_params(), "openshift")
+ elif os.path.isfile(default_config_loc):
+ logger.debug(".kube/config exists, using default configuration file")
+ self.api = Client(KubeConfig.from_file(default_config_loc), "openshift")
+ else:
+ self.config["provider-api"] = OC_DEFAULT_API
+ self.api = Client(self._from_required_params(), "openshift")
+
+ # Check if the namespace that the app is being deployed to is available
+ # DISABLED at the moment... Opened up issue against OpenShift..
+ # self._check_namespaces()
+
+ def _build_param_dict(self):
+ # Initialize the values
+ paramdict = {PROVIDER_API_KEY: self.provider_api,
+ PROVIDER_AUTH_KEY: self.provider_auth,
+ PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify,
+ PROVIDER_CA_KEY: self.provider_ca}
+
+ # Get values from the loaded answers.conf / passed CLI params
+ for k in paramdict.keys():
+ paramdict[k] = self.config.get(k)
+
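+ # The result maps the PROVIDER_*_KEY constants (e.g. 'provider-cafile')
+ # to the answers.conf / CLI values, or None for anything unset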
+ return paramdict
+
+ def _check_required_params(self, exception=False):
+ '''
+ This checks to see if the required parameters associated with the
+ OpenShift provider are passed.
+ PROVIDER_API_KEY and PROVIDER_AUTH_KEY are *required*. Token may be blank.
+ '''
+
+ paramdict = self._build_param_dict()
+ logger.debug("List of parameters passed: %s" % paramdict)
+
+ # Check that the required parameters are passed. If not, error out.
+ for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY]:
+ if paramdict[k] is None:
+ if exception:
+ msg = "You need to set %s in %s or pass it as a CLI param" % (k, ANSWERS_FILE)
+ raise ProviderFailedException(msg)
+ else:
+ return False
+
+ return True
+
+ def _from_required_params(self):
+ '''
+ Create a default configuration from passed environment parameters.
+ '''
+
+ self._check_required_params(exception=True)
+ paramdict = self._build_param_dict()
+
+ logger.debug("Building from required params")
+ # Generate the configuration from the parameters
+ config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY],
+ auth=paramdict[PROVIDER_AUTH_KEY],
+ ca=paramdict[PROVIDER_CA_KEY],
+ verify=paramdict[PROVIDER_TLS_VERIFY_KEY])
+ logger.debug("Passed configuration for .kube/config %s" % config)
+ return config
+
+ def _check_namespaces(self):
+ '''
+ This function checks whether the namespace associated with and/or provided for
+ the deployed application exists among the namespaces in the cluster
+ '''
+
+ # Get the namespaces and output the currently used ones
+ namespace_list = self.api.namespaces()
+ logger.debug("There are currently %s namespaces in the cluster." % str(len(namespace_list)))
+
+ # Create a namespace list
+ namespaces = []
+ for ns in namespace_list:
+ namespaces.append(ns["metadata"]["name"])
+
+ # Output the namespaces and check to see if the one provided exists
+ logger.debug("Namespaces: %s" % namespaces)
+ if self.namespace not in namespaces:
+ msg = "%s namespace does not exist. Please create the namespace and try again." % self.namespace
+ raise ProviderFailedException(msg)
def _process_artifacts(self):
"""
- Parse OpenShift manifests files and checks if manifest under
- process is valid. Reads self.artifacts and saves parsed artifacts
- to self.openshift_artifacts
+ Parse each artifact file and convert it into an object for
+ deployment.
"""
for artifact in self.artifacts:
logger.debug("Processing artifact: %s", artifact)
data = None
+
+ # Open and parse the artifact data
with open(os.path.join(self.path, artifact), "r") as fp:
data = anymarkup.parse(fp, force_types=None)
+ # Process said artifacts
self._process_artifact_data(artifact, data)
def _process_artifact_data(self, artifact, data):
@@ -494,290 +209,58 @@ def _process_artifact_data(self, artifact, data):
artifact (str): Artifact name
data (dict): Artifact data
"""
- # kind has to be specified in artifact
+
+ # Check if kind exists
if "kind" not in data.keys():
raise ProviderFailedException(
"Error processing %s artifact. There is no kind" % artifact)
+ # Change to lower case so it's easier to parse
kind = data["kind"].lower()
- resource = self._kind_to_resource(kind)
-
- # check if resource is supported by apis
- if resource not in self.oapi_resources \
- and resource not in self.kapi_resources:
- raise ProviderFailedException(
- "Unsupported kind %s in artifact %s" % (kind, artifact))
-
- # process templates
- if kind == "template":
- processed_objects = self._process_template(data)
- # add all processed object to artifacts dict
- for obj in processed_objects:
- obj_kind = obj["kind"].lower()
- if obj_kind not in self.openshift_artifacts.keys():
- self.openshift_artifacts[obj_kind] = []
- self.openshift_artifacts[obj_kind].append(obj)
- return
-
- # add parsed artifact to dict
- if kind not in self.openshift_artifacts.keys():
- self.openshift_artifacts[kind] = []
- self.openshift_artifacts[kind].append(data)
-
- def _process_template(self, template):
- """
- Call OpenShift api and process template.
- Templates allow parameterization of resources prior to being sent to
- the server for creation or update. Templates have "parameters",
- which may either be generated on creation or set by the user.
-
- Args:
- template (dict): template to process
-
- Returns:
- List of objects from processed template.
- """
- logger.debug("processing template: %s", template)
- url = self._get_url(self._get_namespace(template), "processedtemplates")
- return self.oc.process_template(url, template)
- def _kind_to_resource(self, kind):
- """
- Converts kind to resource name. It is same logics
- as in k8s.io/kubernetes/pkg/api/meta/restmapper.go (func KindToResource)
- Example:
- Pod -> pods
- Policy - > policies
- BuildConfig - > buildconfigs
-
- Args:
- kind (str): Kind of the object
-
- Returns:
- Resource name (str) (kind in plural form)
- """
- singular = kind.lower()
- if singular.endswith("status"):
- plural = singular + "es"
- else:
- if singular[-1] == "s":
- plural = singular
- elif singular[-1] == "y":
- plural = singular.rstrip("y") + "ies"
- else:
- plural = singular + "s"
- return plural
-
- def _get_url(self, namespace, kind, name=None, params=None):
- """
- Some kinds/resources are managed by OpensShift and some by Kubernetes.
- Here we compose right url (Kubernets or OpenShift) for given kind.
- If resource is managed by Kubernetes or OpenShift is determined by
- self.kapi_resources/self.oapi_resources lists
- Example:
- For namespace=project1, kind=DeploymentConfig, name=dc1 result
- would be http://example.com:8443/oapi/v1/namespaces/project1/deploymentconfigs/dc1
-
- Args:
- namespace (str): Kubernetes namespace or Openshift project name
- kind (str): kind of the object
- name (str): object name if modifying or deleting specific object (optional)
- params (dict): query parameters {"key":"value"} url?key=value
+ if kind not in self.oc_artifacts.keys():
+ self.oc_artifacts[kind] = []
- Returns:
- Full url (str) for given kind, namespace and name
- """
- url = None
-
- resource = self._kind_to_resource(kind)
-
- if resource in self.oapi_resources:
- url = self.openshift_api
- elif resource in self.kapi_resources:
- url = self.kubernetes_api
-
- url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource))
+ # Fail if there is no metadata
+ if 'metadata' not in data:
+ raise ProviderFailedException(
+ "Error processing %s artifact. There is no metadata object" % artifact)
- if name:
- url = urljoin(url, name)
+ # Change to the namespace specified on init()
+ data['metadata']['namespace'] = self.namespace
- if params:
- params["access_token"] = self.access_token
+ if 'labels' not in data['metadata']:
+ data['metadata']['labels'] = {'namespace': self.namespace}
else:
- params = {"access_token": self.access_token}
+ data['metadata']['labels']['namespace'] = self.namespace
- url = urljoin(url, "?%s" % urlencode(params))
- logger.debug("url: %s", url)
- return url
+ self.oc_artifacts[kind].append(data)
- def _set_config_values(self):
+ def run(self):
"""
- Reads providerapi, namespace and accesstoken from answers.conf and
- corresponding values from providerconfig (if set).
- Use one that is set, if both are set and have conflicting values raise
- exception.
-
- Raises:
- ProviderFailedException: values in providerconfig and answers.conf
- are in conflict
-
+ Deploys the app from the given resource artifacts.
"""
+ logger.info("Deploying to Kubernetes")
- # First things first, if we are running inside of an openshift pod via
- # `oc new-app` then get the config from the environment (files/env vars)
- # NOTE: pick up provider_tls_verify from answers if exists
- if Utils.running_on_openshift():
- self.providerapi = Utils.get_openshift_api_endpoint_from_env()
- self.namespace = os.environ['POD_NAMESPACE']
- self.access_token = os.environ['TOKEN_ENV_VAR']
- self.provider_ca = OPENSHIFT_POD_CA_FILE
- self.provider_tls_verify = \
- self.config.get(PROVIDER_TLS_VERIFY_KEY, True)
- return # No need to process other information
-
- # initialize result to default values
- result = {PROVIDER_API_KEY: self.providerapi,
- PROVIDER_AUTH_KEY: self.access_token,
- NAMESPACE_KEY: self.namespace,
- PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify,
- PROVIDER_CA_KEY: self.provider_ca}
-
- # create keys in dicts and initialize values to None
- answers = dict.fromkeys(result)
- providerconfig = dict.fromkeys(result)
-
- # get values from answers.conf
- for k in result.keys():
- answers[k] = self.config.get(k)
-
- # get values from providerconfig
- if self.config_file:
- providerconfig = KubeConfig.parse_kubeconf(self.config_file)
-
- # decide between values from answers.conf and providerconfig
- # if only one is set use that, report if they are in conflict
- for k in result.keys():
- if answers[k] is not None and providerconfig[k] is None:
- result[k] = answers[k]
- elif answers[k] is None and providerconfig[k] is not None:
- result[k] = providerconfig[k]
- elif answers[k] is not None and providerconfig[k] is not None:
- if answers[k] == providerconfig[k]:
- result[k] = answers[k]
+ for kind, objects in self.oc_artifacts.iteritems():
+ for artifact in objects:
+ if self.dryrun:
+ logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s"
+ % (kind, artifact))
else:
- msg = "There are conflicting values in %s (%s) and %s (%s)"\
- % (self.config_file, providerconfig[k], ANSWERS_FILE,
- answers[k])
- logger.error(msg)
- raise ProviderFailedException(msg)
-
- logger.debug("config values: %s" % result)
-
- # this items are required, they have to be not None
- for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY, NAMESPACE_KEY]:
- if result[k] is None:
- msg = "You need to set %s in %s" % (k, ANSWERS_FILE)
- logger.error(msg)
- raise ProviderFailedException(msg)
-
- # set config values
- self.providerapi = result[PROVIDER_API_KEY]
- self.access_token = result[PROVIDER_AUTH_KEY]
- self.namespace = result[NAMESPACE_KEY]
- self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY]
- if result[PROVIDER_CA_KEY]:
- # if we are in container translate path to path on host
- self.provider_ca = Utils.get_real_abspath(result[PROVIDER_CA_KEY])
- else:
- self.provider_ca = None
+ self.api.create(artifact, self.namespace)
- def extract(self, image, src, dest, update=True):
- """
- Extract contents of a container image from 'src' in container
- to 'dest' in host.
-
- Args:
- image (str): Name of container image
- src (str): Source path in container
- dest (str): Destination path in host
- update (bool): Update existing destination, if True
- """
- if os.path.exists(dest) and not update:
- return
- cleaned_image_name = Utils.sanitizeName(image)
- pod_name = '{}-{}'.format(cleaned_image_name, Utils.getUniqueUUID())
- container_name = cleaned_image_name
-
- # Pull (if needed) image and bring up a container from it
- # with 'sleep 3600' entrypoint, just to extract content from it
- artifact = {
- 'apiVersion': 'v1',
- 'kind': 'Pod',
- 'metadata': {
- 'name': pod_name
- },
- 'spec': {
- 'containers': [
- {
- 'image': image,
- 'command': [
- 'sleep',
- '3600'
- ],
- 'imagePullPolicy': 'IfNotPresent',
- 'name': container_name
- }
- ],
- 'restartPolicy': 'Always'
- }
- }
-
- self.oc.deploy(self._get_url(self.namespace, 'Pod'), artifact)
- try:
- self._wait_till_pod_runs(self.namespace, pod_name, timeout=300)
-
- # Archive content from the container and dump it to tmpfile
- tmpfile = '/tmp/atomicapp-{pod}.tar.gz'.format(pod=pod_name)
- self.oc.execute(
- self.namespace, pod_name, container_name,
- 'tar -cz --directory {} ./'.format('/' + src),
- outfile=tmpfile
- )
- finally:
- # Delete created pod
- self.oc.delete(self._get_url(self.namespace, 'Pod', pod_name))
-
- # Extract archive data
- tar = tarfile.open(tmpfile, 'r:gz')
- tar.extractall(dest)
-
- def _wait_till_pod_runs(self, namespace, pod, timeout=300):
+ def stop(self):
+ """Undeploys the app by given resource manifests.
+ Undeploy operation first scale down the replicas to 0 and then deletes
+ the resource from cluster.
"""
- Wait till pod runs, with a timeout.
+ logger.info("Undeploying from Kubernetes")
- Args:
- namespace (str): Openshift namespace
- pod (str): Pod name
- timeout (int): Timeout in seconds.
-
- Raises:
- ProviderFailedException on timeout or when the pod goes to
- failed state.
- """
- now = datetime.datetime.now()
- timeout_delta = datetime.timedelta(seconds=timeout)
- while datetime.datetime.now() - now < timeout_delta:
- status = self.oc.get_pod_status(namespace, pod)
- if status == 'running':
- break
- elif status == 'failed':
- raise ProviderFailedException(
- 'Unable to run pod for extracting content: '
- '{namespace}/{pod}'.format(namespace=namespace,
- pod=pod))
- time.sleep(1)
- if status != 'running':
- raise ProviderFailedException(
- 'Timed out to extract content from pod: '
- '{namespace}/{pod}'.format(namespace=namespace,
- pod=pod))
+ for kind, objects in self.oc_artifacts.iteritems():
+ for artifact in objects:
+ if self.dryrun:
+ logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s"
+ % (kind, artifact))
+ else:
+ self.api.delete(artifact, self.namespace)
diff --git a/test-requirements.txt b/test-requirements.txt
index b68bb921..f292f6cb 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,3 +2,4 @@ flake8
mock
pep8
pytest-cov
+pytest-localserver
diff --git a/tests/units/kubeshift/external/example_kubeconfig b/tests/units/kubeshift/external/example_kubeconfig
new file mode 100644
index 00000000..ef955b9c
--- /dev/null
+++ b/tests/units/kubeshift/external/example_kubeconfig
@@ -0,0 +1,17 @@
+apiVersion: v1
+clusters:
+- cluster:
+ server: http://localhost:8080
+ name: dev
+contexts:
+- context:
+ cluster: dev
+ user: default
+ name: dev
+current-context: dev
+kind: Config
+preferences: {}
+users:
+- name: default
+ user:
+ token: foobar
diff --git a/tests/units/kubeshift/test_client.py b/tests/units/kubeshift/test_client.py
new file mode 100644
index 00000000..ad01092b
--- /dev/null
+++ b/tests/units/kubeshift/test_client.py
@@ -0,0 +1,72 @@
+import mock
+import pytest
+from atomicapp.providers.lib.kubeshift.client import Client
+from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError
+
+config = {
+ "kind": "Config",
+ "preferences": {},
+ "current-context": "dev",
+ "contexts": [
+ {
+ "name": "dev",
+ "context": {
+ "cluster": "dev",
+ "user": "default"
+ }
+ }
+ ],
+ "clusters": [
+ {
+ "cluster": {
+ "server": "http://localhost:8080"
+ },
+ "name": "dev"
+ }
+ ],
+ "apiVersion": "v1",
+ "users": [
+ {
+ "name": "default",
+ "user": {
+ "token": "foobar"
+ }
+ }
+ ]
+}
+
+
+class FakeClient():
+
+ def __init__(self, *args):
+ pass
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.client.KubeKubernetesClient")
+def test_client_kubernetes(FakeClient):
+ Client(config, "kubernetes")
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.client.KubeOpenshiftClient")
+def test_client_openshift(FakeClient):
+ Client(config, "openshift")
+
+
+def test_client_load_failure():
+ with pytest.raises(KubeClientError):
+ Client(config, "foobar")
+
+
+# TODO
+def test_client_create():
+ pass
+
+
+# TODO
+def test_client_delete():
+ pass
+
+
+# TODO
+def test_client_namespaces():
+ pass
diff --git a/tests/units/kubeshift/test_kubebase.py b/tests/units/kubeshift/test_kubebase.py
new file mode 100644
index 00000000..aeff61a9
--- /dev/null
+++ b/tests/units/kubeshift/test_kubebase.py
@@ -0,0 +1,93 @@
+import pytest
+from atomicapp.providers.lib.kubeshift.kubebase import KubeBase
+from atomicapp.providers.lib.kubeshift.exceptions import KubeConnectionError
+
+
+config = {
+ "kind": "Config",
+ "preferences": {},
+ "current-context": "dev",
+ "contexts": [
+ {
+ "name": "dev",
+ "context": {
+ "cluster": "dev",
+ "user": "default"
+ }
+ }
+ ],
+ "clusters": [
+ {
+ "cluster": {
+ "server": "http://localhost:8080"
+ },
+ "name": "dev"
+ }
+ ],
+ "apiVersion": "v1",
+ "users": [
+ {
+ "name": "default",
+ "user": {
+ "token": "foobar"
+ }
+ }
+ ]
+}
+kubebase = KubeBase(config)
+
+
+def test_get_resources(httpserver):
+ content = '{"kind":"APIResourceList","groupVersion":"v1","resources":[{"name":"bindings","namespaced":true,"kind":"Binding"},{"name":"componentstatuses","namespaced":false,"kind":"ComponentStatus"}]}'
+ httpserver.serve_content(content, code=200, headers=None)
+ kubebase.get_resources(httpserver.url)
+
+
+def test_get_groups(httpserver):
+ content = '{"kind":"APIGroupList","groups":[{"name":"autoscaling","versions":[{"groupVersion":"autoscaling/v1","version":"v1"}],"preferredVersion":{"groupVersion":"autoscaling/v1","version":"v1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]},{"name":"batch","versions":[{"groupVersion":"batch/v1","version":"v1"}],"preferredVersion":{"groupVersion":"batch/v1","version":"v1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]},{"name":"extensions","versions":[{"groupVersion":"extensions/v1beta1","version":"v1beta1"}],"preferredVersion":{"groupVersion":"extensions/v1beta1","version":"v1beta1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]}]}'
+ httpserver.serve_content(content, code=200, headers=None)
+ kubebase.get_groups(httpserver.url)
+
+
+def test_connection(httpserver):
+ httpserver.serve_content(content="OK", code=200, headers=None)
+ kubebase.test_connection(httpserver.url)
+
+
+def test_kind_to_resource_name():
+ assert kubebase.kind_to_resource_name("Pod") == "pods"
+ assert kubebase.kind_to_resource_name("buildconfig") == "buildconfigs"
+ assert kubebase.kind_to_resource_name("policy") == "policies"
+ assert kubebase.kind_to_resource_name("petset") == "petsets"
+ assert kubebase.kind_to_resource_name("componentstatus") == "componentstatuses"
+ assert kubebase.kind_to_resource_name("Ingress") == "ingresses"
+
+
+def test_request_methods_failures():
+ with pytest.raises(KubeConnectionError):
+ kubebase.request("get", "http://foobar")
+ with pytest.raises(KubeConnectionError):
+ kubebase.request("post", "http://foobar")
+ with pytest.raises(KubeConnectionError):
+ kubebase.request("put", "http://foobar")
+ with pytest.raises(KubeConnectionError):
+ kubebase.request("delete", "http://foobar")
+ with pytest.raises(KubeConnectionError):
+ kubebase.request("patch", "http://foobar")
+
+
+def test_request_timeout(httpserver):
+ httpserver.serve_content(content="Time out", code=408, headers=None)
+ with pytest.raises(KubeConnectionError):
+ kubebase.request("get", httpserver.url)
+
+
+def test_request_ok(httpserver):
+ httpserver.serve_content(content="OK", code=200, headers=None)
+ kubebase.request("get", httpserver.url)
+
+
+def test_websocket_request_without_ssl():
+ # Should get an AttributeError if there is no "cert_ca" in the base config
+ with pytest.raises(AttributeError):
+ kubebase.websocket_request("http://foobar")
diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/kubeshift/test_kubeconfig.py
similarity index 84%
rename from tests/units/nulecule/test_kubeconfig.py
rename to tests/units/kubeshift/test_kubeconfig.py
index a0f13601..6a89debf 100644
--- a/tests/units/nulecule/test_kubeconfig.py
+++ b/tests/units/kubeshift/test_kubeconfig.py
@@ -1,10 +1,39 @@
import unittest
+import pytest
+import tempfile
+import os
from atomicapp.plugin import ProviderFailedException
from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig
class TestKubeConfParsing(unittest.TestCase):
+ def test_from_file(self):
+ """
+ Test parsing a hello-world JSON example and returning the
+ parsed anymarkup content
+ """
+ _, tmpfilename = tempfile.mkstemp()
+ f = open(tmpfilename, 'w')
+ f.write("{ 'hello': 'world'}")
+ f.close()
+ KubeConfig.from_file(tmpfilename)
+
+ def test_from_params(self):
+ KubeConfig.from_params("foo", "bar", "foo", "bar")
+
+ def test_parse_kubeconf_from_file_failure(self):
+ _, tmpfilename = tempfile.mkstemp()
+ f = open(tmpfilename, 'w')
+ f.write("{ 'hello': 'world'}")
+ f.close()
+ with pytest.raises(KeyError):
+ KubeConfig.parse_kubeconf(tmpfilename)
+
+ def test_parse_kubeconf_from_file(self):
+ example_kubeconfig = os.path.dirname(__file__) + '/external/example_kubeconfig'
+ KubeConfig.parse_kubeconf(example_kubeconfig)
+
def test_parse_kubeconf_data_insecure(self):
"""
Test parsing kubeconf data with current context containing
diff --git a/tests/units/kubeshift/test_kubernetes.py b/tests/units/kubeshift/test_kubernetes.py
new file mode 100644
index 00000000..914c31a6
--- /dev/null
+++ b/tests/units/kubeshift/test_kubernetes.py
@@ -0,0 +1,82 @@
+import mock
+from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient
+
+config = {
+ "kind": "Config",
+ "preferences": {},
+ "current-context": "dev",
+ "contexts": [
+ {
+ "name": "dev",
+ "context": {
+ "cluster": "dev",
+ "user": "default"
+ }
+ }
+ ],
+ "clusters": [
+ {
+ "cluster": {
+ "server": "http://localhost:8080"
+ },
+ "name": "dev"
+ }
+ ],
+ "apiVersion": "v1",
+ "users": [
+ {
+ "name": "default",
+ "user": {
+ "token": "foobar"
+ }
+ }
+ ]
+}
+
+
+class FakeClient():
+
+ def __init__(self, *args):
+ pass
+
+ def test_connection(self, *args):
+ pass
+
+ def get_resources(self, *args):
+ return ['Pod', 'pod', 'pods']
+
+ def get_groups(self, *args):
+ return {}
+
+ def request(self, method, url, data=None):
+ return None, 200
+
+ @property
+ def cluster(self):
+ return {'server': 'https://foobar'}
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase")
+def test_create(mock_class):
+ # Mock the API class
+ mock_class.return_value = FakeClient()
+ mock_class.kind_to_resource_name.return_value = 'Pod'
+
+ k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+ "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+ a = KubeKubernetesClient(config)
+ a.create(k8s_object, "foobar")
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase")
+def test_delete(mock_class):
+ # Mock the API class
+ mock_class.return_value = FakeClient()
+ mock_class.kind_to_resource_name.return_value = 'Pod'
+
+ k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+ "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+ a = KubeKubernetesClient(config)
+ a.delete(k8s_object, "foobar")
diff --git a/tests/units/kubeshift/test_openshift.py b/tests/units/kubeshift/test_openshift.py
new file mode 100644
index 00000000..af9f5e05
--- /dev/null
+++ b/tests/units/kubeshift/test_openshift.py
@@ -0,0 +1,121 @@
+import mock
+from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient
+
+config = {
+ "kind": "Config",
+ "preferences": {},
+ "current-context": "dev",
+ "contexts": [
+ {
+ "name": "dev",
+ "context": {
+ "cluster": "dev",
+ "user": "default"
+ }
+ }
+ ],
+ "clusters": [
+ {
+ "cluster": {
+ "server": "http://localhost:8080"
+ },
+ "name": "dev"
+ }
+ ],
+ "apiVersion": "v1",
+ "users": [
+ {
+ "name": "default",
+ "user": {
+ "token": "foobar"
+ }
+ }
+ ]
+}
+
+
+class FakeClient():
+
+ def __init__(self, *args):
+ pass
+
+ def test_connection(self, *args):
+ pass
+
+ def get_resources(self, *args):
+ return ['Pod', 'template']
+
+ def get_groups(self, *args):
+ return {}
+
+ def request(self, method, url, data=None):
+ return None, 200
+
+ @property
+ def cluster(self):
+ return {'server': 'https://foobar'}
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_create(mock_class):
+ # Mock the API class
+ mock_class.return_value = FakeClient()
+ mock_class.get_resources.return_value = ['Pod']
+ mock_class.kind_to_resource_name.return_value = 'Pod'
+
+ k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+ "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+ a = KubeOpenshiftClient(config)
+ a.create(k8s_object, "foobar")
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_delete(mock_class):
+ # Mock the API class
+ mock_class.return_value = FakeClient()
+ mock_class.kind_to_resource_name.return_value = 'Pod'
+
+ k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+ "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+ a = KubeOpenshiftClient(config)
+ a.delete(k8s_object, "foobar")
+
+
+class FakeOpenshiftTemplateClient(object):
+
+ def __init__(self, *args):
+ pass
+
+ def test_connection(self, *args):
+ pass
+
+ def get_resources(self, *args):
+ return ['Pod', 'template']
+
+ def get_groups(self, *args):
+ return {}
+
+ def request(self, method, url, data=None):
+ openshift_object = {}
+ openshift_object['objects'] = [{"kind": "Service", "apiVersion": "v1", "metadata": {"name": "cakephp-mysql-example", "annotations": {"description": "Exposes and load balances the application pods"}}, "spec": {"ports": [{"name": "web", "port": 8080, "targetPort": 8080}], "selector": {"name": "cakephp-mysql-example"}}}]
+ return openshift_object, 200
+
+ @property
+ def cluster(self):
+ return {'server': 'https://foobar'}
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_process_template(mock_class):
+ # Mock the API class
+ mock_class.return_value = FakeOpenshiftTemplateClient()
+ mock_class.kind_to_resource_name.return_value = 'template'
+
+ openshift_template = {"kind": "Template", "apiVersion": "v1", "metadata": {"name": "foobar"}, "objects": [{"kind": "Service", "apiVersion": "v1", "metadata": {"name": "cakephp-mysql-example", "annotations": {
+ "description": "Exposes and load balances the application pods"}}, "spec": {"ports": [{"name": "web", "port": 8080, "targetPort": 8080}], "selector": {"name": "cakephp-mysql-example"}}}]}
+
+ a = KubeOpenshiftClient(config)
+ a.create(openshift_template, "foobar")
+ a.delete(openshift_template, "foobar")
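
test_process_template covers the Template path: for kind Template the OpenShift client is expected to hand the artifact to the server for processing and then act on each entry of the returned 'objects' list, which is why FakeOpenshiftTemplateClient.request answers with a populated payload instead of None. A recording variant makes that two-step flow observable. As before, this is a sketch rather than part of the patch: the names are hypothetical, it reuses the module-level config from test_openshift.py, and the request-count check is an assumption about the client's internals.

    import mock

    from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient


    class FakeRecordingTemplateClient(object):

        """Template-aware stub that remembers every request it serves."""

        def __init__(self, *args):
            self.requests = []

        def test_connection(self, *args):
            pass

        def get_resources(self, *args):
            return ['Pod', 'template']

        def get_groups(self, *args):
            return {}

        def request(self, method, url, data=None):
            self.requests.append((method, url))
            # Same shape as the canned "processed template" payload above
            processed = {'objects': [{"kind": "Service", "apiVersion": "v1",
                                      "metadata": {"name": "cakephp-mysql-example"}}]}
            return processed, 200

        @property
        def cluster(self):
            return {'server': 'https://foobar'}


    @mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
    def test_process_template_flow(mock_class):
        fake = FakeRecordingTemplateClient()
        mock_class.return_value = fake
        mock_class.kind_to_resource_name.return_value = 'template'

        openshift_template = {"kind": "Template", "apiVersion": "v1",
                              "metadata": {"name": "foobar"}, "objects": []}

        a = KubeOpenshiftClient(config)
        a.create(openshift_template, "foobar")

        # Assumption: one request to process the template, then at least
        # one more to create the object the server returned
        assert len(fake.requests) >= 2
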
diff --git a/tests/units/providers/test_openshift_provider.py b/tests/units/providers/test_openshift_provider.py
deleted file mode 100644
index 31f6d2b5..00000000
--- a/tests/units/providers/test_openshift_provider.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Unittests for atomicapp/providers/openshift.py
-
-We test most functionalities of OpenshiftProvider by
-mocking out OpenshiftClient which interacts with
-the external world openshift and kubernetes API.
-"""
-
-import unittest
-import mock
-from atomicapp.providers.openshift import OpenshiftProvider
-from atomicapp.plugin import ProviderFailedException
-
-
-class OpenshiftProviderTestMixin(object):
-
- def setUp(self):
- # Patch OpenshiftClient to test OpenshiftProvider
- self.patcher = mock.patch('atomicapp.providers.openshift.OpenshiftClient')
- self.mock_OpenshiftClient = self.patcher.start()
- self.mock_oc = self.mock_OpenshiftClient()
-
- def get_oc_provider(self, dryrun=False, artifacts=[]):
- """
- Get OpenshiftProvider instance
- """
- op = OpenshiftProvider({}, '.', dryrun)
- op.artifacts = artifacts
- op.access_token = 'test'
- op.init()
- return op
-
- def tearDown(self):
- self.patcher.stop()
-
-
-class TestOpenshiftProviderDeploy(OpenshiftProviderTestMixin, unittest.TestCase):
- """
- Test OpenshiftProvider.run
- """
-
- def test_run(self):
- """
- Test calling OpenshiftClient.run from OpenshiftProvider.run
- """
- op = self.get_oc_provider()
- op.oapi_resources = ['foo']
- op.openshift_artifacts = {
- 'pods': [
- {
- 'metadata': {
- 'namespace': 'foo'
- }
- }
- ]
- }
-
- op.run()
-
- self.mock_oc.deploy.assert_called_once_with(
- 'namespaces/foo/pods/?access_token=test',
- op.openshift_artifacts['pods'][0])
-
- def test_run_dryrun(self):
- """
- Test running OpenshiftProvider.run as dryrun
- """
- op = self.get_oc_provider(dryrun=True)
- op.oapi_resources = ['foo']
- op.openshift_artifacts = {
- 'pods': [
- {
- 'metadata': {
- 'namespace': 'foo'
- }
- }
- ]
- }
-
- op.run()
-
- self.assertFalse(self.mock_oc.run.call_count)
-
-class TestOpenshiftProviderUnrun(OpenshiftProviderTestMixin, unittest.TestCase):
- """
- Test OpenshiftProvider.stop
- """
-
- def test_stop(self):
- """
- Test calling OpenshiftClient.delete from OpenshiftProvider.stop
- """
- op = self.get_oc_provider()
- op.oapi_resources = ['foo']
- op.openshift_artifacts = {
- 'pods': [
- {
- 'kind': 'Pod',
- 'metadata': {
- 'name': 'bar',
- 'namespace': 'foo'
- }
- }
- ]
- }
-
- op.stop()
-
- self.mock_oc.delete.assert_called_once_with(
- 'namespaces/foo/pods/%s?access_token=test' %
- op.openshift_artifacts['pods'][0]['metadata']['name'])
-
- def test_stop_dryrun(self):
- """
- Test running OpenshiftProvider.stop as dryrun
- """
- op = self.get_oc_provider(dryrun=True)
- op.oapi_resources = ['foo']
- op.openshift_artifacts = {
- 'pods': [
- {
- 'kind': 'Pod',
- 'metadata': {
- 'name': 'bar',
- 'namespace': 'foo'
- }
- }
- ]
- }
-
- op.stop()
-
- self.assertFalse(self.mock_oc.delete.call_count)
-
-class TestOpenshiftProviderProcessArtifactData(OpenshiftProviderTestMixin, unittest.TestCase):
- """
- Test processing Openshift artifact data
- """
-
- def test_process_artifact_data_non_template_kind(self):
- """
- Test processing non template artifact data
- """
- artifact_data = {
- 'kind': 'Pod',
- 'pods': [
- {
- 'metadata': {
- 'namespace': 'foo'
- }
- }
- ]
- }
- self.mock_oc.get_oapi_resources.return_value = ['pods']
-
- op = self.get_oc_provider()
-
- op._process_artifact_data('foo', artifact_data)
-
- self.assertEqual(op.openshift_artifacts,
- {'pod': [artifact_data]})
-
- def test_process_artifact_data_template_kind(self):
- """
- Test processing non template artifact data
- """
- artifact_data = {
- 'kind': 'Template',
- 'objects': [
- {
- 'kind': 'Pod',
- 'metadata': {
- 'namespace': 'foo'
- }
- },
- {
- 'kind': 'Service',
- 'metadata': {
- 'namespace': 'foo'
- }
- }
- ]
- }
- self.mock_oc.get_oapi_resources.return_value = ['templates']
- op = self.get_oc_provider()
- self.mock_oc.process_template.return_value = artifact_data['objects']
-
- op._process_artifact_data('foo', artifact_data)
-
- self.assertEqual(
- op.openshift_artifacts, {
- 'pod': [
- {'kind': 'Pod', 'metadata': {'namespace': 'foo'}}
- ],
- 'service': [
- {'kind': 'Service', 'metadata': {'namespace': 'foo'}}
- ]
- }
- )
-
- def test_process_artifact_data_error_resource_not_in_resources(self):
- """
- Test processing artifact data with kind not in resources
- """
- artifact_data = {
- 'kind': 'foobar'
- }
-
- op = self.get_oc_provider()
-
- self.assertRaises(
- ProviderFailedException,
- op._process_artifact_data, 'foo', artifact_data)
-
- def test_process_artifact_data_error_kind_key_missing(self):
- """
- Test processing artifact data with missing key 'kind'
- """
- artifact_data = {}
- op = self.get_oc_provider()
-
- self.assertRaises(
- ProviderFailedException,
- op._process_artifact_data, 'foo', artifact_data)
-
-
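
The retired suite above is superseded by the kubeshift unit tests added in this patch: run/stop and dryrun behaviour now belongs to the provider layer, and artifact handling to the client layer. One old case has no direct successor, namely an artifact whose kind the API does not expose (test_process_artifact_data_error_resource_not_in_resources). A possible regression test at the new layer is sketched below. It is a suggestion only: it reuses config and FakeClient from tests/units/kubeshift/test_kubernetes.py and assumes the client rejects a resource name missing from get_resources(); the exact exception class (defined in atomicapp.providers.lib.kubeshift.exceptions) is not pinned down here, hence the broad pytest.raises.

    import mock
    import pytest

    from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient


    @mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase")
    def test_create_unknown_kind(mock_class):
        mock_class.return_value = FakeClient()
        # Map the kind to a resource name the fake API does not advertise
        mock_class.kind_to_resource_name.return_value = 'foobars'

        bogus_object = {"apiVersion": "v1", "kind": "Foobar",
                        "metadata": {"name": "bogus"}}

        a = KubeKubernetesClient(config)
        with pytest.raises(Exception):
            a.create(bogus_object, "foobar")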