diff --git a/.dockerignore b/.dockerignore
index bfdd2a5e..0bcc9f15 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,8 +1,8 @@
 .git
 build.bash
 tests
-requirements-dev.txt
 hooks
 scripts
 .*
 Dockerfile
+Makefile
diff --git a/.gitignore b/.gitignore
index 78a05f56..60715866 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 .idea
 templates/
 __pycache__
+.pytest_cache
 *.pyc
 .env
 .cache
diff --git a/Dockerfile.devkit b/Dockerfile.devkit
new file mode 100644
index 00000000..332d792a
--- /dev/null
+++ b/Dockerfile.devkit
@@ -0,0 +1,37 @@
+FROM debian:buster
+
+ENV LANG=C.UTF-8
+
+RUN set -x \
+    && apt-get update \
+    && apt-get install -y --no-install-recommends \
+        ca-certificates \
+        docker.io \
+        gcc \
+        git \
+        jq \
+        libcurl4-openssl-dev \
+        libssl-dev \
+        python3-dev \
+        python3-pip \
+        python3-setuptools
+
+COPY requirements-dev.txt /marathon-lb/requirements-dev.txt
+COPY requirements.txt /marathon-lb/requirements.txt
+
+# NOTE(jkoelker) dcos-e2e has a large list of strict requirements (== vs >=)
+#                that creates conflicts preventing the command line from
+#                running. By installing it in its own pip transaction, then
+#                allowing subsequent pip to use the existing requirements
+#                (no --upgrade or --force-reinstall) the command line is
+#                available
+RUN set -x \
+    && pip3 install \
+        --no-cache \
+        --upgrade \
+        https://github.com/dcos/dcos-e2e/archive/2018.12.10.0.zip \
+    && pip3 install \
+        --no-cache \
+        -r /marathon-lb/requirements-dev.txt
+
+CMD ["/bin/bash"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..0da812a6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,366 @@
+#
+# To start a DCOS cluster with the defaults (OSS edition):
+#     make dcos
+#
+# To start a DCOS cluster with EE:
+#     DCOS_LICENSE_KEY_PATH=${HOME}/license.txt \
+#     DCOS_E2E_VARIANT=enterprise \
+#     make dcos
+#
+# By default the installers are kept in ./.cache which is removed
+# during `make clean`. Specifying a location outside this repo for
+# `DCOS_E2E_INSTALLERS_DIR` prevents redownloading:
+#
+#     DCOS_E2E_INSTALLERS_DIR=${HOME}/dcos/installers make dcos
+#
+# To start a shell with the env pointing to a dcos cluster
+#     make cluster-env shell
+
+THIS_PATH := $(strip $(realpath $(dir $(realpath \
+	$(lastword $(MAKEFILE_LIST))))))
+
+# Default overridable variables
+MLB_VERSION ?= $(shell git rev-parse --short HEAD || echo dev)
+CONTAINTER_REPO ?= mesosphere-ci/marathon-lb
+CONTAINER_TAG ?= $(MLB_VERSION)
+
+DEVKIT_CONTAINTER_REPO ?= mesosphere/marathon-lb-devkit
+DEVKIT_CONTAINER_TAG ?= latest
+DEVKIT_CONTAINER_NAME ?= marathon-lb-devkit
+
+DCOS_E2E_WORKSPACE_DIR ?= $(THIS_PATH)/.cache/dcos-e2e/workspace
+DCOS_E2E_INSTALLERS_DIR ?= $(THIS_PATH)/.cache/dcos-e2e/installers
+DCOS_E2E_CLUSTER_ID ?= marathon-lb-devkit
+DCOS_E2E_CHANNEL ?= stable
+DCOS_E2E_VERSION ?= 1.12.0
+DCOS_E2E_VARIANT ?= oss
+
+CLUSTER_URL ?=
+PUBLIC_AGENT_IP ?=
+DCOS_USERNAME ?= admin
+DCOS_PASSWORD ?= admin
+DCOS_VERSION ?= $(shell echo '$(DCOS_E2E_VERSION)' | cut -d. -f -2)
+
+DOCKER_SOCKET ?= /var/run/docker.sock
+DOCKER_HUB_USERNAME ?=
+DOCKER_HUB_PASSWORD ?=
+
+# Internal variables
+MLB_PATH := $(THIS_PATH)
+IMG := $(CONTAINTER_REPO):$(CONTAINER_TAG)
+DEVKIT_IMG := $(DEVKIT_CONTAINTER_REPO):$(DEVKIT_CONTAINER_TAG)
+
+
+MLB_CONTAINER_PATH := /marathon-lb
+DEVKIT_VOL_ARGS := -v $(MLB_PATH):$(MLB_CONTAINER_PATH)
+DOCKER_VOL_ARGS := -v $(DOCKER_SOCKET):/var/run/docker.sock
+
+DCOS_E2E_INSTALLER_VOL := \
+	-v $(DCOS_E2E_INSTALLERS_DIR):$(DCOS_E2E_INSTALLERS_DIR)
+DCOS_E2E_WORKSPACE_VOL := \
+	-v $(DCOS_E2E_WORKSPACE_DIR):$(DCOS_E2E_WORKSPACE_DIR)
+DCOS_E2E_VOL_ARGS := \
+	$(DCOS_E2E_INSTALLER_VOL) \
+	$(DCOS_E2E_WORKSPACE_VOL)
+
+#(TODO) Support overriding backend to use any backend
+DCOS_E2E_BACKEND := docker
+DCOS_E2E_NODE_TRANSPORT := docker-exec
+
+
+JQ_FIND_CLUSTER_URL := jq -r -e '."Web UI" // empty'
+JQ_FIND_PUBLIC_IP := jq -r -e '.Nodes.public_agents[0].ip_address // empty'
+
+
+CLUSTER_ENV_ARGS = \
+	--env CLUSTER_URL="$(CLUSTER_URL)" \
+	--env PUBLIC_AGENT_IP="$(PUBLIC_AGENT_IP)" \
+	--env DCOS_USERNAME="$(DCOS_USERNAME)" \
+	--env DCOS_LOGIN_UNAME="$(DCOS_USERNAME)" \
+	--env DCOS_PASSWORD="$(DCOS_PASSWORD)" \
+	--env DCOS_LOGIN_PW="$(DCOS_PASSWORD)" \
+	--env DCOS_VERSION="$(DCOS_VERSION)" \
+	--env DCOS_E2E_BACKEND="$(DCOS_E2E_BACKEND)" \
+	--env DCOS_E2E_NODE_TRANSPORT="$(DCOS_E2E_NODE_TRANSPORT)" \
+	--env DCOS_E2E_CLUSTER_ID="$(DCOS_E2E_CLUSTER_ID)" \
+	--env DCOS_E2E_VARIANT="$(DCOS_E2E_VARIANT)" \
+	--env MARATHON_LB_IMAGE="$(IMG)" \
+	--env MARATHON_LB_VERSION="$(MLB_VERSION)"
+
+CLUSTER_RUNNING := 0
+ifneq ($(strip $(shell docker ps -q -f 'name=$(DCOS_E2E_CLUSTER_ID)')),)
+    CLUSTER_RUNNING := 1
+endif
+
+ifeq ($(strip $(CLUSTER_URL)),)
+    CLUSTER_TARGET := dcos
+endif
+
+ifeq ($(strip $(PUBLIC_AGENT_IP)),)
+    CLUSTER_TARGET := dcos
+endif
+
+DCOS_E2E_DOWNLOAD_SITE := https://downloads.dcos.io/dcos
+
+ifeq ($(strip $(DCOS_E2E_VARIANT)), enterprise)
+    DCOS_E2E_DOWNLOAD_SITE := $(strip \
+        https://downloads.mesosphere.com/dcos-enterprise)
+    DCOS_E2E_FILE_TAG := .ee
+    DCOS_LICENSE_KEY_PATH ?=
+    DCOS_E2E_VOL_ARGS := \
+        $(DCOS_E2E_VOL_ARGS) \
+        -v $(DCOS_LICENSE_KEY_PATH):$(DCOS_LICENSE_KEY_PATH)
+    DCOS_E2E_ENV_ARGS := \
+        --env DCOS_LICENSE_KEY_PATH="$(DCOS_LICENSE_KEY_PATH)"
+endif
+
+DCOS_E2E_FILE := $(DCOS_E2E_VERSION)$(DCOS_E2E_FILE_TAG).sh
+DCOS_E2E_DOWNLOAD_URL := $(DCOS_E2E_DOWNLOAD_SITE)/$(DCOS_E2E_CHANNEL)
+DCOS_E2E_DOWNLOAD_URL := $(DCOS_E2E_DOWNLOAD_URL)/$(DCOS_E2E_VERSION)
+DCOS_E2E_DOWNLOAD_URL := $(strip \
+    $(DCOS_E2E_DOWNLOAD_URL)/dcos_generate_config$(DCOS_E2E_FILE_TAG).sh)
+
+
+.DEFAULT_GOAL := help
+
+
+.PHONY: help
+help:
+	@echo "Targets: "
+	@echo "    clean"
+	@echo "        Remove all artifacts/files/containers"
+	@echo ""
+	@echo "    image, devkit"
+	@echo "        Build the marathon-lb/devkit images"
+	@echo ""
+	@echo "    dcos"
+	@echo "        Start a dcos cluster with dcos-e2e"
+	@echo ""
+	@echo "    shell"
+	@echo "        Run /bin/bash in a devkit container"
+	@echo ""
+	@echo "    test"
+	@echo "        Run unit/integration tests"
+
+
+.PHONY: clean-dcos-container
+clean-dcos-container:
+ifeq ($(CLUSTER_RUNNING), 1)
+	@echo "+ Cleaning up DCOS cluster-id $(DCOS_E2E_CLUSTER_ID)"
+	-@docker run \
+		--rm \
+		--tty \
+		--interactive \
+		$(DEVKIT_VOL_ARGS) \
+		$(DOCKER_VOL_ARGS) \
+		$(DCOS_E2E_VOL_ARGS) \
+		$(DEVKIT_IMG) \
+		minidcos \
+			$(DCOS_E2E_BACKEND) \
+			destroy --cluster-id $(DCOS_E2E_CLUSTER_ID)
+endif
+
+.PHONY: clean-devkit-container
+clean-devkit-container:
+ifneq ($(strip $(shell docker ps -q -f 'name=$(DEVKIT_CONTAINER_NAME)')),)
+	@echo "+ Cleaning up $(DEVKIT_CONTAINER_NAME) container"
+	-@docker rm \
+		--force \
+		--volumes \
+		$(DEVKIT_CONTAINER_NAME) > /dev/null 2>&1 || true
+endif
+
+
+.PHONY: clean
+clean: clean-devkit-container \
+       clean-dcos-container
+	@echo "+ Remove files left behind"
+	@find . -type f -name '*.pyc' -delete
+	@find . -type f -name '.coverage.*' -delete
+	@find . -name ".pytest_cache" -type d -prune -exec rm -r "{}" \;
+	@find . -name "__pycache__" -type d -prune -exec rm -r "{}" \;
+	@rm -rf .cache
+
+
+.PHONY: cluster-env
+cluster-env: cluster-url cluster-public-ip
+
+
+.PHONY: cluster-public-ip
+cluster-public-ip: devkit $(CLUSTER_TARGET)
+ifeq ($(strip $(PUBLIC_AGENT_IP)),)
+	@echo "+ Discovering Public Node IP"
+	$(eval PUBLIC_AGENT_IP := $(shell \
+		docker run \
+			--rm \
+			--tty \
+			--interactive \
+			$(DEVKIT_VOL_ARGS) \
+			$(DOCKER_VOL_ARGS) \
+			$(DCOS_E2E_VOL_ARGS) \
+			$(DEVKIT_IMG) \
+			minidcos \
+				$(DCOS_E2E_BACKEND) \
+				inspect --cluster-id $(DCOS_E2E_CLUSTER_ID) \
+		| $(JQ_FIND_PUBLIC_IP)))
+endif
+	@echo "+ Public Node IP: $(PUBLIC_AGENT_IP)"
+
+
+.PHONY: cluster-url
+cluster-url: devkit $(CLUSTER_TARGET)
+ifeq ($(strip $(CLUSTER_URL)),)
+	@echo "+ Discovering Cluster URL"
+	$(eval CLUSTER_URL := $(shell \
+		docker run \
+			--rm \
+			--tty \
+			--interactive \
+			$(DEVKIT_VOL_ARGS) \
+			$(DOCKER_VOL_ARGS) \
+			$(DCOS_E2E_VOL_ARGS) \
+			$(DEVKIT_IMG) \
+			minidcos \
+				$(DCOS_E2E_BACKEND) \
+				inspect --cluster-id $(DCOS_E2E_CLUSTER_ID) \
+		| $(JQ_FIND_CLUSTER_URL)))
+endif
+	@echo "+ Cluster URL: $(CLUSTER_URL)"
+
+
+.PHONY: devkit
+devkit: image
+	@echo "+ Build devkit image $(DEVKIT_IMG)"
+	@docker build \
+		--rm \
+		--quiet \
+		--force-rm \
+		--file $(MLB_PATH)/Dockerfile.devkit \
+		--tag $(DEVKIT_IMG) \
+		$(MLB_PATH) > /dev/null 2>&1
+
+
+$(DCOS_E2E_WORKSPACE_DIR):
+	@echo "+ Creating DCOS E2E Workspace"
+	@mkdir -p $(DCOS_E2E_WORKSPACE_DIR)
+
+
+$(DCOS_E2E_INSTALLERS_DIR):
+	@echo "+ Creating DCOS E2E Installer Cache"
+	@mkdir -p $(DCOS_E2E_INSTALLERS_DIR)
+
+
+$(DCOS_E2E_INSTALLERS_DIR)/$(DCOS_E2E_FILE): $(DCOS_E2E_INSTALLERS_DIR)
+ifeq ($(strip $(DCOS_E2E_VERSION)), master)
+	@echo '+ Removing existing master installer'
+	-@rm -f $@.tmp $@
+endif
+	@echo "+ Downloading $(DCOS_E2E_VERSION)$(DCOS_E2E_FILE_TAG) installer"
+	@curl \
+		--show-error \
+		--location \
+		--fail \
+		--continue-at - \
+		--output $@.tmp \
+		$(DCOS_E2E_DOWNLOAD_URL) \
+	&& mv -f $@.tmp $@ 2>/dev/null \
+	&& touch $@
+
+
+.PHONY: dcos
+dcos: devkit \
+      $(DCOS_E2E_WORKSPACE_DIR) \
+      $(DCOS_E2E_INSTALLERS_DIR)/$(DCOS_E2E_FILE)
+ifeq ($(CLUSTER_RUNNING), 0)
+	@echo "+ Starting DCOS $(DCOS_E2E_VARIANT)" \
+		"cluster: $(DCOS_E2E_CLUSTER_ID)"
+	@docker run \
+		--rm \
+		--tty \
+		$(DEVKIT_VOL_ARGS) \
+		$(DOCKER_VOL_ARGS) \
+		$(DCOS_E2E_VOL_ARGS) \
+		$(DCOS_E2E_ENV_ARGS) \
+		$(DEVKIT_IMG) \
+		minidcos \
+			$(DCOS_E2E_BACKEND) \
+			create \
+				--cluster-id $(DCOS_E2E_CLUSTER_ID) \
+				--workspace-dir $(DCOS_E2E_WORKSPACE_DIR) \
+				--variant $(DCOS_E2E_VARIANT) \
+				--wait-for-dcos \
+				$(DCOS_E2E_INSTALLERS_DIR)/$(DCOS_E2E_FILE)
+endif
+
+
+.PHONY: image
+image:
+	@echo "+ Build container image $(IMG)"
+	@docker build \
+		--rm \
+		--quiet \
+		--force-rm \
+		--file $(MLB_PATH)/Dockerfile \
+		--tag $(IMG) \
+		$(MLB_PATH) > /dev/null 2>&1 || true
+
+
+.PHONY: image-push
+image-push: image
+	@echo "+ Pushing image to hub"
+	docker push $(IMG)
+
+
+.PHONY: shell
+shell: devkit
+	@echo "+ Running $(DEVKIT_IMG) container"
+	-@docker run \
+		--rm \
+		--tty \
+		--interactive \
+		$(CLUSTER_ENV_ARGS) \
+		$(DEVKIT_VOL_ARGS) \
+		$(DOCKER_VOL_ARGS) \
+		$(DCOS_E2E_VOL_ARGS) \
+		$(DCOS_E2E_ENV_ARGS) \
+		$(DEVKIT_IMG) \
+		/bin/bash -l || true
+
+
+.PHONY: test-integration
+test-integration: image-push devkit cluster-url cluster-public-ip
+	@echo "+ Integration Testing with image $(IMG)"
+	@docker run \
+		--rm \
+		--tty \
+		$(CLUSTER_ENV_ARGS) \
+		$(DEVKIT_VOL_ARGS) \
+		$(DOCKER_VOL_ARGS) \
+		$(DCOS_E2E_VOL_ARGS) \
+		$(DCOS_E2E_ENV_ARGS) \
+		$(DEVKIT_IMG) \
+		/bin/bash -c " \
+			cd $(MLB_CONTAINER_PATH)/ci \
+			&& pytest -p no:warnings -v test_marathon_lb_dcos_e2e.py \
+		"
+
+.PHONY: test-unit
+test-unit: devkit
+	@echo "+ Unit Testing with image $(DEVKIT_IMG)"
+	@docker run \
+		--rm \
+		--tty \
+		$(DEVKIT_VOL_ARGS) \
+		$(DEVKIT_IMG) \
+		/bin/bash -c " \
+			echo -n 'flake8...' \
+			&& flake8 $(MLB_CONTAINER_PATH) \
+			&& echo ' OK' \
+			&& echo -n 'nosetests' \
+			&& cd $(MLB_CONTAINER_PATH) \
+			&& nosetests --with-coverage --cover-package=. \
+		"
+
+
+.PHONY: test
+test: test-unit test-integration
diff --git a/README.md b/README.md
index 3cb9a486..f679963a 100644
--- a/README.md
+++ b/README.md
@@ -394,6 +394,54 @@ PRs are welcome, but here are a few general guidelines:
    bash /path/to/marathon-lb/scripts/install-git-hooks.sh
    ```
 
+### Using the Makefile and docker for development and testing
+
+Running unit and integration tests is automated as `make` targets. Docker
+is required to use the targets as it will run all tests in containers.
+
+Several environment variables can be set to control the image tags,
+DCOS version/variant, etc. Check the top of the `Makefile` for more info.
+
+To run the unit tests:
+
+```bash
+make test-unit
+```
+
+To run the integration tests a DCOS installation will be started via
+[dcos-e2e](https://github.com/dcos/dcos-e2e). The installation of
+`dcos-e2e` and management of the cluster will all be done in docker
+containers. Since the installers are rather large downloads, it is
+beneficial to specify a value for `DCOS_E2E_INSTALLERS_DIR`. By default
+`DCOS_E2E_INSTALLERS_DIR` is inside the `.cache` directory that will be
+removed upon `make clean`. You must provide a repository for the
+resultant docker image to be pushed to via the `CONTAINTER_REPO`
+environment variable. It is assumed that the local docker is already
+logged in and the image will be pushed prior to launching the cluster.
+
+To run the integration tests on the OSS variant of DCOS:
+
+```bash
+DCOS_E2E_INSTALLERS_DIR="${HOME}/dcos/installers" \
+CONTAINTER_REPO="my_docker_user/my-marathon-lb-repo" make test-integration
+```
+
+To run the integration tests on the ENTERPRISE variant of DCOS:
+
+
+```bash
+DCOS_LICENSE_KEY_PATH=${HOME}/license.txt \
+DCOS_E2E_VARIANT=enterprise \
+DCOS_E2E_INSTALLERS_DIR="${HOME}/dcos/installers" \
+CONTAINTER_REPO="my_docker_user/my-marathon-lb-repo" make test-integration
+```
+
+To run both unit and integration tests (add appropriate variables):
+
+```bash
+CONTAINTER_REPO="my_docker_user/my-marathon-lb-repo" make test
+```
+
 ### Troubleshooting your development environment setup
 
 #### FileNotFoundError: [Errno 2] No such file or directory: 'curl-config'
diff --git a/ci/test_marathon_lb_dcos_e2e.py b/ci/test_marathon_lb_dcos_e2e.py
new file mode 100644
index 00000000..7ba31e64
--- /dev/null
+++ b/ci/test_marathon_lb_dcos_e2e.py
@@ -0,0 +1,515 @@
+#!python3
+
+import contextlib
+import json
+import logging
+import os
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+
+from dcos_e2e import cluster
+from dcos_e2e import node
+from dcos_test_utils import helpers as dcos_helpers
+from dcos_test_utils import iam as dcos_iam
+from dcos_test_utils import enterprise as dcos_ee_api
+from dcos_test_utils import dcos_api
+from dcos_test_utils import package
+
+import dcos_installer_tools
+import pytest
+
+import test_marathon_lb
+
+
+DCOS_E2E_BACKEND = 'DCOS_E2E_BACKEND'
+DCOS_E2E_CLUSTER_ID = 'DCOS_E2E_CLUSTER_ID'
+DCOS_E2E_NODE_TRANSPORT = 'DCOS_E2E_NODE_TRANSPORT'
+DCOS_LOGIN_UNAME = 'DCOS_LOGIN_UNAME'
+DCOS_LOGIN_PW = 'DCOS_LOGIN_PW'
+
+BACKEND_AWS = 'aws'
+BACKEND_DOCKER = 'docker'
+BACKEND_VAGRANT = 'vagrant'
+
+MARATHON_LB_IMAGE = os.environ.get('MARATHON_LB_IMAGE',
+                                   'marathon-lb:latest')
+MARATHON_LB_VERSION = os.environ.get('MARATHON_LB_VERSION',
+                                     'dev')
+
+OSS = 'oss'
+ENTERPRISE = 'enterprise'
+VARIANTS = {OSS: dcos_installer_tools.DCOSVariant.OSS,
+            ENTERPRISE: dcos_installer_tools.DCOSVariant.ENTERPRISE}
+VARIANT_VALUES = dict((value.value, value) for value in VARIANTS.values())
+
+
+logging.captureWarnings(True)
+
+
+# NOTE(jkoelker) Define some helpers that should eventually be upstreamed
+class Package(package.Cosmos):
+    def render(self, name, options=None, version=None):
+        params = {'packageName': name}
+
+        if version:
+            params['packageVersion'] = version
+
+        if options:
+            params['options'] = options
+
+        self._update_headers('render',
+                             request_version=1,
+                             response_version=1)
+        return self._post('/render', params).json().get('marathonJson')
+
+
+class Secrets(dcos_helpers.ApiClientSession):
+    def __init__(self, default_url: dcos_helpers.Url, session=None):
+        super().__init__(default_url)
+        if session:
+            self.session = session
+
+    def list_stores(self):
+        r = self.get('/store')
+        r.raise_for_status()
+        return r.json()['array']
+
+    def list_secrets(self, store, path='/'):
+        params = {'list': True}
+        r = self.get(self.secret_uri(store, path), params=params)
+        r.raise_for_status()
+        return r.json()['array']
+
+    def create_secret(self, path, value, store='default'):
+        headers = None
+        data = None
+
+        if not isinstance(value, (str, bytes)):
+            value = json.dumps(value,
+                               sort_keys=True,
+                               indent=None,
+                               ensure_ascii=False,
+                               separators=(',', ':'))
+
+        json_value = {'value': value}
+
+        if isinstance(value, bytes):
+            headers = {'Content-Type': 'application/octet-stream'}
+            data = value
+            json_value = None
+
+        return self.put(self.secret_uri(store, path),
+                        json=json_value,
+                        data=data,
+                        headers=headers)
+
+    def delete_secret(self, path, store='default'):
+        return self.delete(self.secret_uri(store, path))
+
+    @staticmethod
+    def secret_uri(store, path):
+        if not path.startswith('/'):
+            path = '/' + path
+        return '/secret/{}{}'.format(store, path)
+
+
+def add_user_to_group(self, user, group):
+    return self.put('/groups/{}/users/{}'.format(group, user))
+
+
+def delete_user_from_group(self, user, group):
+    if not self.user_in_group(user, group):
+        return
+
+    return self.delete('/groups/{}/users/{}'.format(group, user))
+
+
+def list_group_users(self, group):
+    r = self.get('/groups/{}/users'.format(group))
+    r.raise_for_status()
+    return r.json()['array']
+
+
+def user_in_group(self, user, group):
+    return user in [a['user']['uid']
+                    for a in self.list_group_users(group)]
+
+
+# NOTE(jkoelker) Monkey patch in our helpers
+dcos_api.DcosApiSession.package = property(
+    lambda s: Package(default_url=s.default_url.copy(path='package'),
+                      session=s.copy().session))
+dcos_api.DcosApiSession.secrets = property(
+    lambda s: Secrets(
+        default_url=s.default_url.copy(path='secrets/v1'),
+        session=s.copy().session))
+dcos_ee_api.EnterpriseApiSession.secrets = property(
+    lambda s: Secrets(
+        default_url=s.default_url.copy(path='secrets/v1'),
+        session=s.copy().session))
+dcos_iam.Iam.add_user_to_group = add_user_to_group
+dcos_iam.Iam.delete_user_from_group = delete_user_from_group
+dcos_iam.Iam.list_group_users = list_group_users
+dcos_iam.Iam.user_in_group = user_in_group
+
+
+class Cluster(cluster.Cluster):
+    _USER_ZKCLI_CMD = (
+        '.',
+        '/opt/mesosphere/environment.export',
+        '&&',
+        'zkCli.sh',
+        '-server',
+        '"zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,'
+        'zk-5.zk:2181"'
+    )
+    _USER_OSS_EMAIL = 'albert@bekstil.net'
+    _USER_OSS_ZK_PATH = '/dcos/users/{}'.format(_USER_OSS_EMAIL)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._variant = dcos_installer_tools.DCOSVariant.OSS
+
+    @property
+    def _any_master(self):
+        return next(iter(self.masters))
+
+    def _any_master_run(self, cmd, *args, **kwargs):
+        return self._any_master.run(list(cmd), *args, **kwargs)
+
+    @property
+    def _oss_user_exists(self):
+        cmd = self._USER_ZKCLI_CMD + ('get',
+                                      self._USER_OSS_ZK_PATH)
+        output = self._any_master_run(cmd, shell=True)
+        stdout = output.stdout.decode()
+
+        if stdout.strip().split('\n')[-1] == self._USER_OSS_EMAIL:
+            return True
+
+        return False
+
+    def _create_oss_user(self):
+        if self._oss_user_exists:
+            return
+
+        cmd = self._USER_ZKCLI_CMD + ('create',
+                                      self._USER_OSS_ZK_PATH,
+                                      self._USER_OSS_EMAIL)
+        self._any_master_run(cmd, shell=True)
+
+    def _delete_oss_user(self):
+        cmd = self._USER_ZKCLI_CMD + ('delete', self._USER_OSS_ZK_PATH)
+        self._any_master_run(cmd, shell=True)
+
+    def _enterprise_session(self):
+        cmd = ('cat', '/opt/mesosphere/etc/bootstrap-config.json')
+        config_result = self._any_master_run(cmd)
+        config = json.loads(config_result.stdout.decode())
+        ssl_enabled = config['ssl_enabled']
+
+        scheme = 'https://' if ssl_enabled else 'http://'
+        dcos_url = scheme + str(self._any_master.public_ip_address)
+        api = dcos_ee_api.EnterpriseApiSession(
+            dcos_url=dcos_url,
+            masters=[str(n.public_ip_address) for n in self.masters],
+            slaves=[str(n.public_ip_address) for n in self.agents],
+            public_slaves=[
+                str(n.public_ip_address) for n in self.public_agents
+            ],
+            auth_user=dcos_api.DcosUser(credentials=self.credentials),
+        )
+
+        if api.ssl_enabled:
+            api.set_ca_cert()
+        api.login_default_user()
+        api.set_initial_resource_ids()
+
+        return api
+
+    def _oss_session(self):
+        api = dcos_api.DcosApiSession(
+            dcos_url='http://{}'.format(self._any_master.public_ip_address),
+            masters=[str(n.public_ip_address) for n in self.masters],
+            slaves=[str(n.public_ip_address) for n in self.agents],
+            public_slaves=[
+                str(n.public_ip_address) for n in self.public_agents
+            ],
+            auth_user=dcos_api.DcosUser(credentials=self.credentials),
+        )
+
+        api.login_default_user()
+        return api
+
+    def _session(self):
+        if self.enterprise:
+            return self._enterprise_session()
+
+        return self._oss_session()
+
+    @property
+    def credentials(self):
+        if self.enterprise:
+            return {
+                'uid': os.environ.get(DCOS_LOGIN_UNAME, 'admin'),
+                'password': os.environ.get(DCOS_LOGIN_PW, 'admin')
+            }
+
+        return dcos_helpers.CI_CREDENTIALS
+
+    @property
+    def enterprise(self):
+        return self._variant == dcos_installer_tools.DCOSVariant.ENTERPRISE
+
+    @property
+    def oss(self):
+        return self._variant == dcos_installer_tools.DCOSVariant.OSS
+
+    @property
+    def variant(self):
+        return self._variant
+
+    @variant.setter
+    def variant(self, value):
+        # NOTE(jkoelker) Hack because enums from vendored libraries
+        #                are technically different
+        if hasattr(value, 'value') and value.value in VARIANT_VALUES:
+            value = VARIANT_VALUES[value.value]
+
+        if value in VARIANTS:
+            value = VARIANTS[value]
+
+        if value not in dcos_installer_tools.DCOSVariant:
+            msg = 'Expected one of {} or {} got {}'
+            raise ValueError(msg.format(tuple(VARIANTS.keys()),
+                                        dcos_installer_tools.DCOSVariant,
+                                        value))
+
+        self._variant = value
+
+    def create_user(self):
+        if self.enterprise:
+            return
+
+        self._create_oss_user()
+
+    def delete_user(self):
+        if self.enterprise:
+            return
+
+        self._delete_oss_user()
+
+    def create_service_account(self, name, secret, description=None,
+                               superuser=False):
+        if not self.enterprise:
+            return
+
+        if description is None:
+            description = '{} service account'.format(name)
+
+        key = rsa.generate_private_key(
+            public_exponent=65537,
+            key_size=2048,
+            backend=default_backend())
+
+        priv = key.private_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PrivateFormat.PKCS8,
+            encryption_algorithm=serialization.NoEncryption())
+
+        pub = key.public_key().public_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PublicFormat.SubjectPublicKeyInfo)
+
+        priv = priv.decode('ascii')
+        pub = pub.decode('ascii')
+
+        with self.session as session:
+            iam = session.iam
+            try:
+                iam.create_service(name, pub, description)
+            except AssertionError:
+                iam.delete_service(name)
+                iam.create_service(name, pub, description)
+
+            if superuser:
+                iam.add_user_to_group(name, 'superusers')
+
+            login_endpoint = 'https://leader.mesos/{}/auth/login'
+
+            # NOTE(jkoelker) override the login_endpoint to force it to
+            #                use `leader.mesos` by default it is set
+            #                to the dcos_url the session is created with
+            sa_creds = iam.make_service_account_credentials(name, priv)
+            sa_creds['login_endpoint'] = login_endpoint.format(
+                iam.default_url.path)
+            secret_ret = session.secrets.create_secret(secret, sa_creds)
+            if secret_ret.status_code != 201:
+                session.secrets.delete_secret(secret, store='default')
+                session.secrets.create_secret(secret, sa_creds)
+
+    def delete_service_account(self, name, secret):
+        if not self.enterprise:
+            return
+
+        with self.session as session:
+            iam = session.iam
+            iam.delete_user_from_group(name, 'superusers')
+            session.secrets.delete_secret(secret, store='default')
+            iam.delete_service(name)
+
+    @contextlib.contextmanager
+    def service_account(self, name, secret, description=None,
+                        superuser=False):
+        try:
+            yield self.create_service_account(name,
+                                              secret,
+                                              description,
+                                              superuser)
+        finally:
+            self.delete_service_account(name, secret)
+
+    @property
+    @contextlib.contextmanager
+    def session(self):
+        with self.user:
+            yield self._session()
+
+    @property
+    @contextlib.contextmanager
+    def user(self):
+        try:
+            yield self.create_user()
+        finally:
+            self.delete_user()
+
+
+def get_docker_cluster(cluster_id, transport, **kwargs):
+    from dcos_e2e_cli.dcos_docker.commands import _common
+
+    if cluster_id not in _common.existing_cluster_ids():
+        return None
+
+    cluster_containers = _common.ClusterContainers(cluster_id, transport)
+    cluster = Cluster.from_nodes(
+        masters=set(map(cluster_containers.to_node,
+                        cluster_containers.masters)),
+        agents=set(map(cluster_containers.to_node,
+                       cluster_containers.agents)),
+        public_agents=set(map(cluster_containers.to_node,
+                              cluster_containers.public_agents)))
+
+    cluster.variant = cluster_containers.dcos_variant
+
+    return cluster
+
+
+def get_cluster():
+    backend = os.environ.get(DCOS_E2E_BACKEND, BACKEND_DOCKER)
+    cluster_id = os.environ.get(DCOS_E2E_CLUSTER_ID, 'default')
+
+    if backend == BACKEND_AWS:
+        return None
+
+    if backend == BACKEND_VAGRANT:
+        return None
+
+    transport = os.environ.get(DCOS_E2E_NODE_TRANSPORT, 'docker-exec')
+
+    if transport == 'ssh':
+        transport = node.Transport.SSH
+    else:
+        transport = node.Transport.DOCKER_EXEC
+
+    return get_docker_cluster(cluster_id, transport)
+
+
+@pytest.fixture(scope='session')
+def dcos_marathon_lb_session():
+    '''Fixture to return `cluster.session` after deploying `marathon-lb`'''
+    cluster = get_cluster()
+
+    with cluster.session as session:
+        options = {
+            'marathon-lb': {
+                'sysctl-params': ' '.join(
+                    ['net.ipv4.tcp_fin_timeout=30',
+                     'net.core.somaxconn=10000']),
+            }
+        }
+
+        if cluster.enterprise:
+            options['marathon-lb'].update({
+                'secret_name': 'mlb-secret',
+                'marathon-uri': 'https://master.mesos:8443',
+                'strict-mode': True
+            })
+
+        with cluster.service_account('mlb-principal',
+                                     'mlb-secret',
+                                     superuser=True):
+            app = session.package.render('marathon-lb', options=options)
+            app['container']['docker']['image'] = MARATHON_LB_IMAGE
+            app['labels']['DCOS_PACKAGE_VERSION'] = MARATHON_LB_VERSION
+
+            with session.marathon.deploy_and_cleanup(app):
+                yield session
+
+
+@pytest.fixture(scope='session')
+def agent_public_ip(dcos_marathon_lb_session):
+    '''Fixture to return the first public agents ip address'''
+    return dcos_marathon_lb_session.public_slaves[0]
+
+
+@pytest.fixture(scope='session')
+def dcos_version(dcos_marathon_lb_session):
+    '''Fixture to return the first dcos version'''
+    return dcos_marathon_lb_session.get_version()
+
+
+@pytest.fixture(scope='session',
+                params=(['backends/' + f
+                         for f in os.listdir('backends')] +
+                        ['backends_1.9/' + f
+                         for f in os.listdir('backends_1.9')]))
+def backend_app(request, dcos_version):
+    if dcos_version.startswith('1.9.'):
+        if not request.param.startswith('backends_1.9/'):
+            return pytest.skip('Not a 1.9 backend')
+        return test_marathon_lb.get_json(request.param)
+
+    if request.param.startswith('backends_1.9/'):
+        return pytest.skip('Not a 1.9 cluster')
+
+    return test_marathon_lb.get_json(request.param)
+
+
+@pytest.fixture(scope='session')
+def app_deployment(dcos_marathon_lb_session, backend_app):
+    session = dcos_marathon_lb_session
+    with session.marathon.deploy_and_cleanup(backend_app,
+                                             check_health=False):
+        app_id = backend_app['id']
+        backend_app['name'] = app_id[1:] if app_id[0] == '/' else app_id
+        yield backend_app
+
+
+@pytest.fixture(scope='session')
+def app_port(app_deployment, agent_public_ip):
+    return test_marathon_lb.get_app_port(app_deployment['name'],
+                                         agent_public_ip)
+
+
+def test_port(app_deployment, app_port):
+    assert app_port == app_deployment["labels"]["HAPROXY_0_PORT"]
+
+
+def test_response(app_deployment, app_port, agent_public_ip):
+    (response,
+     status_code) = test_marathon_lb.get_app_content(app_port,
+                                                     agent_public_ip)
+    assert status_code == 200
+    assert response == app_deployment['name']
diff --git a/requirements-dev.txt b/requirements-dev.txt
index b3d1e515..4175128d 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -3,4 +3,10 @@ coverage
 flake8
 mock
 nose
-pytest==3.5.1
+pytest
+retrying
+dcos-shakedown
+cryptography
+https://github.com/dcos/dcos-e2e/archive/2018.12.10.0.zip
+https://github.com/dcos/dcos-test-utils/archive/master.zip
+https://github.com/mesosphere/dcos-installer-tools/archive/master.zip