Skip to content

Commit

Permalink
Support kind for integration testing (#2057)
Browse files Browse the repository at this point in the history
* Support kind for integration testing

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* upgrade pre-commit hooks

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
dalazx and pre-commit-ci[bot] authored Mar 9, 2023
1 parent 3696a7f commit d5f225f
Show file tree
Hide file tree
Showing 15 changed files with 89 additions and 71 deletions.
6 changes: 5 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,11 @@ pip install -e .[dev]
```shell
pytest -vv tests/unit
```
8. Run the integration test suite:
9. Run the integration test suite:
```shell
pytest -vv tests/integration
```

### Debugging Tests
Pass `-x -s --log-cli-level=DEBUG` to your `pytest` commands for better debugging
experience.
15 changes: 9 additions & 6 deletions k8s.mk
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
# Download the `kind` (Kubernetes in Docker) binary used by the integration
# test cluster. The two curl lines are guarded by a uname check so the
# correct macOS binary (Intel vs ARM) is fetched; `|| :` keeps make going
# when the guard is false.
tests/bin/kind:
	mkdir -p $(@D)
	# for Intel Macs
	[ $$(uname -m) = x86_64 ] && curl -Lo $@ https://kind.sigs.k8s.io/dl/v0.17.0/kind-darwin-amd64 || :
	# for M1 / ARM Macs
	[ $$(uname -m) = arm64 ] && curl -Lo $@ https://kind.sigs.k8s.io/dl/v0.17.0/kind-darwin-arm64 || :
	chmod +x $@

# To bring a cluster up manually:
#   tests/bin/kind create cluster --name neuro --wait 5m
#   kubectl cluster-info --context kind-neuro

K8S_CLUSTER_CMD := tests/k8s/cluster.sh

install_k8s:
Expand Down
1 change: 0 additions & 1 deletion platform_api/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,6 @@ async def create_app(

async def _init_app(app: aiohttp.web.Application) -> AsyncIterator[None]:
async with AsyncExitStack() as exit_stack:

logger.info("Initializing Auth client")
auth_client = await exit_stack.enter_async_context(
AuthClient(
Expand Down
2 changes: 1 addition & 1 deletion platform_api/orchestrator/kube_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -968,7 +968,7 @@ def _process_disk_volumes(
pod_volumes = []
volume_mounts = []

pvc_volumes: dict[str, PVCDiskVolume] = dict()
pvc_volumes: dict[str, PVCDiskVolume] = {}
for index, disk_volume in enumerate(disk_volumes, 1):
pvc_volume = pvc_volumes.get(disk_volume.disk.disk_id)
if pvc_volume is None:
Expand Down
4 changes: 2 additions & 2 deletions platform_api/poller_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,6 @@ async def create_app(

async def _init_app(app: aiohttp.web.Application) -> AsyncIterator[None]:
async with AsyncExitStack() as exit_stack:

logger.info("Initializing AuthClient")
auth_client = await exit_stack.enter_async_context(
AuthClient(
Expand Down Expand Up @@ -144,6 +143,8 @@ async def _init_app(app: aiohttp.web.Application) -> AsyncIterator[None]:
cluster_holder=cluster_holder,
cluster_name=config.cluster_name,
)
if cluster:
cluster_updater.disable_updates_for_test = True

logger.info("Initializing JobsPoller")
jobs_poller = JobsPoller(
Expand All @@ -153,7 +154,6 @@ async def _init_app(app: aiohttp.web.Application) -> AsyncIterator[None]:

if cluster:
await cluster_holder.update(cluster)
cluster_updater.disable_updates_for_test = True
else:
await cluster_updater.do_update()

Expand Down
13 changes: 11 additions & 2 deletions tests/integration/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

from platform_api.config import AuthConfig

from tests.integration.conftest import ApiRunner
from tests.integration.conftest import ApiRunner, _TestConfigClient
from tests.integration.notifications import NotificationsServer


Expand Down Expand Up @@ -83,6 +83,16 @@ async def delete_storage(request: aiohttp.web.Request) -> aiohttp.web.Response:
await runner.close()


@pytest.fixture
async def config_client(fake_config_app: URL) -> AsyncIterator[_TestConfigClient]:
    """Yield a ``_TestConfigClient`` pointed at the fake config app.

    The client is opened/closed via its async context manager so the
    underlying HTTP session is torn down when the test finishes.
    """
    client = _TestConfigClient(
        base_url=fake_config_app,
        # Placeholder token — presumably the fake config app does not
        # validate it. TODO(review): confirm against fake_config_app.
        service_token="token",
    )
    async with client:
        yield client

@pytest.fixture(scope="session")
def admin_server_image_name() -> str:
with open("PLATFORMADMIN_IMAGE") as f:
Expand Down Expand Up @@ -241,7 +251,6 @@ async def admin_client(
admin_url: URL, auth_config: AuthConfig
) -> AsyncGenerator[AdminClient, None]:
async with create_admin_client(admin_url, auth_config) as client:

yield client


Expand Down
1 change: 0 additions & 1 deletion tests/integration/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,6 @@ async def long_polling_by_job_id(
unreachable_optimization: bool = True,
headers: Optional[dict[str, str]] = None,
) -> dict[str, Any]:

# A little optimization with unreachable statuses
unreachable_statuses_map: dict[str, list[str]] = {
JobStatus.PENDING.value: [
Expand Down
8 changes: 8 additions & 0 deletions tests/integration/auth.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
from platform_api.config import AuthConfig, OAuthConfig

from tests.conftest import random_str
from tests.integration.conftest import _TestConfigClient


@pytest.fixture(scope="session")
Expand Down Expand Up @@ -173,6 +174,7 @@ async def __call__(
@pytest.fixture
async def regular_user_factory(
auth_client: AuthClient,
config_client: _TestConfigClient,
admin_client: AdminClient,
token_factory: Callable[[str], str],
admin_token: str,
Expand Down Expand Up @@ -201,6 +203,12 @@ async def _factory(
await admin_client.create_cluster(cluster)
except ClientResponseError:
pass
try:
# in case docker containers are reused, we want to recreate clusters
# that were previously stored in memory
await config_client.create_cluster(name=cluster)
except ClientResponseError:
pass
if org_name is not None:
try:
await admin_client.create_org(org_name)
Expand Down
45 changes: 38 additions & 7 deletions tests/integration/conftest.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import asyncio
import base64
import json
import uuid
from collections.abc import AsyncIterator, Awaitable, Callable, Iterator, Mapping
Expand Down Expand Up @@ -39,6 +40,7 @@
ServerConfig,
StorageConfig,
)
from platform_api.config_client import ConfigClient
from platform_api.orchestrator.job_request import JobNotFoundException
from platform_api.orchestrator.kube_client import (
AlreadyExistsException,
Expand Down Expand Up @@ -75,7 +77,13 @@ def event_loop() -> Iterator[asyncio.AbstractEventLoop]:
@pytest.fixture(scope="session")
async def kube_config_payload() -> dict[str, Any]:
process = await asyncio.create_subprocess_exec(
"kubectl", "config", "view", "-o", "json", stdout=asyncio.subprocess.PIPE
"kubectl",
"config",
"view",
"--raw",
"-o",
"json",
stdout=asyncio.subprocess.PIPE,
)
output, _ = await process.communicate()
payload_str = output.decode().rstrip()
Expand All @@ -96,17 +104,35 @@ async def kube_config_cluster_payload(kube_config_payload: dict[str, Any]) -> An
def cert_authority_data_pem(
    kube_config_cluster_payload: dict[str, Any]
) -> Optional[str]:
    """Return the cluster CA certificate as PEM text, or ``None``.

    Supports both kube config styles:
    - ``certificate-authority``: a file path; the file contents are returned.
    - ``certificate-authority-data``: inline base64 (what ``kubectl config
      view --raw`` emits for kind clusters); decoded and returned.

    Returns ``None`` when neither key is present or the path value is empty.
    """
    if "certificate-authority" in kube_config_cluster_payload:
        ca_path = kube_config_cluster_payload["certificate-authority"]
        if ca_path:
            return Path(ca_path).read_text()
    elif "certificate-authority-data" in kube_config_cluster_payload:
        return base64.b64decode(
            kube_config_cluster_payload["certificate-authority-data"]
        ).decode()
    return None


@pytest.fixture(scope="session")
async def kube_config_user_payload(kube_config_payload: dict[str, Any]) -> Any:
    """Return the ``minikube`` user entry from the kube config payload.

    When the entry embeds base64 ``client-certificate-data`` /
    ``client-key-data`` (as ``kubectl config view --raw`` emits for kind
    clusters), the decoded bytes are written to temporary files and the
    corresponding ``client-certificate`` / ``client-key`` path keys are
    added, so consumers that expect file paths keep working.
    """
    import tempfile

    user_name = "minikube"
    users = {user["name"]: user["user"] for user in kube_config_payload["users"]}
    user = users[user_name]
    if "client-certificate-data" in user:
        # delete=False: the file path must stay valid after this function
        # returns, for the whole test session.
        with tempfile.NamedTemporaryFile(delete=False) as cert_file:
            cert_file.write(base64.b64decode(user["client-certificate-data"]))
            cert_file.flush()
            user["client-certificate"] = cert_file.name
    if "client-key-data" in user:
        with tempfile.NamedTemporaryFile(delete=False) as key_file:
            key_file.write(base64.b64decode(user["client-key-data"]))
            key_file.flush()
            user["client-key"] = key_file.name
    return user


@pytest.fixture(scope="session")
Expand Down Expand Up @@ -530,7 +556,6 @@ async def create_failed_attach_volume_event(self, pod_id: str) -> None:
@pytest.fixture(scope="session")
async def kube_client_factory(kube_config: KubeConfig) -> Callable[..., MyKubeClient]:
def _f(custom_kube_config: Optional[KubeConfig] = None) -> MyKubeClient:

config = custom_kube_config or kube_config
kube_client = MyKubeClient(
base_url=config.endpoint_url,
Expand Down Expand Up @@ -922,4 +947,10 @@ async def close(self) -> None:

@property
def closed(self) -> bool:
    # True once the runner's task has been cleared (i.e. after close()).
    # `not self._task` already yields a bool; no bool() wrapper needed.
    return not self._task


class _TestConfigClient(ConfigClient):
    """Test-only extension of ``ConfigClient``.

    Adds a ``create_cluster`` call that the production client does not
    expose, so integration tests can (re)register clusters in the config
    service — e.g. when reused docker containers have lost in-memory state.
    """

    async def create_cluster(self, *, name: str) -> None:
        # POST /clusters with just the cluster name; the response body is
        # ignored. NOTE(review): assumes _request raises on error statuses —
        # confirm against ConfigClient's implementation.
        async with self._request("POST", "clusters", json={"name": name}):
            pass
1 change: 0 additions & 1 deletion tests/integration/postgres.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,6 @@ async def _wait_for_postgres_server(

@pytest.fixture
async def postgres_config(postgres_dsn: str) -> AsyncIterator[PostgresConfig]:

db_config = PostgresConfig(
postgres_dsn=postgres_dsn,
alembic=EnvironConfigFactory().create_alembic(postgres_dsn),
Expand Down
2 changes: 0 additions & 2 deletions tests/integration/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -1380,7 +1380,6 @@ async def test_create_job_with_disk_volume_user_with_slash_single_ok(
jobs_client_factory: Callable[[_User], JobsClient],
disk_client_factory: Callable[[_User], DiskAPIClient],
) -> None:

service_user = await service_account_factory(
owner=regular_user, name="some-really-long-name"
)
Expand Down Expand Up @@ -2290,7 +2289,6 @@ async def test_create_job_with_secret_missing_all_requested_secrets_fail(
result = await jobs_client.get_job_by_id(job_id)
assert result["history"]["reason"] == "Missing secrets: 'key1', 'key2'"
finally:

if job_id:
await jobs_client.delete_job(job_id)

Expand Down
10 changes: 0 additions & 10 deletions tests/integration/test_jobs_storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,6 @@ async def test_drop_unexisting_job(self, storage: JobsStorage) -> None:
await storage.drop_job(original_job.id)

async def test_try_create_job__no_name__ok(self, storage: JobsStorage) -> None:

pending_job = self._create_pending_job()
async with storage.try_create_job(pending_job) as job:
assert pending_job.status == JobStatus.PENDING
Expand Down Expand Up @@ -163,7 +162,6 @@ async def test_try_create_job__no_name__job_changed_while_creation(
async def test_try_create_job__different_name_same_owner__ok(
self, storage: JobsStorage
) -> None:

owner = "test-user-1"
job_name_1 = "some-test-job-name-1"
job_name_2 = "some-test-job-name-2"
Expand All @@ -187,7 +185,6 @@ async def test_try_create_job__different_name_same_owner__ok(
async def test_try_create_job__same_name_different_owner__ok(
self, storage: JobsStorage
) -> None:

owner_1 = "test-user-1"
job_name_1 = "some-test-job-name-1"
owner_2 = "test-user-2"
Expand All @@ -213,7 +210,6 @@ async def test_try_create_job__same_name_different_owner__ok(
async def test_try_create_job__same_name_with_an_active_job__conflict(
self, storage: JobsStorage, first_job_status: JobStatus
) -> None:

owner = "test-user"
job_name = "some-test-job-name"

Expand All @@ -239,7 +235,6 @@ async def test_try_create_job__same_name_with_an_active_job__conflict(
async def test_try_create_job__same_name_and_base_owner_with_active_job__conflict(
self, storage: JobsStorage, first_job_status: JobStatus
) -> None:

owner = "test-user"
job_name = "some-test-job-name"

Expand Down Expand Up @@ -322,7 +317,6 @@ async def test_try_create_job__same_name_and_base_owner_with_a_terminated_job__o
assert job.status == JobStatus.PENDING

async def test_try_create_job_with_tags(self, storage: JobsStorage) -> None:

tags = ["tag1", "tag2"]
job = self._create_job(tags=tags)
async with storage.try_create_job(job) as job:
Expand All @@ -337,7 +331,6 @@ async def test_get_non_existent(self, storage: JobsStorage) -> None:
await storage.get_job("unknown")

async def test_get_all_no_filter_empty_result(self, storage: JobsStorage) -> None:

jobs = await storage.get_all_jobs()
assert not jobs

Expand Down Expand Up @@ -1228,7 +1221,6 @@ async def test_get_all_filter_by_cluster_and_status(
assert job_ids == []

async def test_get_running_empty(self, storage: JobsStorage) -> None:

jobs = await storage.get_running_jobs()
assert not jobs

Expand Down Expand Up @@ -1451,7 +1443,6 @@ async def test_try_update_job__no_name__job_changed_while_creation(
async def test_try_update_job__different_name_same_owner__ok(
self, storage: JobsStorage
) -> None:

owner = "test-user-1"
job_name_1 = "some-test-job-name-1"
job_name_2 = "some-test-job-name-2"
Expand All @@ -1478,7 +1469,6 @@ async def test_try_update_job__different_name_same_owner__ok(
async def test_try_update_job__same_name_different_owner__ok(
self, storage: JobsStorage
) -> None:

owner_1 = "test-user-1"
owner_2 = "test-user-2"
job_name = "some-test-job-name"
Expand Down
17 changes: 11 additions & 6 deletions tests/integration/test_kube_orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -1687,7 +1687,7 @@ async def test_job_pod_with_disk_volume_simple_ok(
disk_volumes_raw = [
v
for v in raw["spec"]["volumes"]
if v.get("persistentVolumeClaim", dict()).get("claimName") == disk_id
if v.get("persistentVolumeClaim", {}).get("claimName") == disk_id
]
assert len(disk_volumes_raw) == 1

Expand Down Expand Up @@ -3150,7 +3150,8 @@ async def test_cannot_be_scheduled(
assert preempted == []

job_pod = await kube_client.get_pod(job.id)
assert job_pod.status and job_pod.status.is_phase_pending
assert job_pod.status
assert job_pod.status.is_phase_pending

async def test_not_enough_resources(
self,
Expand All @@ -3167,7 +3168,8 @@ async def test_not_enough_resources(
assert preempted == []

job_pod = await kube_client.get_pod(job.id)
assert job_pod.status and job_pod.status.is_phase_pending
assert job_pod.status
assert job_pod.status.is_phase_pending

async def test_running_jobs_ignored(
self,
Expand All @@ -3182,7 +3184,8 @@ async def test_running_jobs_ignored(
assert preempted == []

preemptible_pod = await kube_client.get_pod(preemptible_job.id)
assert preemptible_pod.status and preemptible_pod.status.is_scheduled
assert preemptible_pod.status
assert preemptible_pod.status.is_scheduled

async def test_no_preemptible_jobs(
self,
Expand All @@ -3198,7 +3201,8 @@ async def test_no_preemptible_jobs(
assert preempted == []

job_pod = await kube_client.get_pod(job.id)
assert job_pod.status and job_pod.status.is_phase_pending
assert job_pod.status
assert job_pod.status.is_phase_pending

async def test_no_jobs(
self,
Expand All @@ -3212,4 +3216,5 @@ async def test_no_jobs(
assert preempted == []

preemptible_pod = await kube_client.get_pod(preemptible_job.id)
assert preemptible_pod.status and preemptible_pod.status.is_scheduled
assert preemptible_pod.status
assert preemptible_pod.status.is_scheduled
Loading

0 comments on commit d5f225f

Please sign in to comment.