chore: add e2e test running on AWS infra
Uses the `capi-utils` module to set up a cluster on AWS with the current
version of `control-plane-provider`. The tests only validate cluster health.

Signed-off-by: Artem Chernyshev <[email protected]>
Unix4ever committed Sep 7, 2021
1 parent 4c7d42c commit 9435b12
Showing 11 changed files with 689 additions and 68 deletions.
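The new `internal/integration` package itself is among the files not expanded in this view. As a rough sketch of the "only validate health" approach the commit message describes, a check like the following would wait for every node of the workload cluster to report Ready, assuming the suite has already obtained a kubeconfig for the cluster provisioned through `capi-utils` (the kubeconfig path and test name here are hypothetical, not taken from the commit):

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// TestClusterHealth polls the workload cluster until every node is Ready.
// The kubeconfig location is a placeholder: the real suite gets access to the
// cluster from capi-utils after it provisions the AWS infrastructure.
func TestClusterHealth(t *testing.T) {
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/workload-kubeconfig")
	require.NoError(t, err)

	clientset, err := kubernetes.NewForConfig(config)
	require.NoError(t, err)

	ctx := context.Background()

	require.Eventually(t, func() bool {
		nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil || len(nodes.Items) == 0 {
			// Listing is expected to fail while the cluster is still coming up.
			return false
		}

		for _, node := range nodes.Items {
			ready := false

			for _, cond := range node.Status.Conditions {
				if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
					ready = true
				}
			}

			if !ready {
				return false
			}
		}

		return true
	}, 10*time.Minute, 15*time.Second)
}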
30 changes: 30 additions & 0 deletions .drone.yml
@@ -3,6 +3,15 @@ kind: pipeline
 name: default
 type: kubernetes

+services:
+- name: docker
+  image: docker:20.10-dind
+  entrypoint: [dockerd]
+  privileged: true
+  volumes:
+  - name: docker-socket
+    path: /var/run
+
 steps:
 - name: setup-ci
   image: autonomy/build-container:latest
@@ -22,6 +31,8 @@ steps:
   pull: always
   environment:
     PLATFORM: linux/amd64,linux/arm64
+    PUSH: true
+    REGISTRY: registry.dev.talos-systems.io
   commands:
   - make
   when:
@@ -60,6 +71,25 @@ steps:
   - name: docker
     path: /root/.docker/buildx

+- name: e2e-aws
+  image: autonomy/build-container:latest
+  pull: always
+  environment:
+    AWS_ACCESS_KEY_ID:
+      from_secret: aws_access_key_id
+    AWS_SECRET_ACCESS_KEY:
+      from_secret: aws_secret_access_key
+    CI: true
+    REGISTRY: registry.dev.talos-systems.io
+  commands:
+  - make integration-test
+  volumes:
+  - name: docker-socket
+    path: /var/run
+  - name: outerdockersock
+    path: /var/outer-run
+  - name: docker
+    path: /root/.docker/buildx

 - name: build-release
   image: autonomy/build-container:latest
6 changes: 6 additions & 0 deletions Dockerfile
@@ -71,6 +71,12 @@ ARG TARGETARCH
 RUN --mount=type=cache,target=/.cache GOOS=linux GOARCH=${TARGETARCH} go build -ldflags "-s -w" -o /manager
 RUN chmod +x /manager

+FROM build AS integration-test-build
+RUN --mount=type=cache,target=/.cache go test -v -c ./internal/integration
+
+FROM scratch AS integration-test
+COPY --from=integration-test-build /src/integration.test /integration.test
+
 FROM scratch AS container
 COPY --from=pkg-ca-certificates / /
 COPY --from=pkg-fhs / /
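`go test -c` compiles the tests under `./internal/integration` into a standalone `integration.test` binary without running them, and the scratch `integration-test` stage ships only that binary (a compiled Go test binary accepts the standard `-test.*` flags, e.g. `-test.v`). A hypothetical sketch of how the package could gate itself on the `CI` variable that the e2e-aws pipeline step sets; the commit's actual entry point is not shown in this view:

package integration

import (
	"os"
	"testing"
)

// TestMain skips the whole AWS e2e suite outside of CI; the e2e-aws step in
// .drone.yml sets CI=true together with the AWS credentials.
func TestMain(m *testing.M) {
	if os.Getenv("CI") == "" {
		os.Exit(0)
	}

	os.Exit(m.Run())
}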
7 changes: 7 additions & 0 deletions Makefile
@@ -106,3 +106,10 @@ run: install ## Run the controller locally. This is for testing purposes only.
 .PHONY: clean
 clean:
 	@rm -rf $(ARTIFACTS)
+
+integration-test-build:
+	@$(MAKE) local-integration-test DEST=./_out/ PLATFORM=linux/amd64
+
+.PHONY: integration-test
+integration-test: integration-test-build
+	@REGISTRY_AND_USERNAME=$(REGISTRY_AND_USERNAME) TAG=$(TAG) NAME=$(NAME) bash hack/test/e2e-aws.sh
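The `integration-test` target first builds the test binary locally, then hands the registry, image name, and tag to `hack/test/e2e-aws.sh` (another file not expanded in this view) through the environment. A hypothetical helper showing how the suite could reassemble the controller image reference from those variables; only the variable names come from the Makefile, the function itself is illustrative:

package integration

import (
	"fmt"
	"os"
)

// controllerImage rebuilds the image reference from the variables the
// Makefile exports for hack/test/e2e-aws.sh, e.g.
// registry.dev.talos-systems.io/<user>/cluster-api-control-plane-provider-talos:<tag>.
func controllerImage() (string, error) {
	registry := os.Getenv("REGISTRY_AND_USERNAME")
	name := os.Getenv("NAME")
	tag := os.Getenv("TAG")

	if registry == "" || name == "" || tag == "" {
		return "", fmt.Errorf("REGISTRY_AND_USERNAME, NAME and TAG must all be set")
	}

	return fmt.Sprintf("%s/%s:%s", registry, name, tag), nil
}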
2 changes: 1 addition & 1 deletion controllers/configs.go
@@ -56,7 +56,7 @@ func (r *TalosControlPlaneReconciler) talosconfigForMachine(ctx context.Context,
 	}

 	// grab all addresses as endpoints
-	node, err := clientset.CoreV1().Nodes().Get(machine.Status.NodeRef.Name, metav1.GetOptions{})
+	node, err := clientset.CoreV1().Nodes().Get(ctx, machine.Status.NodeRef.Name, metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
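This one-line change, like the matching ones in `taloscontrolplane_controller.go` below, tracks the API break that client-go introduced in v0.18 (pulled in here by the bump to v0.21.3): typed client methods take a `context.Context` as their first argument, and `Delete` takes `metav1.DeleteOptions` by value instead of by pointer. A minimal self-contained sketch of the updated call pattern:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteNode demonstrates the client-go >= v0.18 signatures used throughout
// this commit: ctx comes first, and options are passed by value.
func deleteNode(ctx context.Context, clientset *kubernetes.Clientset, name string) error {
	node, err := clientset.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get node %q: %w", name, err)
	}

	return clientset.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
}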
10 changes: 5 additions & 5 deletions controllers/taloscontrolplane_controller.go
@@ -337,7 +337,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,
 	if !machine.ObjectMeta.DeletionTimestamp.IsZero() {
 		r.Log.Info("Machine is in process of deletion", "machine", machine.Name)

-		node, err := clientset.CoreV1().Nodes().Get(machine.Status.NodeRef.Name, metav1.GetOptions{})
+		node, err := clientset.CoreV1().Nodes().Get(ctx, machine.Status.NodeRef.Name, metav1.GetOptions{})
 		if err != nil {
 			// It's possible for the node to already be deleted in the workload cluster, so we just
 			// requeue if that's the case instead of throwing a scary error.
@@ -349,7 +349,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,

 		r.Log.Info("Deleting node", "machine", machine.Name, "node", node.Name)

-		err = clientset.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{})
+		err = clientset.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
 		if err != nil {
 			return ctrl.Result{RequeueAfter: 20 * time.Second}, err
 		}
@@ -397,7 +397,7 @@

 	r.Log.Info("Deleting node", "machine", oldest.Name, "node", node.Name)

-	err = clientset.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{})
+	err = clientset.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
 	if err != nil {
 		return ctrl.Result{RequeueAfter: 20 * time.Second}, err
 	}
@@ -594,7 +594,7 @@ func (r *TalosControlPlaneReconciler) updateStatus(ctx context.Context, tcp *con
 			return fmt.Errorf("machine %q does not have a noderef", ownedMachine.Name)
 		}

-		node, err := clientset.CoreV1().Nodes().Get(ownedMachine.Status.NodeRef.Name, metav1.GetOptions{})
+		node, err := clientset.CoreV1().Nodes().Get(ctx, ownedMachine.Status.NodeRef.Name, metav1.GetOptions{})
 		if err != nil {
 			return fmt.Errorf("failed to get node %q: %w", node.Name, err)
 		}
@@ -635,7 +635,7 @@ func (r *TalosControlPlaneReconciler) updateStatus(ctx context.Context, tcp *con
 	// We consider ourselves "initialized" if the workload cluster returns any number of nodes.
 	// We also do not return client list errors (just log them) as it's expected that it will fail
 	// for a while until the cluster is up.
-	nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodeList, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err == nil {
 		if len(nodeList.Items) > 0 {
 			tcp.Status.Initialized = true
39 changes: 26 additions & 13 deletions go.mod
@@ -2,19 +2,32 @@ module github.com/talos-systems/cluster-api-control-plane-provider-talos

 go 1.16

+replace (
+	// keep older versions of k8s.io packages to keep compatibility with cluster-api
+	k8s.io/api v0.21.3 => k8s.io/api v0.20.5
+	k8s.io/api-server v0.21.3 => k8s.io/api-server v0.20.5
+	k8s.io/apimachinery v0.21.3 => k8s.io/apimachinery v0.20.5
+	k8s.io/client-go v0.21.3 => k8s.io/client-go v0.20.5
+
+	sigs.k8s.io/cluster-api v0.3.20 => sigs.k8s.io/cluster-api v0.3.9
+)
+
 require (
-	cloud.google.com/go v0.47.0 // indirect
-	github.com/go-logr/logr v0.1.0
-	github.com/onsi/ginkgo v1.16.2
-	github.com/onsi/gomega v1.13.0
+	github.com/go-logr/logr v0.4.0
+	github.com/go-logr/zapr v0.2.0 // indirect
+	github.com/onsi/ginkgo v1.16.4
+	github.com/onsi/gomega v1.14.0
 	github.com/pkg/errors v0.9.1
-	github.com/talos-systems/cluster-api-bootstrap-provider-talos v0.2.0-alpha.12.0.20210805142556-9fb0d07ca4d2
-	github.com/talos-systems/talos/pkg/machinery v0.11.3
-	k8s.io/api v0.17.9
-	k8s.io/apimachinery v0.17.9
-	k8s.io/apiserver v0.17.9
-	k8s.io/client-go v0.17.9
-	k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19
-	sigs.k8s.io/cluster-api v0.3.12
-	sigs.k8s.io/controller-runtime v0.5.14
+	github.com/stretchr/testify v1.7.0
+	github.com/talos-systems/capi-utils v0.0.0-20210906195159-c20b1a80b427
+	github.com/talos-systems/cluster-api-bootstrap-provider-talos v0.2.0
+	github.com/talos-systems/talos/pkg/machinery v0.12.0
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+	k8s.io/api v0.21.3
+	k8s.io/apimachinery v0.21.3
+	k8s.io/apiserver v0.21.3
+	k8s.io/client-go v0.21.3
+	k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471
+	sigs.k8s.io/cluster-api v0.3.20
+	sigs.k8s.io/controller-runtime v0.6.3
 )
