Commit

add istio test
stuartwdouglas committed Sep 25, 2024
1 parent 1b0a910 commit 8fa7eca
Showing 8 changed files with 79 additions and 18 deletions.
34 changes: 18 additions & 16 deletions backend/controller/scaling/k8sscaling/deployment_provisioner.go
@@ -98,7 +98,7 @@ func (r *DeploymentProvisioner) HandleSchemaChange(ctx context.Context, msg *ftl
if err != nil {
logger.Errorf(err, "failed to handle schema change")
}
logger.Infof("handled schema change for %s", msg.ModuleName)
logger.Debugf("handled schema change for %s", msg.ModuleName)
return err
}
func (r *DeploymentProvisioner) handleSchemaChange(ctx context.Context, msg *ftlv1.PullSchemaResponse) error {
@@ -112,7 +112,8 @@ func (r *DeploymentProvisioner) handleSchemaChange(ctx context.Context, msg *ftl
logger := log.FromContext(ctx)
logger = logger.Module(msg.ModuleName)
ctx = log.ContextWithLogger(ctx, logger)
logger.Infof("Handling schema change for %s", msg.DeploymentKey)

logger.Debugf("Handling schema change for %s", msg.DeploymentKey)
deploymentClient := r.Client.AppsV1().Deployments(r.Namespace)
deployment, err := deploymentClient.Get(ctx, msg.DeploymentKey, v1.GetOptions{})
deploymentExists := true
@@ -132,7 +133,7 @@ func (r *DeploymentProvisioner) handleSchemaChange(ctx context.Context, msg *ftl
// This will need to be fixed as part of the support for rolling deployments
r.KnownDeployments[msg.DeploymentKey] = true
if deploymentExists {
logger.Infof("updating deployment %s", msg.DeploymentKey)
logger.Debugf("updating deployment %s", msg.DeploymentKey)
return r.handleExistingDeployment(ctx, deployment, msg.Schema)
} else {
return r.handleNewDeployment(ctx, msg.Schema, msg.DeploymentKey)
@@ -145,14 +146,14 @@ func (r *DeploymentProvisioner) handleSchemaChange(ctx context.Context, msg *ftl
// Nasty hack, we want all the controllers to have updated their route tables before we kill the runner
// so we add a slight delay here
time.Sleep(time.Second * 10)
logger.Infof("deleting deployment %s", msg.ModuleName)
logger.Debugf("deleting deployment %s", msg.ModuleName)
err := deploymentClient.Delete(ctx, msg.DeploymentKey, v1.DeleteOptions{})
if err != nil {
logger.Errorf(err, "failed to delete deployment %s", msg.ModuleName)
}
// TODO: we only need to delete the services once this new ownership structure has been deployed to production
// Existing deployments don't have this though
logger.Infof("deleting service %s", msg.ModuleName)
logger.Debugf("deleting service %s", msg.ModuleName)
err = r.Client.CoreV1().Secrets(r.Namespace).Delete(ctx, msg.DeploymentKey, v1.DeleteOptions{})
if err != nil {
if !errors.IsNotFound(err) {
@@ -200,7 +201,7 @@ func (r *DeploymentProvisioner) handleNewDeployment(ctx context.Context, dep *sc
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to get service %s: %w", name, err)
}
logger.Infof("creating new kube service %s", name)
logger.Debugf("creating new kube service %s", name)
err = decodeBytesToObject([]byte(cm.Data[serviceTemplate]), service)
if err != nil {
return fmt.Errorf("failed to decode service from configMap %s: %w", configMapName, err)
@@ -213,9 +214,9 @@ func (r *DeploymentProvisioner) handleNewDeployment(ctx context.Context, dep *sc
if err != nil {
return fmt.Errorf("failed to create service %s: %w", name, err)
}
logger.Infof("created kube service %s", name)
logger.Debugf("created kube service %s", name)
} else {
logger.Infof("service %s already exists", name)
logger.Debugf("service %s already exists", name)
}

// Now create a ServiceAccount, we mostly need this for Istio but we create it for all deployments
@@ -226,7 +227,7 @@ func (r *DeploymentProvisioner) handleNewDeployment(ctx context.Context, dep *sc
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to get service account %s: %w", name, err)
}
logger.Infof("creating new kube service account %s", name)
logger.Debugf("creating new kube service account %s", name)
err = decodeBytesToObject([]byte(cm.Data[serviceAccountTemplate]), serviceAccount)
if err != nil {
return fmt.Errorf("failed to decode service account from configMap %s: %w", configMapName, err)
@@ -238,14 +239,14 @@ func (r *DeploymentProvisioner) handleNewDeployment(ctx context.Context, dep *sc
if err != nil {
return fmt.Errorf("failed to create service account%s: %w", name, err)
}
logger.Infof("created kube service account%s", name)
logger.Debugf("created kube service account%s", name)
} else {
logger.Infof("service account %s already exists", name)
logger.Debugf("service account %s already exists", name)
}

// Now create the deployment

logger.Infof("creating new kube deployment %s", name)
logger.Debugf("creating new kube deployment %s", name)
thisImage, err := r.thisContainerImage(ctx)
if err != nil {
return fmt.Errorf("failed to get container image: %w", err)
@@ -290,12 +291,13 @@ func (r *DeploymentProvisioner) handleNewDeployment(ctx context.Context, dep *sc
change(deployment)
}
deployment.Labels = addLabel(deployment.Labels, deploymentLabel, name)
deployment.Labels = addLabel(deployment.Labels, "app", name)

deployment, err = deploymentClient.Create(ctx, deployment, v1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create deployment %s: %w", deployment.Name, err)
}
logger.Infof("created kube deployment %s", name)
logger.Debugf("created kube deployment %s", name)

return nil
}
@@ -426,7 +428,7 @@ func (r *DeploymentProvisioner) deleteMissingDeployments(ctx context.Context) {
for _, deployment := range list.Items {
if !r.KnownDeployments[deployment.Name] {

logger.Infof("deleting service %s", deployment.Name)
logger.Debugf("deleting service %s", deployment.Name)
err = r.Client.CoreV1().Services(r.Namespace).Delete(ctx, deployment.Name, v1.DeleteOptions{})
if err != nil {
if !errors.IsNotFound(err) {
@@ -435,7 +437,7 @@ func (r *DeploymentProvisioner) deleteMissingDeployments(ctx context.Context) {
}
// With owner references the deployments should be deleted automatically
// However this is in transition so delete both
logger.Infof("deleting deployment %s as it is not a known module", deployment.Name)
logger.Debugf("deleting deployment %s as it is not a known module", deployment.Name)
err := deploymentClient.Delete(ctx, deployment.Name, v1.DeleteOptions{})
if err != nil {
logger.Errorf(err, "failed to delete deployment %s", deployment.Name)
@@ -446,7 +448,7 @@ func (r *DeploymentProvisioner) deleteMissingDeployments(ctx context.Context) {

func (r *DeploymentProvisioner) syncIstioPolicy(ctx context.Context, sec istioclient.Clientset, name string, service *kubecore.Service) error {
logger := log.FromContext(ctx)
logger.Infof("creating new istio policy for %s", name)
logger.Debugf("creating new istio policy for %s", name)
var update func(policy *istiosec.AuthorizationPolicy) error

policiesClient := sec.SecurityV1().AuthorizationPolicies(r.Namespace)
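The rest of syncIstioPolicy is collapsed in this view. A minimal sketch of a create-or-update flow that would fit the signature and the update variable above, using Istio's client-go types; the rule contents, the import aliases (istioapi for istio.io/api/security/v1beta1, istiotype for istio.io/api/type/v1beta1), and the controller service-account name are assumptions, not the commit's actual body:

	policy, err := policiesClient.Get(ctx, name, v1.GetOptions{})
	if err != nil {
		if !errors.IsNotFound(err) {
			return fmt.Errorf("failed to get istio policy %s: %w", name, err)
		}
		policy = &istiosec.AuthorizationPolicy{}
		policy.Name = name
		policy.Namespace = r.Namespace
		update = func(p *istiosec.AuthorizationPolicy) error {
			_, err := policiesClient.Create(ctx, p, v1.CreateOptions{})
			return err
		}
	} else {
		update = func(p *istiosec.AuthorizationPolicy) error {
			_, err := policiesClient.Update(ctx, p, v1.UpdateOptions{})
			return err
		}
	}
	// Allow only trusted principals to reach the module's pods; a direct
	// call from any other pod (like naughty's GET below) gets rejected by
	// the target's sidecar.
	policy.Spec.Selector = &istiotype.WorkloadSelector{MatchLabels: map[string]string{"app": name}}
	policy.Spec.Action = istioapi.AuthorizationPolicy_ALLOW
	policy.Spec.Rules = []*istioapi.Rule{{
		From: []*istioapi.Rule_From{{
			Source: &istioapi.Source{Principals: []string{
				"cluster.local/ns/" + r.Namespace + "/sa/ftl-controller",
			}},
		}},
	}}
	return update(policy)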
2 changes: 1 addition & 1 deletion backend/controller/scaling/k8sscaling/k8s_scaling.go
@@ -56,7 +56,7 @@ func (k k8sScaling) Start(ctx context.Context, controller url.URL, leaser leases
}
}

logger.Infof("using namespace %s", namespace)
logger.Debugf("using namespace %s", namespace)
deploymentReconciler := &DeploymentProvisioner{
Client: clientset,
Namespace: namespace,
21 changes: 20 additions & 1 deletion backend/controller/scaling/kube_scaling_integration_test.go
@@ -5,6 +5,7 @@ package scaling_test
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"testing"
@@ -31,13 +32,31 @@ func runKubeScalingTest(t *testing.T, istio bool) {
done.Store(false)
routineStopped := sync.WaitGroup{}
routineStopped.Add(1)
+echoDeployment := map[string]string{}
in.Run(t,
in.WithIstio(istio),
in.CopyModule("echo"),
in.Deploy("echo"),
in.CopyModule("naughty"),
in.Deploy("naughty"),
in.Call("echo", "echo", "Bob", func(t testing.TB, response string) {
assert.Equal(t, "Hello, Bob!!!", response)
}),
+in.VerifyKubeState(func(ctx context.Context, t testing.TB, namespace string, client *kubernetes.Clientset) {
+deps, err := client.AppsV1().Deployments(namespace).List(ctx, v1.ListOptions{})
+assert.NoError(t, err)
+for _, dep := range deps.Items {
+if strings.HasPrefix(dep.Name, "dpl-echo") {
+echoDeployment["name"] = dep.Name
+}
+}
+assert.NotEqual(t, "", echoDeployment["name"])
+}),
in.Call("naughty", "beNaughty", echoDeployment, func(t testing.TB, response string) {
// If istio is not present we should be able to ping the echo service directly.
// Istio should prevent this
assert.Equal(t, strconv.FormatBool(!istio), response)
}),
in.EditFile("echo", func(content []byte) []byte {
return []byte(strings.ReplaceAll(string(content), "Hello", "Bye"))
}, "echo.go"),
@@ -70,7 +89,7 @@
assert.NoError(t, err)
depCount := 0
for _, dep := range deps.Items {
if strings.HasPrefix(dep.Name, "dpl-echo") || strings.HasPrefix(dep.Name, "dpl-time") {
if strings.HasPrefix(dep.Name, "dpl-echo") {
depCount++
service, err := client.CoreV1().Services(namespace).Get(ctx, dep.Name, v1.GetOptions{})
assert.NoError(t, err)
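Note the test verifies Istio's effect (the naughty call) rather than injection itself. If one also wanted to assert the sidecar is present, a further VerifyKubeState step in the same style could look like the following sketch; it assumes the pods carry the app label added in deployment_provisioner.go and that the injected container keeps Istio's default name:

	in.VerifyKubeState(func(ctx context.Context, t testing.TB, namespace string, client *kubernetes.Clientset) {
		pods, err := client.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{
			LabelSelector: "app=" + echoDeployment["name"],
		})
		assert.NoError(t, err)
		assert.True(t, len(pods.Items) > 0)
		for _, pod := range pods.Items {
			injected := false
			for _, container := range pod.Spec.Containers {
				if container.Name == "istio-proxy" { // Istio's injected Envoy sidecar
					injected = true
				}
			}
			assert.True(t, injected)
		}
	}),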
2 changes: 2 additions & 0 deletions backend/controller/scaling/testdata/go/naughty/ftl.toml
@@ -0,0 +1,2 @@
module = "naughty"
language = "go"
8 changes: 8 additions & 0 deletions backend/controller/scaling/testdata/go/naughty/go.mod
@@ -0,0 +1,8 @@
module ftl/naughty

go 1.23.0

replace (
github.com/TBD54566975/ftl => ./../../../../../..

)
Empty file.
29 changes: 29 additions & 0 deletions backend/controller/scaling/testdata/go/naughty/naughty.go
@@ -0,0 +1,29 @@
// This is the naughty module.
package naughty

import (
"context"
"fmt"
"io"
"net/http"
"strconv"
)

// BeNaughty attempts to ping the echo deployment directly, bypassing FTL, and
// returns "true" if it received a 200 response. Without Istio the health
// endpoint answers, so this yields "true"; with Istio the sidecar is expected
// to reject the request, yielding "false".
//
//ftl:verb export
func BeNaughty(ctx context.Context, endpoint map[string]string) (string, error) {
url := "http://" + endpoint["name"] + ":8893/healthz" // Replace with your actual URL

resp, err := http.Get(url)
if err != nil {
return fmt.Sprintf("Error making GET request: to %s %v\n", url, err), nil
}
defer resp.Body.Close()

_, err = io.ReadAll(resp.Body)
if err != nil {
return fmt.Sprintf("Error reading response body: %v\n", err), nil
}
return strconv.FormatBool(resp.StatusCode == 200), nil
}
1 change: 1 addition & 0 deletions deployment/Justfile
@@ -60,6 +60,7 @@ install-istio:
helm install istio-base istio/base -n istio-system --wait
helm install istiod istio/istiod -n istio-system --wait
fi
+kubectl label namespace default istio-injection=enabled --overwrite
kubectl kustomize --load-restrictor=LoadRestrictionsNone istio | kubectl apply -f -
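The new kubectl label line is what switches on automatic sidecar injection for the default namespace, so everything the test deploys there gets an Envoy proxy. A small client-go sketch for checking the label programmatically; the kubeconfig path and namespace are assumptions:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the same kubeconfig kubectl would use.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	ns, err := client.CoreV1().Namespaces().Get(context.Background(), "default", v1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// Mirrors the label set by the Justfile recipe above.
	fmt.Println("injection enabled:", ns.Labels["istio-injection"] == "enabled")
}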


