From 966b746438874082acf94752f7c83f96b2a3955c Mon Sep 17 00:00:00 2001
From: Stuart Douglas
Date: Thu, 5 Dec 2024 11:55:28 +1100
Subject: [PATCH] handle module removal

---
 backend/controller/controller.go              |  47 +++++---
 .../controller/encryption/integration_test.go |  38 -------
 .../provisioner/runner_scaling_provisioner.go |  20 ++--
 .../scaling/k8sscaling/k8s_scaling.go         | 100 +++++++-----------
 .../scaling/local_scaling_integration_test.go |  29 +----
 .../scaling/localscaling/local_scaling.go     |  19 +++-
 backend/provisioner/scaling/scaling.go        |   4 +-
 examples/java/echo/ftl.toml                   |   2 +-
 examples/java/time/ftl.toml                   |   2 +-
 .../schemaeventsource/schemaeventsource.go    |   3 +
 jvm-runtime/jvm_integration_test.go           |   3 +-
 11 files changed, 110 insertions(+), 157 deletions(-)

diff --git a/backend/controller/controller.go b/backend/controller/controller.go
index 02c5136754..15085d13b3 100644
--- a/backend/controller/controller.go
+++ b/backend/controller/controller.go
@@ -834,8 +834,7 @@ func hashConfigurationMap(h hash.Hash, m map[string][]byte) error {
 	return nil
 }
 
-// hashRoutesTable computes an order invariant checksum on the configuration
-// settings supplied in the map.
+// hashRoutesTable computes an order-invariant checksum of the routes table.
 func hashRoutesTable(h hash.Hash, m map[string]string) error {
 	keys := maps.Keys(m)
 	sort.Strings(keys)
@@ -1557,9 +1556,9 @@ func (s *Service) watchModuleChanges(ctx context.Context, sendChange func(respon
 		hash        []byte
 		minReplicas int
 	}
-	moduleState := map[string]moduleStateEntry{}
+	deploymentState := map[string]moduleStateEntry{}
 	moduleByDeploymentKey := map[string]string{}
-	mostRecentDeploymentByModule := map[string]string{}
+	aliveDeploymentsForModule := map[string]map[string]bool{}
 	schemaByDeploymentKey := map[string]*schemapb.Module{}
 
 	// Seed the notification channel with the current deployments.
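The hunks below replace the single mostRecentDeploymentByModule pointer with a per-module set of live deployments, so a module is only reported as removed when its last live deployment goes away. A minimal sketch of that bookkeeping, using plain strings for deployment keys (the helper names here are illustrative, not part of the patch):

    package main

    import "fmt"

    // aliveDeployments tracks, per module, the deployments with MinReplicas > 0.
    type aliveDeployments map[string]map[string]bool

    // markAlive records a deployment as live for its module.
    func (a aliveDeployments) markAlive(module, key string) {
        if a[module] == nil {
            a[module] = map[string]bool{}
        }
        a[module][key] = true
    }

    // markDead drops a deployment and reports whether the module now has no
    // live deployments left, i.e. whether the module itself should be removed.
    func (a aliveDeployments) markDead(module, key string) bool {
        alive, ok := a[module]
        if !ok {
            return true
        }
        delete(alive, key)
        if len(alive) == 0 {
            delete(a, module)
            return true
        }
        return false
    }

    func main() {
        a := aliveDeployments{}
        a.markAlive("echo", "dpl-echo-1")
        a.markAlive("echo", "dpl-echo-2")             // rolling update: two live deployments
        fmt.Println(a.markDead("echo", "dpl-echo-1")) // false: dpl-echo-2 is still alive
        fmt.Println(a.markDead("echo", "dpl-echo-2")) // true: last one gone, module removed
    }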
@@ -1602,7 +1601,14 @@ func (s *Service) watchModuleChanges(ctx context.Context, sendChange func(respon
 		if deletion, ok := notification.Deleted.Get(); ok {
 			name := moduleByDeploymentKey[deletion.String()]
 			schema := schemaByDeploymentKey[deletion.String()]
-			moduleRemoved := mostRecentDeploymentByModule[name] == deletion.String()
+			moduleRemoved := true
+			if aliveDeploymentsForModule[name] != nil {
+				delete(aliveDeploymentsForModule[name], deletion.String())
+				moduleRemoved = len(aliveDeploymentsForModule[name]) == 0
+				if moduleRemoved {
+					delete(aliveDeploymentsForModule, name)
+				}
+			}
 			response = &ftlv1.PullSchemaResponse{
 				ModuleName:    name,
 				DeploymentKey: proto.String(deletion.String()),
@@ -1610,12 +1616,9 @@ func (s *Service) watchModuleChanges(ctx context.Context, sendChange func(respon
 				ModuleRemoved: moduleRemoved,
 				Schema:        schema,
 			}
-			delete(moduleState, name)
+			delete(deploymentState, deletion.String())
 			delete(moduleByDeploymentKey, deletion.String())
 			delete(schemaByDeploymentKey, deletion.String())
-			if moduleRemoved {
-				delete(mostRecentDeploymentByModule, name)
-			}
 		} else if message, ok := notification.Message.Get(); ok {
 			if message.Schema.Runtime == nil {
 				message.Schema.Runtime = &schema.ModuleRuntime{}
@@ -1640,14 +1643,25 @@ func (s *Service) watchModuleChanges(ctx context.Context, sendChange func(respon
 				hash:        hasher.Sum(nil),
 				minReplicas: message.MinReplicas,
 			}
-			if current, ok := moduleState[message.Schema.Name]; ok {
+			if current, ok := deploymentState[message.Key.String()]; ok {
 				if !bytes.Equal(current.hash, newState.hash) || current.minReplicas != newState.minReplicas {
+					alive := aliveDeploymentsForModule[moduleSchema.Name]
+					if alive == nil {
+						alive = map[string]bool{}
+						aliveDeploymentsForModule[moduleSchema.Name] = alive
+					}
+					if newState.minReplicas > 0 {
+						alive[message.Key.String()] = true
+					} else {
+						delete(alive, message.Key.String())
+					}
 					changeType := ftlv1.DeploymentChangeType_DEPLOYMENT_CHANGE_TYPE_CHANGED
 					// A deployment is considered removed if its minReplicas is set to 0.
 					moduleRemoved := false
 					if current.minReplicas > 0 && message.MinReplicas == 0 {
 						changeType = ftlv1.DeploymentChangeType_DEPLOYMENT_CHANGE_TYPE_REMOVED
-						moduleRemoved = mostRecentDeploymentByModule[message.Schema.Name] == message.Key.String()
+						moduleRemoved = len(alive) == 0
+						logger.Infof("Deployment %s was deleted via update notification with module removed %v", message.Key, moduleRemoved)
 					}
 					response = &ftlv1.PullSchemaResponse{
 						ModuleName:    moduleSchema.Name,
@@ -1658,7 +1672,12 @@ func (s *Service) watchModuleChanges(ctx context.Context, sendChange func(respon
 					}
 				}
 			} else {
-				mostRecentDeploymentByModule[message.Schema.Name] = message.Key.String()
+				alive := aliveDeploymentsForModule[moduleSchema.Name]
+				if alive == nil {
+					alive = map[string]bool{}
+					aliveDeploymentsForModule[moduleSchema.Name] = alive
+				}
+				alive[message.Key.String()] = true
 				response = &ftlv1.PullSchemaResponse{
 					ModuleName:    moduleSchema.Name,
 					DeploymentKey: proto.String(message.Key.String()),
@@ -1670,9 +1689,7 @@ func (s *Service) watchModuleChanges(ctx context.Context, sendChange func(respon
 					initialCount--
 				}
 			}
-			moduleState[message.Schema.Name] = newState
-			delete(moduleByDeploymentKey, message.Key.String()) // The deployment may have changed.
-			delete(schemaByDeploymentKey, message.Key.String())
+			deploymentState[message.Key.String()] = newState
 			moduleByDeploymentKey[message.Key.String()] = message.Schema.Name
 			schemaByDeploymentKey[message.Key.String()] = moduleSchema
 		}
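The net effect on the PullSchema stream: a deployment update that drops MinReplicas to 0 still produces a REMOVED response, but ModuleRemoved is only true when no other deployment of the module remains alive. A condensed, free-standing sketch of that decision (names are illustrative):

    package main

    import "fmt"

    // classify mirrors the branch above: a deployment whose replicas drop to
    // zero is REMOVED; the module itself is only removed when the module's
    // alive set is empty afterwards.
    func classify(oldReplicas, newReplicas, aliveAfter int) (changeType string, moduleRemoved bool) {
        if oldReplicas > 0 && newReplicas == 0 {
            return "REMOVED", aliveAfter == 0
        }
        return "CHANGED", false
    }

    func main() {
        fmt.Println(classify(1, 0, 1)) // REMOVED false: another deployment still serves the module
        fmt.Println(classify(1, 0, 0)) // REMOVED true: that was the module's last deployment
    }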
diff --git a/backend/controller/encryption/integration_test.go b/backend/controller/encryption/integration_test.go
index 811c3c7f56..2f63765b21 100644
--- a/backend/controller/encryption/integration_test.go
+++ b/backend/controller/encryption/integration_test.go
@@ -30,44 +30,6 @@ func WithEncryption() in.Option {
 	return in.WithEnvar("FTL_KMS_URI", "fake-kms://CKbvh_ILElQKSAowdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuQWVzR2NtS2V5EhIaEE6tD2yE5AWYOirhmkY-r3sYARABGKbvh_ILIAE")
 }
 
-func TestEncryptionForLogs(t *testing.T) {
-	t.Skip("This test needs the timeline service refactoring done")
-	in.Run(t,
-		WithEncryption(),
-		in.CopyModule("encryption"),
-		in.Deploy("encryption"),
-		in.Call[map[string]interface{}, any]("encryption", "echo", map[string]interface{}{"name": "Alice"}, nil),
-
-		// confirm that we can read an event for that call
-		func(t testing.TB, ic in.TestContext) {
-			in.Infof("Read Logs")
-			resp, err := ic.Console.GetEvents(ic.Context, connect.NewRequest(&pbconsole.GetEventsRequest{
-				Limit: 10,
-			}))
-			assert.NoError(t, err, "could not get events")
-			_, ok := slices.Find(resp.Msg.Events, func(e *pbtimeline.Event) bool {
-				call, ok := e.Entry.(*pbtimeline.Event_Call)
-				if !ok {
-					return false
-				}
-				assert.Contains(t, call.Call.Request, "Alice", "request does not contain expected value")
-
-				return true
-			})
-			assert.True(t, ok, "could not find event")
-		},
-
-		// confirm that we can't find that raw request string in the table
-		in.QueryRow("ftl", "SELECT COUNT(*) FROM timeline WHERE type = 'call'", int64(1)),
-		func(t testing.TB, ic in.TestContext) {
-			values := in.GetRow(t, ic, "ftl", "SELECT payload FROM timeline WHERE type = 'call' LIMIT 1", 1)
-			payload, ok := values[0].([]byte)
-			assert.True(t, ok, "could not convert payload to string")
-			assert.NotContains(t, string(payload), "Alice", "raw request string should not be stored in the table")
-		},
-	)
-}
-
 func TestEncryptionForPubSub(t *testing.T) {
 	in.Run(t,
 		WithEncryption(),
diff --git a/backend/provisioner/runner_scaling_provisioner.go b/backend/provisioner/runner_scaling_provisioner.go
index 8e7b7bc88f..808ef8ce8b 100644
--- a/backend/provisioner/runner_scaling_provisioner.go
+++ b/backend/provisioner/runner_scaling_provisioner.go
@@ -50,7 +50,7 @@ func provisionRunner(scaling scaling.RunnerScaling) InMemResourceProvisionerFn {
 			return nil, fmt.Errorf("failed to parse schema: %w", err)
 		}
 		logger.Debugf("provisioning runner: %s.%s for deployment %s", module, id, deployment)
-		err = scaling.StartDeployment(ctx, module, deployment, schema)
+		err = scaling.StartDeployment(ctx, module, deployment, schema, false, false)
 		if err != nil {
 			logger.Infof("failed to start deployment: %v", err)
 			return nil, fmt.Errorf("failed to start deployment: %w", err)
@@ -80,18 +80,24 @@ func provisionRunner(scaling scaling.RunnerScaling) InMemResourceProvisionerFn {
 			}
 		}
 
+		schemaClient := rpc.ClientFromContext[ftlv1connect.SchemaServiceClient](ctx)
+		controllerClient := rpc.ClientFromContext[ftlv1connect.ControllerServiceClient](ctx)
 		runner.Runner.Output = &provisioner.RunnerResource_RunnerResourceOutput{
 			RunnerUri:     endpointURI,
 			DeploymentKey: deployment,
 		}
 
-		if previous != nil && previous.GetRunner().GetOutput().GetDeploymentKey() != deployment {
-			logger.Debugf("terminating previous deployment: %s", previous.GetRunner().GetOutput().GetDeploymentKey())
-			err := scaling.TerminateDeployment(ctx, module, previous.GetRunner().GetOutput().GetDeploymentKey())
-			if err != nil {
-				logger.Errorf(err, "failed to terminate previous deployment")
+		deps, err := scaling.TerminatePreviousDeployments(ctx, module, deployment)
+		if err != nil {
+			logger.Errorf(err, "failed to terminate previous deployments")
+		} else {
+			var zero int32
+			for _, dep := range deps {
+				_, err := controllerClient.UpdateDeploy(ctx, connect.NewRequest(&ftlv1.UpdateDeployRequest{DeploymentKey: dep, MinReplicas: &zero}))
+				if err != nil {
+					logger.Errorf(err, "failed to update deployment %s", dep)
+				}
 			}
 		}
-		schemaClient := rpc.ClientFromContext[ftlv1connect.SchemaServiceClient](ctx)
 		logger.Infof("updating module runtime for %s with endpoint %s", module, endpointURI)
 		_, err = schemaClient.UpdateDeploymentRuntime(ctx, connect.NewRequest(&ftlv1.UpdateDeploymentRuntimeRequest{Deployment: deployment, Event: &schemapb.ModuleRuntimeEvent{Value: &schemapb.ModuleRuntimeEvent_ModuleRuntimeDeployment{ModuleRuntimeDeployment: &schemapb.ModuleRuntimeDeployment{DeploymentKey: deployment, Endpoint: endpointURI}}}}))
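The hand-off above is the core behavioural change on the provisioner side: TerminatePreviousDeployments now only reports which deployments were superseded, and the provisioner itself scales each one to zero via UpdateDeploy. A sketch of that calling pattern (drainPrevious is a hypothetical helper; the interface and request types are the ones used in this patch):

    package provisionersketch

    import (
        "context"
        "fmt"

        "connectrpc.com/connect"

        ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1"
        "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1/ftlv1connect"
        "github.com/TBD54566975/ftl/backend/provisioner/scaling"
    )

    // drainPrevious terminates every deployment of module other than current,
    // then asks the controller to scale each of them down to zero replicas.
    func drainPrevious(ctx context.Context, s scaling.RunnerScaling, client ftlv1connect.ControllerServiceClient, module, current string) error {
        deps, err := s.TerminatePreviousDeployments(ctx, module, current)
        if err != nil {
            return fmt.Errorf("failed to terminate previous deployments: %w", err)
        }
        var zero int32
        for _, dep := range deps {
            _, err := client.UpdateDeploy(ctx, connect.NewRequest(&ftlv1.UpdateDeployRequest{DeploymentKey: dep, MinReplicas: &zero}))
            if err != nil {
                return fmt.Errorf("failed to update deployment %s: %w", dep, err)
            }
        }
        return nil
    }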
fmt.Errorf("failed to get deployment %s: %w", deploymentKey, err) + return nil, fmt.Errorf("failed to list deployments: %w", err) + } + for _, deploy := range deployments.Items { + if deploy.Name != deploymentKey { + logger.Debugf("Queing old deployment %s for deletion", deploy.Name) + ret = append(ret, deploy.Name) } } - - if deploymentExists { - go func() { - - // Nasty hack, we want all the controllers to have updated their route tables before we kill the runner - // so we add a slight delay here - time.Sleep(time.Second * 10) - r.knownDeployments.Delete(deploymentKey) - logger.Debugf("Deleting service %s", module) - err = r.client.CoreV1().Services(r.namespace).Delete(ctx, deploymentKey, v1.DeleteOptions{}) + // So hacky, all this needs to change when the provisioner is a proper schema observer + go func() { + time.Sleep(time.Second * 20) + for _, dep := range ret { + err = deploymentClient.Delete(delCtx, dep, v1.DeleteOptions{}) if err != nil { - if !errors.IsNotFound(err) { - logger.Errorf(err, "Failed to delete service %s", module) - } + logger.Errorf(err, "Failed to delete deployment %s", dep) } - }() - } - return nil + } + }() + return ret, nil } func (r *k8sScaling) Start(ctx context.Context) error { @@ -283,7 +271,7 @@ func (r *k8sScaling) thisContainerImage(ctx context.Context) (string, error) { return thisDeployment.Spec.Template.Spec.Containers[0].Image, nil } -func (r *k8sScaling) handleNewDeployment(ctx context.Context, module string, name string, sch *schema.Module) error { +func (r *k8sScaling) handleNewDeployment(ctx context.Context, module string, name string, sch *schema.Module, cron bool, ingress bool) error { logger := log.FromContext(ctx) cm, err := r.client.CoreV1().ConfigMaps(r.namespace).Get(ctx, configMapName, v1.GetOptions{}) @@ -355,7 +343,7 @@ func (r *k8sScaling) handleNewDeployment(ctx context.Context, module string, nam // Sync the istio policy if applicable if sec, ok := r.istioSecurity.Get(); ok { - err = r.syncIstioPolicy(ctx, sec, module, name, service, controllerDeployment, provisionerDeployment, sch) + err = r.syncIstioPolicy(ctx, sec, module, name, service, controllerDeployment, provisionerDeployment, sch, cron, ingress) if err != nil { return err } @@ -548,7 +536,7 @@ func (r *k8sScaling) updateEnvVar(deployment *kubeapps.Deployment, envVerName st return changes } -func (r *k8sScaling) syncIstioPolicy(ctx context.Context, sec istioclient.Clientset, module string, name string, service *kubecore.Service, controllerDeployment *kubeapps.Deployment, provisionerDeployment *kubeapps.Deployment, sch *schema.Module) error { +func (r *k8sScaling) syncIstioPolicy(ctx context.Context, sec istioclient.Clientset, module string, name string, service *kubecore.Service, controllerDeployment *kubeapps.Deployment, provisionerDeployment *kubeapps.Deployment, sch *schema.Module, hasCron bool, hasIngress bool) error { logger := log.FromContext(ctx) logger.Debugf("Creating new istio policy for %s", name) @@ -580,15 +568,26 @@ func (r *k8sScaling) syncIstioPolicy(ctx context.Context, sec istioclient.Client // At present we only allow ingress from the controller policy.Spec.Selector = &v1beta1.WorkloadSelector{MatchLabels: map[string]string{"app": name}} policy.Spec.Action = istiosecmodel.AuthorizationPolicy_ALLOW + principals := []string{ + "cluster.local/ns/" + r.namespace + "/sa/" + controllerDeployment.Spec.Template.Spec.ServiceAccountName, + "cluster.local/ns/" + r.namespace + "/sa/" + provisionerDeployment.Spec.Template.Spec.ServiceAccountName, + } + // TODO: fix 
hard coded service account names + if hasIngress { + // Allow ingress from the ingress gateway + principals = append(principals, "cluster.local/ns/"+r.namespace+"/sa/ftl-http-ingress") + } + + if hasCron { + // Allow cron invocations + principals = append(principals, "cluster.local/ns/"+r.namespace+"/sa/ftl-cron") + } policy.Spec.Rules = []*istiosecmodel.Rule{ { From: []*istiosecmodel.Rule_From{ { Source: &istiosecmodel.Source{ - Principals: []string{ - "cluster.local/ns/" + r.namespace + "/sa/" + controllerDeployment.Spec.Template.Spec.ServiceAccountName, - "cluster.local/ns/" + r.namespace + "/sa/" + provisionerDeployment.Spec.Template.Spec.ServiceAccountName, - }, + Principals: principals, }, }, }, @@ -692,25 +691,6 @@ func (r *k8sScaling) waitForDeploymentReady(ctx context.Context, key string, tim } } -func (r *k8sScaling) deleteOldDeployments(ctx context.Context, module string, deployment string) error { - logger := log.FromContext(ctx) - deploymentClient := r.client.AppsV1().Deployments(r.namespace) - deployments, err := deploymentClient.List(ctx, v1.ListOptions{LabelSelector: moduleLabel + "=" + module}) - if err != nil { - return fmt.Errorf("failed to list deployments: %w", err) - } - for _, deploy := range deployments.Items { - if deploy.Name != deployment { - logger.Debugf("Deleting old deployment %s", deploy.Name) - err = deploymentClient.Delete(ctx, deploy.Name, v1.DeleteOptions{}) - if err != nil { - logger.Errorf(err, "Failed to delete deployment %s", deploy.Name) - } - } - } - return nil -} - func extractTag(image string) (string, error) { idx := strings.LastIndex(image, ":") if idx == -1 { diff --git a/backend/provisioner/scaling/local_scaling_integration_test.go b/backend/provisioner/scaling/local_scaling_integration_test.go index eca2245fb9..b1d89f2931 100644 --- a/backend/provisioner/scaling/local_scaling_integration_test.go +++ b/backend/provisioner/scaling/local_scaling_integration_test.go @@ -3,23 +3,15 @@ package scaling_test import ( - "fmt" "strings" - "sync" "testing" "github.com/alecthomas/assert/v2" - "github.com/alecthomas/atomic" in "github.com/TBD54566975/ftl/internal/integration" ) func TestLocalScaling(t *testing.T) { - failure := atomic.Value[error]{} - done := atomic.Value[bool]{} - routineStopped := sync.WaitGroup{} - routineStopped.Add(1) - done.Store(false) in.Run(t, in.CopyModule("echo"), in.Deploy("echo"), @@ -29,29 +21,10 @@ func TestLocalScaling(t *testing.T) { in.EditFile("echo", func(content []byte) []byte { return []byte(strings.ReplaceAll(string(content), "Hello", "Bye")) }, "echo.go"), - func(t testing.TB, ic in.TestContext) { - // Hit the verb constantly to test rolling updates. 
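StartDeployment's new hasCron and hasIngress flags feed the istio policy above; the provisioner currently passes false, false, so the extra principals are not yet granted. A hypothetical derivation of the flags from the module schema, assuming FTL's schema metadata types (this helper is not part of the patch):

    package k8ssketch

    import "github.com/TBD54566975/ftl/internal/schema"

    // cronAndIngress reports whether a module declares any cron or ingress verbs.
    func cronAndIngress(sch *schema.Module) (hasCron, hasIngress bool) {
        for _, decl := range sch.Decls {
            verb, ok := decl.(*schema.Verb)
            if !ok {
                continue
            }
            for _, md := range verb.Metadata {
                switch md.(type) {
                case *schema.MetadataCronJob:
                    hasCron = true
                case *schema.MetadataIngress:
                    hasIngress = true
                }
            }
        }
        return hasCron, hasIngress
    }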
diff --git a/backend/provisioner/scaling/local_scaling_integration_test.go b/backend/provisioner/scaling/local_scaling_integration_test.go
index eca2245fb9..b1d89f2931 100644
--- a/backend/provisioner/scaling/local_scaling_integration_test.go
+++ b/backend/provisioner/scaling/local_scaling_integration_test.go
@@ -3,23 +3,15 @@
 package scaling_test
 
 import (
-	"fmt"
 	"strings"
-	"sync"
 	"testing"
 
 	"github.com/alecthomas/assert/v2"
-	"github.com/alecthomas/atomic"
 
 	in "github.com/TBD54566975/ftl/internal/integration"
 )
 
 func TestLocalScaling(t *testing.T) {
-	failure := atomic.Value[error]{}
-	done := atomic.Value[bool]{}
-	routineStopped := sync.WaitGroup{}
-	routineStopped.Add(1)
-	done.Store(false)
 	in.Run(t,
 		in.CopyModule("echo"),
 		in.Deploy("echo"),
@@ -29,29 +21,10 @@ func TestLocalScaling(t *testing.T) {
 		in.EditFile("echo", func(content []byte) []byte {
 			return []byte(strings.ReplaceAll(string(content), "Hello", "Bye"))
 		}, "echo.go"),
-		func(t testing.TB, ic in.TestContext) {
-			// Hit the verb constantly to test rolling updates.
-			go func() {
-				defer routineStopped.Done()
-				for !done.Load() {
-					in.Call("echo", "echo", "Bob", func(t testing.TB, response string) {
-						if !strings.Contains(response, "Bob") {
-							failure.Store(fmt.Errorf("unexpected response: %s", response))
-							return
-						}
-					})(t, ic)
-				}
-			}()
-		},
+		in.Deploy("echo"),
 		in.Call("echo", "echo", "Bob", func(t testing.TB, response string) {
 			assert.Equal(t, "Bye, Bob!!!", response)
 		}),
-		func(t testing.TB, ic in.TestContext) {
-			done.Store(true)
-			routineStopped.Wait()
-			err := failure.Load()
-			assert.NoError(t, err)
-		},
 	)
 }
 
diff --git a/backend/provisioner/scaling/localscaling/local_scaling.go b/backend/provisioner/scaling/localscaling/local_scaling.go
index a7ded40f3c..5e37d4e637 100644
--- a/backend/provisioner/scaling/localscaling/local_scaling.go
+++ b/backend/provisioner/scaling/localscaling/local_scaling.go
@@ -50,7 +50,7 @@ type localScaling struct {
 	devModeEndpoints map[string]*devModeRunner
 }
 
-func (l *localScaling) StartDeployment(ctx context.Context, module string, deployment string, sch *schema.Module) error {
+func (l *localScaling) StartDeployment(ctx context.Context, module string, deployment string, sch *schema.Module, hasCron bool, hasIngress bool) error {
 	if sch.Runtime == nil {
 		return nil
 	}
@@ -83,8 +83,21 @@ func (l *localScaling) setReplicas(module string, deployment string, language st
 	return l.reconcileRunners(ctx, deploymentRunners)
 }
 
-func (l *localScaling) TerminateDeployment(ctx context.Context, module string, deployment string) error {
-	return l.setReplicas(module, deployment, "", 0)
+func (l *localScaling) TerminatePreviousDeployments(ctx context.Context, module string, deployment string) ([]string, error) {
+	logger := log.FromContext(ctx)
+	var ret []string
+	// So hacky, all this needs to change when the provisioner is a proper schema observer
+	logger.Debugf("Terminating previous deployments for %s", deployment)
+	for dep := range l.runners[module] {
+		if dep != deployment {
+			ret = append(ret, dep)
+			logger.Debugf("Terminating deployment %s", dep)
+			if err := l.setReplicas(module, dep, "", 0); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return ret, nil
 }
 
 type devModeRunner struct {
diff --git a/backend/provisioner/scaling/scaling.go b/backend/provisioner/scaling/scaling.go
index 898bb0bc52..fe89254e79 100644
--- a/backend/provisioner/scaling/scaling.go
+++ b/backend/provisioner/scaling/scaling.go
@@ -14,7 +14,7 @@ type RunnerScaling interface {
 
 	GetEndpointForDeployment(ctx context.Context, module string, deployment string) (optional.Option[url.URL], error)
 
-	StartDeployment(ctx context.Context, module string, deployment string, sch *schema.Module) error
+	StartDeployment(ctx context.Context, module string, deployment string, sch *schema.Module, hasCron bool, hasIngress bool) error
 
-	TerminateDeployment(ctx context.Context, module string, deployment string) error
+	TerminatePreviousDeployments(ctx context.Context, module string, currentDeployment string) ([]string, error)
 }
diff --git a/examples/java/echo/ftl.toml b/examples/java/echo/ftl.toml
index 700b9d8833..de92e831e1 100644
--- a/examples/java/echo/ftl.toml
+++ b/examples/java/echo/ftl.toml
@@ -1,2 +1,2 @@
 module = "echo"
-language = "kotlin"
+language = "java"
diff --git a/examples/java/time/ftl.toml b/examples/java/time/ftl.toml
index 48033f28f8..e89ed11377 100644
--- a/examples/java/time/ftl.toml
+++ b/examples/java/time/ftl.toml
@@ -1,2 +1,2 @@
 module = "time"
-language = "kotlin"
+language = "java"
diff --git a/internal/schema/schemaeventsource/schemaeventsource.go b/internal/schema/schemaeventsource/schemaeventsource.go
index 92e0b7eee9..b08aada6e4 100644
--- a/internal/schema/schemaeventsource/schemaeventsource.go
+++ b/internal/schema/schemaeventsource/schemaeventsource.go
@@ -191,6 +191,9 @@ func New(ctx context.Context, client ftlv1connect.SchemaServiceClient) EventSour
 			more = more && resp.More
 			switch resp.ChangeType {
 			case ftlv1.DeploymentChangeType_DEPLOYMENT_CHANGE_TYPE_REMOVED:
+				if !resp.ModuleRemoved {
+					return nil
+				}
 				logger.Debugf("Module %s removed", sch.Name)
 				event := EventRemove{
 					Deployment: someDeploymentKey,
diff --git a/jvm-runtime/jvm_integration_test.go b/jvm-runtime/jvm_integration_test.go
index cd58510efb..a333543093 100644
--- a/jvm-runtime/jvm_integration_test.go
+++ b/jvm-runtime/jvm_integration_test.go
@@ -12,8 +12,8 @@ import (
 
 	"github.com/alecthomas/repr"
 
-	ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1"
 	schemapb "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/schema/v1"
+	ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1"
 	"github.com/TBD54566975/ftl/go-runtime/ftl"
 	in "github.com/TBD54566975/ftl/internal/integration"
 	"github.com/TBD54566975/ftl/internal/schema"
@@ -65,7 +65,6 @@ func TestLifecycleJVM(t *testing.T) {
 			assert.Equal(t, "Bye, Bob!", response)
 		}),
 		in.VerifyControllerStatus(func(ctx context.Context, t testing.TB, status *ftlv1.StatusResponse) {
-			// Non structurally changing edits should not trigger a new deployment.
 			assert.Equal(t, 1, len(status.Deployments))
 			assert.NotEqual(t, deployment, status.Deployments[0].Key)
 		}),
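On the consumer side, the schemaeventsource change above means subscribers no longer tear down a module when one of several deployments is removed: only a response carrying ModuleRemoved=true produces an EventRemove. A condensed version of that guard as a standalone predicate (isFinalRemoval is an illustrative name, not part of the patch):

    package eventsketch

    import ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1"

    // isFinalRemoval reports whether a REMOVED response should be treated as
    // the module disappearing, rather than an old deployment being superseded.
    func isFinalRemoval(resp *ftlv1.PullSchemaResponse) bool {
        return resp.ChangeType == ftlv1.DeploymentChangeType_DEPLOYMENT_CHANGE_TYPE_REMOVED &&
            resp.ModuleRemoved
    }

This matches the simplified TestLocalScaling flow: the second in.Deploy("echo") supersedes the first deployment without ever removing the echo module from subscribers' view.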