From 15cb549a5c040e52ce3d66cf66eb695ff2cd0f5f Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Feb 2024 18:51:40 -0800 Subject: [PATCH 01/13] instances: Fix data race in Expander.ExpandAbsModuleCall This method must hold a read lock while it does its work, or else it will race with calls to register the expansion mode for other module calls. --- internal/instances/expander.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/instances/expander.go b/internal/instances/expander.go index 497daae2f62a..b9d77dbffe4f 100644 --- a/internal/instances/expander.go +++ b/internal/instances/expander.go @@ -154,6 +154,9 @@ func (e *Expander) ExpandModule(addr addrs.Module) []addrs.ModuleInstance { // key type: integer keys are sorted numerically, and string keys are sorted // lexically. func (e *Expander) ExpandAbsModuleCall(addr addrs.AbsModuleCall) (keyType addrs.InstanceKeyType, insts []addrs.InstanceKey, known bool) { + e.mu.RLock() + defer e.mu.RUnlock() + expParent, ok := e.findModule(addr.Module) if !ok { // This module call lives under an unknown-expansion prefix, so we From 7dad938fdb6bfeaf7cc082ce50bcf99f7889a2a9 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Feb 2024 18:12:52 -0800 Subject: [PATCH 02/13] rpcapi+stacks: Stacks runtime can see whether experiments are allowed We allow experiments only in alpha builds, and so we propagate the flag for whether that's allowed in from "package main". We previously had that plumbed in only as far as the rpcapi startup. This plumbs the flag all the way into package stackeval so that we can in turn propagate it to Terraform's module config loader, which is ultimately the one responsible for ensuring that language experiments can be enabled only when the flag is set. Therefore it will now be possible to opt in to language experiments in modules that are used in stack components. 
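As a rough illustration of the plumbing pattern described above (not part of the patch itself): a flag decided in package main rides an options struct through each service constructor until it reaches the request that enforces it. The sketch below uses simplified stand-in types loosely modeled on serviceOpts and newStacksServer from this commit; it is an assumption-laden toy, not Terraform's real API.

    package main

    import "fmt"

    // serviceOpts mirrors the idea of options shared by all RPC services.
    type serviceOpts struct {
        experimentsAllowed bool
    }

    type stacksServer struct {
        experimentsAllowed bool
    }

    // newStacksServer copies the flag out of the shared options at construction time.
    func newStacksServer(opts *serviceOpts) *stacksServer {
        return &stacksServer{experimentsAllowed: opts.experimentsAllowed}
    }

    // validateRequest stands in for a stackruntime request that carries the flag onward.
    type validateRequest struct {
        ExperimentsAllowed bool
    }

    func (s *stacksServer) validate() {
        req := validateRequest{ExperimentsAllowed: s.experimentsAllowed}
        fmt.Printf("validating with experiments allowed: %t\n", req.ExperimentsAllowed)
    }

    func main() {
        // Only an alpha build would set this to true.
        opts := &serviceOpts{experimentsAllowed: true}
        newStacksServer(opts).validate()
    }
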
--- internal/rpcapi/internal_client.go | 2 +- internal/rpcapi/plugin.go | 22 ++++++++--- internal/rpcapi/stacks.go | 38 +++++++++++-------- internal/rpcapi/stacks_inspector.go | 20 +++++----- internal/rpcapi/stacks_test.go | 6 +-- internal/stacks/stackruntime/apply.go | 2 + internal/stacks/stackruntime/eval_expr.go | 3 ++ .../internal/stackeval/applying.go | 2 + .../internal/stackeval/component_config.go | 1 + .../stackruntime/internal/stackeval/main.go | 21 ++++++++++ .../internal/stackeval/main_apply.go | 1 + internal/stacks/stackruntime/plan.go | 3 ++ internal/stacks/stackruntime/validate.go | 3 ++ 13 files changed, 90 insertions(+), 34 deletions(-) diff --git a/internal/rpcapi/internal_client.go b/internal/rpcapi/internal_client.go index c3e86c5d8e18..ae11a5052574 100644 --- a/internal/rpcapi/internal_client.go +++ b/internal/rpcapi/internal_client.go @@ -42,7 +42,7 @@ type Client struct { func NewInternalClient(ctx context.Context, clientCaps *terraform1.ClientCapabilities) (*Client, error) { fakeListener := bufconn.Listen(4 * 1024 * 1024 /* buffer size */) srv := grpc.NewServer() - registerGRPCServices(srv) + registerGRPCServices(srv, &serviceOpts{}) go func() { if err := srv.Serve(fakeListener); err != nil { diff --git a/internal/rpcapi/plugin.go b/internal/rpcapi/plugin.go index 14e651d8d583..6d57920022c6 100644 --- a/internal/rpcapi/plugin.go +++ b/internal/rpcapi/plugin.go @@ -28,19 +28,22 @@ func (p *corePlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, } func (p *corePlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - registerGRPCServices(s) + generalOpts := &serviceOpts{ + experimentsAllowed: p.experimentsAllowed, + } + registerGRPCServices(s, generalOpts) return nil } -func registerGRPCServices(s *grpc.Server) { +func registerGRPCServices(s *grpc.Server, opts *serviceOpts) { // We initially only register the setup server, because the registration // of other services can vary depending on the capabilities negotiated // during handshake. - setup := newSetupServer(serverHandshake(s)) + setup := newSetupServer(serverHandshake(s, opts)) terraform1.RegisterSetupServer(s, setup) } -func serverHandshake(s *grpc.Server) func(context.Context, *terraform1.ClientCapabilities) (*terraform1.ServerCapabilities, error) { +func serverHandshake(s *grpc.Server, opts *serviceOpts) func(context.Context, *terraform1.ClientCapabilities) (*terraform1.ServerCapabilities, error) { dependencies := dynrpcserver.NewDependenciesStub() terraform1.RegisterDependenciesServer(s, dependencies) stacks := dynrpcserver.NewStacksStub() @@ -71,7 +74,7 @@ func serverHandshake(s *grpc.Server) func(context.Context, *terraform1.ClientCap // doing real work. In future the details of what we register here // might vary based on the negotiated capabilities. dependencies.ActivateRPCServer(newDependenciesServer(handles, services)) - stacks.ActivateRPCServer(newStacksServer(handles)) + stacks.ActivateRPCServer(newStacksServer(handles, opts)) packages.ActivateRPCServer(newPackagesServer(services)) // If the client requested any extra capabililties that we're going @@ -79,3 +82,12 @@ func serverHandshake(s *grpc.Server) func(context.Context, *terraform1.ClientCap return &terraform1.ServerCapabilities{}, nil } } + +// serviceOpts are options that could potentially apply to all of our +// individual RPC services. +// +// This could potentially be embedded inside a service-specific options +// structure, if needed. 
+type serviceOpts struct { + experimentsAllowed bool +} diff --git a/internal/rpcapi/stacks.go b/internal/rpcapi/stacks.go index 3779f3876f15..76218531f695 100644 --- a/internal/rpcapi/stacks.go +++ b/internal/rpcapi/stacks.go @@ -31,14 +31,16 @@ import ( type stacksServer struct { terraform1.UnimplementedStacksServer - handles *handleTable + handles *handleTable + experimentsAllowed bool } var _ terraform1.StacksServer = (*stacksServer)(nil) -func newStacksServer(handles *handleTable) *stacksServer { +func newStacksServer(handles *handleTable, opts *serviceOpts) *stacksServer { return &stacksServer{ - handles: handles, + handles: handles, + experimentsAllowed: opts.experimentsAllowed, } } @@ -104,7 +106,8 @@ func (s *stacksServer) ValidateStackConfiguration(ctx context.Context, req *terr } diags := stackruntime.Validate(ctx, &stackruntime.ValidateRequest{ - Config: cfg, + Config: cfg, + ExperimentsAllowed: s.experimentsAllowed, }) return &terraform1.ValidateStackConfiguration_Response{ Diagnostics: diagnosticsToProto(diags), @@ -230,11 +233,12 @@ func (s *stacksServer) PlanStackChanges(req *terraform1.PlanStackChanges_Request changesCh := make(chan stackplan.PlannedChange, 8) diagsCh := make(chan tfdiags.Diagnostic, 2) rtReq := stackruntime.PlanRequest{ - PlanMode: planMode, - Config: cfg, - PrevState: prevState, - ProviderFactories: providerFactories, - InputValues: inputValues, + PlanMode: planMode, + Config: cfg, + PrevState: prevState, + ProviderFactories: providerFactories, + InputValues: inputValues, + ExperimentsAllowed: s.experimentsAllowed, } rtResp := stackruntime.PlanResponse{ PlannedChanges: changesCh, @@ -368,9 +372,10 @@ func (s *stacksServer) ApplyStackChanges(req *terraform1.ApplyStackChanges_Reque changesCh := make(chan stackstate.AppliedChange, 8) diagsCh := make(chan tfdiags.Diagnostic, 2) rtReq := stackruntime.ApplyRequest{ - Config: cfg, - ProviderFactories: providerFactories, - RawPlan: req.PlannedChanges, + Config: cfg, + ProviderFactories: providerFactories, + RawPlan: req.PlannedChanges, + ExperimentsAllowed: s.experimentsAllowed, } rtResp := stackruntime.ApplyResponse{ AppliedChanges: changesCh, @@ -503,10 +508,11 @@ func (s *stacksServer) OpenStackInspector(ctx context.Context, req *terraform1.O } hnd := s.handles.NewStackInspector(&stacksInspector{ - Config: cfg, - State: state, - ProviderFactories: providerFactories, - InputValues: inputValues, + Config: cfg, + State: state, + ProviderFactories: providerFactories, + InputValues: inputValues, + ExperimentsAllowed: s.experimentsAllowed, }) return &terraform1.OpenStackInspector_Response{ diff --git a/internal/rpcapi/stacks_inspector.go b/internal/rpcapi/stacks_inspector.go index 0b7843781e82..4969b1cd43fc 100644 --- a/internal/rpcapi/stacks_inspector.go +++ b/internal/rpcapi/stacks_inspector.go @@ -29,10 +29,11 @@ import ( // provide what they want to inspect just once and then perform any number // of subsequent inspection actions against it. 
type stacksInspector struct { - Config *stackconfig.Config - State *stackstate.State - ProviderFactories map[addrs.Provider]providers.Factory - InputValues map[stackaddrs.InputVariable]stackruntime.ExternalInputValue + Config *stackconfig.Config + State *stackstate.State + ProviderFactories map[addrs.Provider]providers.Factory + InputValues map[stackaddrs.InputVariable]stackruntime.ExternalInputValue + ExperimentsAllowed bool } // InspectExpressionResult evaluates a given expression string in the @@ -57,11 +58,12 @@ func (i *stacksInspector) InspectExpressionResult(ctx context.Context, req *terr } val, moreDiags := stackruntime.EvalExpr(ctx, expr, &stackruntime.EvalExprRequest{ - Config: i.Config, - State: i.State, - EvalStackInstance: stackAddr, - InputValues: i.InputValues, - ProviderFactories: i.ProviderFactories, + Config: i.Config, + State: i.State, + EvalStackInstance: stackAddr, + InputValues: i.InputValues, + ProviderFactories: i.ProviderFactories, + ExperimentsAllowed: i.ExperimentsAllowed, }) diags = diags.Append(moreDiags) if val == cty.NilVal { diff --git a/internal/rpcapi/stacks_test.go b/internal/rpcapi/stacks_test.go index db56e4b5983d..f8955f295612 100644 --- a/internal/rpcapi/stacks_test.go +++ b/internal/rpcapi/stacks_test.go @@ -28,7 +28,7 @@ func TestStacksOpenCloseStackConfiguration(t *testing.T) { ctx := context.Background() handles := newHandleTable() - stacksServer := newStacksServer(handles) + stacksServer := newStacksServer(handles, &serviceOpts{}) // In normal use a client would have previously opened a source bundle // using Dependencies.OpenSourceBundle, so we'll simulate the effect @@ -110,7 +110,7 @@ func TestStacksFindStackConfigurationComponents(t *testing.T) { ctx := context.Background() handles := newHandleTable() - stacksServer := newStacksServer(handles) + stacksServer := newStacksServer(handles, &serviceOpts{}) // In normal use a client would have previously opened a source bundle // using Dependencies.OpenSourceBundle, so we'll simulate the effect @@ -226,7 +226,7 @@ func TestStacksPlanStackChanges(t *testing.T) { ctx := context.Background() handles := newHandleTable() - stacksServer := newStacksServer(handles) + stacksServer := newStacksServer(handles, &serviceOpts{}) fakeSourceBundle := &sourcebundle.Bundle{} bundleHnd := handles.NewSourceBundle(fakeSourceBundle) diff --git a/internal/stacks/stackruntime/apply.go b/internal/stacks/stackruntime/apply.go index a7f6fa813ea7..1af2733b15e4 100644 --- a/internal/stacks/stackruntime/apply.go +++ b/internal/stacks/stackruntime/apply.go @@ -96,6 +96,8 @@ type ApplyRequest struct { RawPlan []*anypb.Any ProviderFactories map[addrs.Provider]providers.Factory + + ExperimentsAllowed bool } // ApplyResponse is used by [Apply] to describe the results of applying. diff --git a/internal/stacks/stackruntime/eval_expr.go b/internal/stacks/stackruntime/eval_expr.go index 9deb0b3dbc11..84925d1834df 100644 --- a/internal/stacks/stackruntime/eval_expr.go +++ b/internal/stacks/stackruntime/eval_expr.go @@ -28,6 +28,7 @@ func EvalExpr(ctx context.Context, expr hcl.Expression, req *EvalExprRequest) (c InputVariableValues: req.InputValues, ProviderFactories: req.ProviderFactories, }) + main.AllowLanguageExperiments(req.ExperimentsAllowed) return main.EvalExpr(ctx, expr, req.EvalStackInstance, stackeval.InspectPhase) } @@ -50,4 +51,6 @@ type EvalExprRequest struct { // configurations corresponding to these. 
InputValues map[stackaddrs.InputVariable]ExternalInputValue ProviderFactories map[addrs.Provider]providers.Factory + + ExperimentsAllowed bool } diff --git a/internal/stacks/stackruntime/internal/stackeval/applying.go b/internal/stacks/stackruntime/internal/stackeval/applying.go index 642ec3aba3c8..facd06e3e509 100644 --- a/internal/stacks/stackruntime/internal/stackeval/applying.go +++ b/internal/stacks/stackruntime/internal/stackeval/applying.go @@ -26,6 +26,8 @@ type ApplyOpts struct { // unrecognized then the apply phase will use this to emit the necessary // "discard" events to keep the state consistent. PrevStateDescKeys collections.Set[statekeys.Key] + + ExperimentsAllowed bool } // Applyable is an interface implemented by types which represent objects diff --git a/internal/stacks/stackruntime/internal/stackeval/component_config.go b/internal/stacks/stackruntime/internal/stackeval/component_config.go index ea5e68c13ebc..403d5c6aa9fb 100644 --- a/internal/stacks/stackruntime/internal/stackeval/component_config.go +++ b/internal/stacks/stackruntime/internal/stackeval/component_config.go @@ -126,6 +126,7 @@ func (c *ComponentConfig) CheckModuleTree(ctx context.Context) (*configs.Config, // source files on disk (an implementation detail) rather than // preserving the source address abstraction. parser := configs.NewParser(afero.NewOsFs()) + parser.AllowLanguageExperiments(c.main.LanguageExperimentsAllowed()) if !parser.IsConfigDir(rootModuleDir) { diags = diags.Append(&hcl.Diagnostic{ diff --git a/internal/stacks/stackruntime/internal/stackeval/main.go b/internal/stacks/stackruntime/internal/stackeval/main.go index 1a2cbad9c843..f9665fb3adce 100644 --- a/internal/stacks/stackruntime/internal/stackeval/main.go +++ b/internal/stacks/stackruntime/internal/stackeval/main.go @@ -65,6 +65,11 @@ type Main struct { // This must never be used outside of test code in this package. testOnlyGlobals map[string]cty.Value + // languageExperimentsAllowed gets set if our caller enables the use + // of language experiments by calling [Main.AllowLanguageExperiments] + // shortly after creating this object. + languageExperimentsAllowed bool + // The remaining fields memoize other objects we might create in response // to method calls. Must lock "mu" before interacting with them. mu sync.Mutex @@ -156,6 +161,22 @@ func NewForInspecting(config *stackconfig.Config, state *stackstate.State, opts } } +// AllowLanguageExperiments changes the flag for whether language experiments +// are allowed during evaluation. +// +// Call this very shortly after creating a [Main], before performing any other +// actions on it. Changing this setting after other methods have been called +// will produce unpredictable results. +func (m *Main) AllowLanguageExperiments(allow bool) { + m.languageExperimentsAllowed = allow +} + +// LanguageExperimentsAllowed returns true if language experiments are allowed +// to be used during evaluation. +func (m *Main) LanguageExperimentsAllowed() bool { + return m.languageExperimentsAllowed +} + // Validating returns true if the receiving [Main] is configured for validating. 
// // If this returns false then validation methods may panic or return strange diff --git a/internal/stacks/stackruntime/internal/stackeval/main_apply.go b/internal/stacks/stackruntime/internal/stackeval/main_apply.go index 3345c411e8db..b940e40706f4 100644 --- a/internal/stacks/stackruntime/internal/stackeval/main_apply.go +++ b/internal/stacks/stackruntime/internal/stackeval/main_apply.go @@ -199,6 +199,7 @@ func ApplyPlan(ctx context.Context, config *stackconfig.Config, rawPlan []*anypb }) main := NewForApplying(config, plan.RootInputValues, plan, results, opts) + main.AllowLanguageExperiments(opts.ExperimentsAllowed) begin(ctx, main) // the change tasks registered above become runnable // With the planned changes now in progress, we'll visit everything and diff --git a/internal/stacks/stackruntime/plan.go b/internal/stacks/stackruntime/plan.go index c44546774f0b..701dd5a2e925 100644 --- a/internal/stacks/stackruntime/plan.go +++ b/internal/stacks/stackruntime/plan.go @@ -49,6 +49,7 @@ func Plan(ctx context.Context, req *PlanRequest, resp *PlanResponse) { ForcePlanTimestamp: req.ForcePlanTimestamp, }) + main.AllowLanguageExperiments(req.ExperimentsAllowed) main.PlanAll(ctx, stackeval.PlanOutput{ AnnouncePlannedChange: func(ctx context.Context, change stackplan.PlannedChange) { resp.PlannedChanges <- change @@ -97,6 +98,8 @@ type PlanRequest struct { // to return the given value instead of whatever real time the plan // operation started. This is for testing purposes only. ForcePlanTimestamp *time.Time + + ExperimentsAllowed bool } // PlanResponse is used by [Plan] to describe the results of planning. diff --git a/internal/stacks/stackruntime/validate.go b/internal/stacks/stackruntime/validate.go index c6a428f585c8..197bad1962cf 100644 --- a/internal/stacks/stackruntime/validate.go +++ b/internal/stacks/stackruntime/validate.go @@ -19,6 +19,7 @@ func Validate(ctx context.Context, req *ValidateRequest) tfdiags.Diagnostics { defer span.End() main := stackeval.NewForValidating(req.Config, stackeval.ValidateOpts{}) + main.AllowLanguageExperiments(req.ExperimentsAllowed) diags := main.ValidateAll(ctx) diags = diags.Append( main.DoCleanup(ctx), @@ -32,5 +33,7 @@ func Validate(ctx context.Context, req *ValidateRequest) tfdiags.Diagnostics { type ValidateRequest struct { Config *stackconfig.Config + ExperimentsAllowed bool + // TODO: Provider factories and other similar such things } From 06d7a6fe2080b4590433b79cc6b22167b7fa4dbe Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 2 Feb 2024 15:41:22 -0800 Subject: [PATCH 03/13] addrs: PartialExpandedResource module address accessors This is kinda awkward because this address type represents both resources whose own instances are not expanded yet and resources belonging to whole modules whose instance keys haven't been expanded yet, and those two cases have different address types. However, in return for this awkward API only for the rare case where we need to isolate the module instance address, the rest of the system gets to not worry very much about this distinction: it can share most code between the two cases, since they both need similar evaluation treatment anyway. 
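To make the two-case API concrete (this sketch is illustrative only, not part of the patch): exactly one of the two new accessors reports ok=true for any given partial-expanded resource, so callers branch on whichever succeeds. The types below are simplified stand-ins for the real addrs types, but the length check on the unexpanded suffix mirrors the logic in the diff that follows.

    package main

    import "fmt"

    type moduleInstance string        // stand-in for addrs.ModuleInstance
    type partialExpandedModule string // stand-in for addrs.PartialExpandedModule

    type partialExpandedResource struct {
        expandedPrefix   moduleInstance
        unexpandedSuffix []string // module calls whose instance keys are not yet known
    }

    // ModuleInstance succeeds only when the containing module instance address is fully known.
    func (per partialExpandedResource) ModuleInstance() (moduleInstance, bool) {
        if len(per.unexpandedSuffix) != 0 {
            return "", false
        }
        return per.expandedPrefix, true
    }

    // PartialExpandedModule succeeds only in the opposite case.
    func (per partialExpandedResource) PartialExpandedModule() (partialExpandedModule, bool) {
        if len(per.unexpandedSuffix) == 0 {
            return "", false
        }
        return partialExpandedModule(string(per.expandedPrefix) + ".<unexpanded>"), true
    }

    func describe(per partialExpandedResource) {
        if mi, ok := per.ModuleInstance(); ok {
            // Only the resource's own instance keys are unknown.
            fmt.Printf("resource under fully-known module %q\n", mi)
        } else if pem, ok := per.PartialExpandedModule(); ok {
            // The containing module instance address is itself not fully known.
            fmt.Printf("resource under partial-expanded module %q\n", pem)
        }
    }

    func main() {
        describe(partialExpandedResource{expandedPrefix: "module.a[0]"})
        describe(partialExpandedResource{expandedPrefix: "module.a[0]", unexpandedSuffix: []string{"b"}})
    }
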
--- internal/addrs/partial_expanded.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/internal/addrs/partial_expanded.go b/internal/addrs/partial_expanded.go index 6bffedac550e..6b2cac149b8e 100644 --- a/internal/addrs/partial_expanded.go +++ b/internal/addrs/partial_expanded.go @@ -323,6 +323,34 @@ func (per PartialExpandedResource) KnownModuleInstancePrefix() ModuleInstance { return per.module.KnownPrefix() } +// ModuleInstance returns the fully-qualified [ModuleInstance] that this +// partial-expanded resource belongs to, but only if its module instance +// address is fully known. +// +// The second return value is false if the module instance address is not +// fully expanded, in which case the first return value is invalid. Use +// [PartialExpandedResource.PartialExpandedModule] instead in that case. +func (per PartialExpandedResource) ModuleInstance() (ModuleInstance, bool) { + if len(per.module.unexpandedSuffix) != 0 { + return nil, false + } + return per.module.expandedPrefix, true +} + +// PartialExpandedModule returns a [PartialExpandedModule] address describing +// the partially-unknown module instance address that the resource belongs to, +// but only if the module instance address is not fully known. +// +// The second return value is false if the module instance address is actually +// fully expanded, in which case the first return value is invalid. Use +// [PartialExpandedResource.ModuleInstance] instead in that case. +func (per PartialExpandedResource) PartialExpandedModule() (PartialExpandedModule, bool) { + if len(per.module.unexpandedSuffix) == 0 { + return PartialExpandedModule{}, false + } + return per.module, true +} + // String returns a string representation of the pattern which uses the special // placeholder "[*]" to represent positions where instance keys are not yet // known. From e396773a228954dddd6392b228d3106694b8ba65 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 2 Feb 2024 10:00:43 -0800 Subject: [PATCH 04/13] instances: Some additional commentary The internal API here was a little unclear, so this is just some retroactive additional documentation resulting from me having "relearned" how this API works after some time away from it, in the hope that future maintainers will not need to repeat that exercise. --- internal/instances/expander.go | 4 ++-- internal/instances/expansion_mode.go | 14 +++++++++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/internal/instances/expander.go b/internal/instances/expander.go index b9d77dbffe4f..594578ccf2f6 100644 --- a/internal/instances/expander.go +++ b/internal/instances/expander.go @@ -418,8 +418,8 @@ func (e *Expander) GetResourceInstanceRepetitionData(addr addrs.AbsResourceInsta return exp.repetitionData(addr.Resource.Key) } -// GetModuleCallInstanceKeys determines the child instance keys for one specific -// instance of a module call. +// ResourceInstanceKeys determines the child instance keys for one specific +// instance of a resource. // // keyType describes the expected type of all keys in knownKeys, which typically // also implies what data type would be used to describe the full set of diff --git a/internal/instances/expansion_mode.go b/internal/instances/expansion_mode.go index 4f396e20d375..797e40dcc42a 100644 --- a/internal/instances/expansion_mode.go +++ b/internal/instances/expansion_mode.go @@ -16,7 +16,19 @@ import ( // ways expansion can operate depending on how repetition is configured for // an object. 
type expansion interface { - instanceKeys() (addrs.InstanceKeyType, []addrs.InstanceKey, bool) + // instanceKeys determines the full set of instance keys for whatever + // object this expansion is associated with, or indicates that we + // can't yet know what they are (using keysUnknown=true). + // + // if keysUnknown is true then the "keys" result is likely to be incomplete, + // meaning that there might be other instance keys that we've not yet + // calculated, but we know that they will be of type keyType. + // + // If len(keys) == 0 when keysUnknown is true then we don't yet know _any_ + // instance keys for this object. keyType might be [addrs.UnknownKeyType] + // if we don't even have enough information to predict what type of + // keys we will be using for this object. + instanceKeys() (keyType addrs.InstanceKeyType, keys []addrs.InstanceKey, keysUnknown bool) repetitionData(addrs.InstanceKey) RepetitionData } From e00a6bc40ff7f856fde5998a5c8bd36b5eab3113 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 26 Jan 2024 16:54:15 -0800 Subject: [PATCH 05/13] deferring: A new package for tracking deferred actions When we defer planning actions that have direct side-effects -- that is, resource instance actions -- we need to remember that we did so because we then need to force deferring planning for any other resources downstream to preserve the required order of operations. That means we need to keep track of what we've already deferred so we can check whether a newly-visited resource instance must also be deferred. We also need to track deferrals for the purpose of reporting them as part of the plan presented to the end-user. This change _starts_ to implement that, but for now the public API is limited only to generating a boolean signal for whether there are any deferrals at all, because that's the minimum possible reporting that allows for correct behavior (so the caller can treat the resulting plan slight differently in the workflow). In future we'll also want to be able to return placeholder values for not-yet-expanded resource instances, but that's not fully built out here yet since for an initial implementation we can cheat and use cty.DynamicVal as a universal "we know nothing at all" placeholder. This API is likely to grow and change in forthcoming commits as we continue wiring these new behaviors into the Terraform modules runtime. --- internal/plans/deferring/deferred.go | 322 ++++++++++++++++++ .../deferred_partial_expanded_resource.go | 24 ++ .../deferring/deferred_resource_instance.go | 38 +++ internal/plans/deferring/deferred_test.go | 165 +++++++++ internal/plans/deferring/doc.go | 10 + 5 files changed, 559 insertions(+) create mode 100644 internal/plans/deferring/deferred.go create mode 100644 internal/plans/deferring/deferred_partial_expanded_resource.go create mode 100644 internal/plans/deferring/deferred_resource_instance.go create mode 100644 internal/plans/deferring/deferred_test.go create mode 100644 internal/plans/deferring/doc.go diff --git a/internal/plans/deferring/deferred.go b/internal/plans/deferring/deferred.go new file mode 100644 index 000000000000..9ca16128066d --- /dev/null +++ b/internal/plans/deferring/deferred.go @@ -0,0 +1,322 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package deferring + +import ( + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/plans" +) + +// Deferred keeps track of deferrals that have already happened, to help +// guide decisions about whether downstream operations might also need to be +// deferred, and to provide some placeholder data for performing downstream +// checks against the subset of data we know despite the deferrals. +// +// This type only tracks information about object types that can _cause_ +// deferred changes. Everything in the language can be _affected_ by deferred +// changes, such as by referring to an object whose changes were deferred or +// being declared in a module that was only partially-expanded, but we track +// the information about the other object types in other locations that are +// thematically closer to the type of object in question. +type Deferred struct { + // resourceGraph is provided by the caller when instantiating a [Deferred], + // and describes the dependency relationships between the static resource + // declarations in the configuration. + // + // We use this as part of the rules for deciding whether a downstream + // resource instance that could potentially be planned should be deferred + // anyway due to its dependencies not yet being fully planned. + resourceGraph addrs.DirectedGraph[addrs.ConfigResource] + + // externalDependencyDeferred marks the special situation where the + // subsystem that's calling the modules runtime knows that some external + // dependency of the configuration has deferred changes itself, and thus + // all planned actions in this configuration must be deferred even if + // the modules runtime can't find its own reason to do that. + // + // This is used by the stacks runtime when component B depends on + // component A and component A's plan had deferred changes, so therefore + // everything that component B might plan must also be deferred even + // though the planning process for B cannot see into the plan for A. + externalDependencyDeferred bool + + // Must hold this lock when accessing all fields after this one. + mu sync.Mutex + + // resourceInstancesDeferred tracks the resource instances that have + // been deferred despite their full addresses being known. This can happen + // either because an upstream change was already deferred, or because + // during planning the owning provider indicated that it doesn't yet have + // enough information to produce a plan. + // + // These are grouped by the static resource configuration address because + // there can potentially be various different deferrals for the same + // configuration block at different amounts of instance expansion under + // different prefixes, and so some queries require us to search across + // all of those options to decide if each instance is relevant. + resourceInstancesDeferred addrs.Map[addrs.ConfigResource, addrs.Map[addrs.AbsResourceInstance, deferredResourceInstance]] + + // partialExpandedResourcesDeferred tracks placeholders that cover an + // unbounded set of potential resource instances in situations where we + // don't yet even have enough information to predict which instances of + // a resource will exist. 
+ // + // These are grouped by the static resource configuration address because + // there can potentially be various different deferrals for the same + // configuration block at different amounts of instance expansion under + // different prefixes, and so some queries require us to search across + // all of those options to find the one that matches most closely. + partialExpandedResourcesDeferred addrs.Map[addrs.ConfigResource, addrs.Map[addrs.PartialExpandedResource, deferredPartialExpandedResource]] + + // partialExpandedModulesDeferred tracks all of the partial-expanded module + // prefixes we were notified about. + // + // We don't need to track anything for these other than that we saw them + // reported, because the relevant data is tracked in [instances.Expander] + // and [namedvals.State], but we do need to remember the addresses just + // so that we can inform the caller that there was something deferred + // even if there weren't any resources beneath the partial-expanded prefix. + // + // (If we didn't catch that then we'd mislead the caller into thinking + // we fully-evaluated everything, which would be incorrect if any of the + // root module output values are derived from the results of the + // partial-expanded calls.) + partialExpandedModulesDeferred addrs.Set[addrs.PartialExpandedModule] +} + +// NewDeferred constructs a new [Deferred] that assumes that the given resource +// graph accurately describes all of the dependencies between static resource +// blocks in the configuration. +// +// Callers must not modify anything reachable through resourceGraph after +// calling this function. +func NewDeferred(resourceGraph addrs.DirectedGraph[addrs.ConfigResource]) *Deferred { + return &Deferred{ + resourceGraph: resourceGraph, + resourceInstancesDeferred: addrs.MakeMap[addrs.ConfigResource, addrs.Map[addrs.AbsResourceInstance, deferredResourceInstance]](), + partialExpandedResourcesDeferred: addrs.MakeMap[addrs.ConfigResource, addrs.Map[addrs.PartialExpandedResource, deferredPartialExpandedResource]](), + partialExpandedModulesDeferred: addrs.MakeSet[addrs.PartialExpandedModule](), + } +} + +// SetExternalDependencyDeferred modifies a freshly-constructed [Deferred] +// so that it will consider all resource instances as needing their actions +// deferred, even if there's no other reason to do that. +// +// This must be called zero or one times before any other use of the receiver. +// Changing this setting after a [Deferred] has already been used, or +// concurrently with any other method call, will cause inconsistent and +// undefined behavior. +func (d *Deferred) SetExternalDependencyDeferred() { + d.externalDependencyDeferred = true +} + +// HaveAnyDeferrals returns true if at least one deferral has been registered +// with the receiver. +// +// This method is intended as a summary result to propagate to the modules +// runtime caller so it can know if it should treat any downstream objects +// as having their own changes deferred without having to duplicate the +// modules runtime's rules for what counts as a deferral. +func (d *Deferred) HaveAnyDeferrals() bool { + return d.externalDependencyDeferred || + d.resourceInstancesDeferred.Len() != 0 || + d.partialExpandedResourcesDeferred.Len() != 0 || + len(d.partialExpandedModulesDeferred) != 0 +} + +// ShouldDeferResourceChanges returns true if the receiver knows some reason +// why the resource instance with the given address should have its planned +// action deferred for a future plan/apply round. 
+// +// This method is specifically for resource instances whose full address is +// known and thus it would be possible in principle to plan changes, but we +// still need to respect dependency ordering and so any planned changes must +// be deferred if any upstream planned action was already deferred for +// some reason. +// +// Callers who get the answer true should announce an approximation of the +// action they would have planned to [Deferred.ReportResourceInstanceDeferred], +// but should skip writing that change into the live plan so that downstream +// evaluation will be based on the prior state (similar to in a refresh-only +// plan) rather than the result of the deferred action. +// +// It's invalid to call this method for an address that was already reported +// as deferred using [Deferred.ReportResourceInstanceDeferred], and so this +// method will panic in that case. Callers should always test whether a resource +// instance action should be deferred _before_ reporting that it has been. +func (d *Deferred) ShouldDeferResourceInstanceChanges(addr addrs.AbsResourceInstance) bool { + if d.externalDependencyDeferred { + // This is an easy case: _all_ actions must be deferred. + return true + } + + // If neither of our resource-deferral-tracking collections have anything + // in them then we definitely don't need to defer. This special case is + // here primarily to minimize the amount of code from here that will run + // when the deferred-actions-related experiments are inactive, so we can + // minimize the risk of impacting non-participants. + // (Maybe we'll remove this check once this stuff is non-experimental.) + if d.resourceInstancesDeferred.Len() == 0 && d.partialExpandedResourcesDeferred.Len() == 0 { + return false + } + + // Our resource graph describes relationships between the static resource + // configuration blocks, not their dynamic instances, so we need to start + // with the config address that the given instance belongs to. + configAddr := addr.ConfigResource() + + if d.resourceInstancesDeferred.Get(configAddr).Has(addr) { + // Asking for whether a resource instance should be deferred when + // it was already reported as deferred suggests a programming error + // in the caller, because the test for whether a change should be + // deferred should always come before reporting that it has been. + panic(fmt.Sprintf("checking whether %s should be deferred when it was already deferred", addr)) + } + + // We use DirectDependenciesOf rather than TransitiveDependenciesOf because + // the reports to this object are driven by the modules runtime's existing + // graph walk and so all of our direct dependencies should already have + // had the opportunity to report themselves as deferred by the time this + // question is being asked. + configDeps := d.resourceGraph.DirectDependenciesOf(configAddr) + + // For this initial implementation we're taking the shortcut of assuming + // that all of the configDeps are required. It would be better to do a + // more precise analysis that takes into account how data could possibly + // flow between instances of resources across different module paths, + // but that may have some subtlety due to dynamic data flow, so we'll + // need to do some more theory work to figure out what kind of analysis + // we'd need to do to get this to be more precise. + // + // This conservative approach is a starting point so we can focus on + // developing the workflow around deferred changes before making its + // analyses more precise. 
This will defer more changes than strictly + // necessary, but that's better than not deferring changes that should + // have been deferred. + // + // (FWIW, it does seem like we _should_ be able to eliminate some + // dynamic instances from consideration by relying on constraints such as + // how a multi-instance module call can't have an object in one instance + // depending on an object for another instance, but we'll need to make sure + // any additional logic here is well-reasoned to avoid violating dependency + // invariants.) + for _, configDep := range configDeps { + if d.resourceInstancesDeferred.Has(configDep) { + // For now we don't consider exactly which instances of that + // configuration block were deferred; there being at least + // one is enough. + return true + } + if d.partialExpandedResourcesDeferred.Has(configDep) { + // For now we don't consider exactly which partial-expanded + // prefixes of that configuration block were deferred; there being + // at least one is enough. + return true + } + + // We don't check d.partialExpandedModulesDeferred here because + // we expect that the graph nodes representing any resource under + // a partial-expanded module prefix to call + // d.ReportResourceExpansionDeferred once they find out that they + // are under a partial-expanded prefix, and so + // partialExpandedModulesDeferred is effectively just a less-detailed + // summary of the information in partialExpandedResourcesDeferred. + // (instances.Expander is the one responsible for letting the resource + // node discover that it needs to do that; package deferred does + // not participate directly in that concern.) + } + return false +} + +// ReportResourceExpansionDeferred reports that we cannot even predict which +// instances of a resource will be declared and thus we must defer all planning +// for that resource. +// +// Use the most precise partial-expanded resource address possible, and provide +// a valuePlaceholder that has known values only for attributes/elements that +// we can guarantee will be equal across all potential resource instances +// under the partial-expanded prefix. +func (d *Deferred) ReportResourceExpansionDeferred(addr addrs.PartialExpandedResource, valuePlaceholder cty.Value) { + d.mu.Lock() + defer d.mu.Unlock() + + configAddr := addr.ConfigResource() + if !d.partialExpandedResourcesDeferred.Has(configAddr) { + d.partialExpandedResourcesDeferred.Put(configAddr, addrs.MakeMap[addrs.PartialExpandedResource, deferredPartialExpandedResource]()) + } + + configMap := d.partialExpandedResourcesDeferred.Get(configAddr) + if configMap.Has(addr) { + // This indicates a bug in the caller, since our graph walk should + // ensure that we visit and evaluate each distinct partial-expanded + // prefix only once. + panic(fmt.Sprintf("duplicate deferral report for %s", addr)) + } + configMap.Put(addr, deferredPartialExpandedResource{ + valuePlaceholder: valuePlaceholder, + }) +} + +// ReportResourceInstanceDeferred records that a fully-expanded resource +// instance has had its planned action deferred to a future round for a reason +// other than its address being only partially-decided. +// +// For example, this is the method to use if the reason for deferral is +// that [Deferred.ShouldDeferResourceInstanceChanges] returns true for the +// same address, or if the responsible provider indicated in its planning +// response that it does not have enough information to produce a final +// plan. 
+// +// expectedAction and expectedValue together provide an approximation of +// what Terraform is expecting to plan in a future round. expectedAction may +// be [plans.Undecided] if there isn't even enough information to decide on +// an action. expectedValue should use unknown values to stand in for values +// that cannot be predicted while being as precise as is practical; in the +// worst case it's okay to provide a totally-unknown value, but better to +// provide a known object with unknown values inside it when possible. +// +// TODO: Allow the caller to pass something representing the reason for the +// deferral, so we can distinguish between the different variations in the +// plan reported to the operator. +func (d *Deferred) ReportResourceInstanceDeferred(addr addrs.AbsResourceInstance, expectedAction plans.Action, expectedValue cty.Value) { + d.mu.Lock() + defer d.mu.Unlock() + + configAddr := addr.ConfigResource() + if !d.resourceInstancesDeferred.Has(configAddr) { + d.resourceInstancesDeferred.Put(configAddr, addrs.MakeMap[addrs.AbsResourceInstance, deferredResourceInstance]()) + } + + configMap := d.resourceInstancesDeferred.Get(configAddr) + if configMap.Has(addr) { + // This indicates a bug in the caller, since our graph walk should + // ensure that we visit and evaluate each resource instance only once. + panic(fmt.Sprintf("duplicate deferral report for %s", addr)) + } + configMap.Put(addr, deferredResourceInstance{ + plannedAction: expectedAction, + plannedValue: expectedValue, + }) +} + +// ReportModuleExpansionDeferred reports that we cannot even predict which +// instances of a module call will be declared and thus we must defer all +// planning for everything inside that module. +// +// Use the most precise partial-expanded module address possible. +func (d *Deferred) ReportModuleExpansionDeferred(addr addrs.PartialExpandedModule) { + if d.partialExpandedModulesDeferred.Has(addr) { + // This indicates a bug in the caller, since our graph walk should + // ensure that we visit and evaluate each distinct partial-expanded + // prefix only once. + panic(fmt.Sprintf("duplicate deferral report for %s", addr)) + } + d.partialExpandedModulesDeferred.Add(addr) +} diff --git a/internal/plans/deferring/deferred_partial_expanded_resource.go b/internal/plans/deferring/deferred_partial_expanded_resource.go new file mode 100644 index 000000000000..2f97f230c283 --- /dev/null +++ b/internal/plans/deferring/deferred_partial_expanded_resource.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package deferring + +import ( + "github.com/zclconf/go-cty/cty" +) + +// deferredPartialExpandedResource tracks placeholder information for an +// unbounded set of potential resource instances sharing a common known +// address prefix. +// +// This is for situations where we can't even predict which instances of +// a resource will be declared, due to a count or for_each argument being +// unknown. The unknown repetition argument could either be on the resource +// itself or on one of its ancestor module calls. +type deferredPartialExpandedResource struct { + // valuePlaceholder is a placeholder value describes what all of the + // potential instances in the unbounded set represented by this object + // have in common, using unknown values for any parts where we cannot + // guarantee that all instances will agree. 
+ valuePlaceholder cty.Value +} diff --git a/internal/plans/deferring/deferred_resource_instance.go b/internal/plans/deferring/deferred_resource_instance.go new file mode 100644 index 000000000000..7ce4ade0bf99 --- /dev/null +++ b/internal/plans/deferring/deferred_resource_instance.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package deferring + +import ( + "github.com/hashicorp/terraform/internal/plans" + "github.com/zclconf/go-cty/cty" +) + +// deferredResourceInstance tracks information about a resource instance whose +// address is precisely known but whose planned action has been deferred for +// some other reason. +type deferredResourceInstance struct { + // plannedAction is the action that Terraform expects to take for this + // resource instance in a future round. + // + // This can be set to plans.Undecided in situations where there isn't + // even enough information to decide what the action would be. + plannedAction plans.Action + + // plannedValue is an approximation of the value that Terraform expects + // to plan for this resource instance in a future round, using unknown + // values in locations where a concrete value cannot yet be decided. + // + // In the most extreme case, plannedValue could be cty.DynamicVal to + // reflect that we know nothing at all about the resource instance, or + // an unknown value of the resource instance's schema type if the values + // are completely unknown but we've at least got enough information to + // approximate the type of the value. + // + // However, ideally this should be a known object value that potentially + // has unknown values for individual attributes inside, since that gives + // the most context to aid in finding errors that would definitely arise + // on a future round, and thus shorten the iteration time to find that + // problem. + plannedValue cty.Value +} diff --git a/internal/plans/deferring/deferred_test.go b/internal/plans/deferring/deferred_test.go new file mode 100644 index 000000000000..f1f5b958b383 --- /dev/null +++ b/internal/plans/deferring/deferred_test.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package deferring + +import ( + "testing" + + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/plans" + "github.com/zclconf/go-cty/cty" +) + +func TestDeferred_externalDependency(t *testing.T) { + // The resource graph is irrelevant for this case, because we're going to + // defer any resource instance changes regardless. Therefore an empty + // graph is just fine. + resourceGraph := addrs.NewDirectedGraph[addrs.ConfigResource]() + deferred := NewDeferred(resourceGraph) + + // This reports that something outside of the modules runtime knows that + // everything in this configuration depends on some elsewhere-action + // that has been deferred, and so the modules runtime must respect that + // even though it doesn't know the details of why it is so. + deferred.SetExternalDependencyDeferred() + + // With the above flag set, now ShouldDeferResourceInstanceChanges should + // return true regardless of any other information. 
+ got := deferred.ShouldDeferResourceInstanceChanges(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "anything", + Name: "really-anything", + }, + }, + }) + if !got { + t.Errorf("did not report that the instance should have its changes deferred; should have") + } +} + +func TestDeferred_absResourceInstanceDeferred(t *testing.T) { + instAAddr := addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance.Child("foo", addrs.NoKey), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "a", + }, + }, + } + instBAddr := addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "a", + }, + }, + } + instCAddr := addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "c", + }, + }, + } + + resourceGraph := addrs.NewDirectedGraph[addrs.ConfigResource]() + resourceGraph.AddDependency(instCAddr.ConfigResource(), instBAddr.ConfigResource()) + resourceGraph.AddDependency(instCAddr.ConfigResource(), instAAddr.ConfigResource()) + deferred := NewDeferred(resourceGraph) + + // Before we report anything, all three addresses should indicate that + // they don't need to have their actions deferred. + t.Run("without any deferrals yet", func(t *testing.T) { + for _, instAddr := range []addrs.AbsResourceInstance{instAAddr, instBAddr, instCAddr} { + if deferred.ShouldDeferResourceInstanceChanges(instAddr) { + t.Errorf("%s reported as needing deferred; should not be, yet", instAddr) + } + } + }) + + // Instance A has its Create action deferred for some reason. + deferred.ReportResourceInstanceDeferred(instAAddr, plans.Create, cty.DynamicVal) + + t.Run("with one resource instance deferred", func(t *testing.T) { + if !deferred.ShouldDeferResourceInstanceChanges(instCAddr) { + t.Errorf("%s was not reported as needing deferred; should be deferred", instCAddr) + } + if deferred.ShouldDeferResourceInstanceChanges(instBAddr) { + t.Errorf("%s reported as needing deferred; should not be", instCAddr) + } + }) +} + +func TestDeferred_partialExpandedResource(t *testing.T) { + instAAddr := addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance.Child("foo", addrs.NoKey), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "a", + }, + }, + } + instBAddr := addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "a", + }, + }, + } + instCAddr := addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "c", + }, + }, + } + instAPartial := addrs.RootModuleInstance. + UnexpandedChild(addrs.ModuleCall{Name: "foo"}). 
+ Resource(instAAddr.Resource.Resource) + + resourceGraph := addrs.NewDirectedGraph[addrs.ConfigResource]() + resourceGraph.AddDependency(instCAddr.ConfigResource(), instBAddr.ConfigResource()) + resourceGraph.AddDependency(instCAddr.ConfigResource(), instAAddr.ConfigResource()) + deferred := NewDeferred(resourceGraph) + + // Before we report anything, all three addresses should indicate that + // they don't need to have their actions deferred. + t.Run("without any deferrals yet", func(t *testing.T) { + for _, instAddr := range []addrs.AbsResourceInstance{instAAddr, instBAddr, instCAddr} { + if deferred.ShouldDeferResourceInstanceChanges(instAddr) { + t.Errorf("%s reported as needing deferred; should not be, yet", instAddr) + } + } + }) + + // Resource A hasn't been expanded fully, so is deferred. + deferred.ReportResourceExpansionDeferred(instAPartial, cty.DynamicVal) + + t.Run("with one resource instance deferred", func(t *testing.T) { + if !deferred.ShouldDeferResourceInstanceChanges(instCAddr) { + t.Errorf("%s was not reported as needing deferred; should be deferred", instCAddr) + } + if deferred.ShouldDeferResourceInstanceChanges(instBAddr) { + t.Errorf("%s reported as needing deferred; should not be", instCAddr) + } + }) +} diff --git a/internal/plans/deferring/doc.go b/internal/plans/deferring/doc.go new file mode 100644 index 000000000000..3505225f6814 --- /dev/null +++ b/internal/plans/deferring/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +// Package deferring deals with the problem of keeping track of +// "deferred actions", which means the situation where the planning of some +// objects is currently impossible due to incomplete information and so +// Terraform explicitly defers dealing with them until the next plan/apply +// round, while still allowing the operator to apply the partial plan so +// that there will be more information available next time. +package deferring From 58c1806e6205c750a319fa5c39fa07ef12a2c028 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 29 Jan 2024 16:07:01 -0800 Subject: [PATCH 06/13] terraform: Expose a shared deferring.Deferred object As with the various other context-tracking containers we use during graph walks, we'll need a deferring.Deferred object shared across all graph nodes' EvalContexts and with the expression evaluator so that we can report deferrals and then make decisions based on deferrals previously registered upstream. As of this commit we're not yet doing anything with this new object, but in future commits we'll use it to report what we've deferred and to "artifically" defer anything that has a side-effect and depends on something else that's already been deferred. 
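The check-then-report flow that plan-time graph nodes are expected to follow against the shared tracker looks roughly like the standalone sketch below (illustrative only, with simplified stand-in types rather than the real deferring and addrs packages): ask whether an upstream deferral forces this instance to be deferred, and if so report an approximate change instead of recording a real planned one.

    package main

    import "fmt"

    // deferred loosely mirrors the role of deferring.Deferred shared via EvalContext.
    type deferred struct {
        deferredAddrs map[string]bool // addresses already reported as deferred
    }

    // shouldDefer stands in for ShouldDeferResourceInstanceChanges: any deferred
    // dependency forces the downstream instance to be deferred too.
    func (d *deferred) shouldDefer(deps []string) bool {
        for _, dep := range deps {
            if d.deferredAddrs[dep] {
                return true
            }
        }
        return false
    }

    // reportDeferred stands in for ReportResourceInstanceDeferred.
    func (d *deferred) reportDeferred(addr string) {
        d.deferredAddrs[addr] = true
    }

    // planResourceInstance sketches what a plan node would do with ctx.Deferrals():
    // defer rather than plan whenever a dependency's action was already deferred.
    func planResourceInstance(d *deferred, addr string, deps []string) {
        if d.shouldDefer(deps) {
            d.reportDeferred(addr)
            fmt.Printf("%s: action deferred to a future round\n", addr)
            return
        }
        fmt.Printf("%s: planned normally\n", addr)
    }

    func main() {
        d := &deferred{deferredAddrs: map[string]bool{}}
        d.reportDeferred("test.a") // e.g. the provider lacked enough information to plan
        planResourceInstance(d, "test.b", nil)
        planResourceInstance(d, "test.c", []string{"test.a"})
    }
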
--- internal/terraform/context_eval.go | 2 +- internal/terraform/context_plan.go | 9 +++------ internal/terraform/context_walk.go | 11 +++++++++-- internal/terraform/eval_context.go | 6 ++++++ internal/terraform/eval_context_builtin.go | 8 ++++++++ internal/terraform/eval_context_mock.go | 9 +++++++++ internal/terraform/evaluate.go | 6 ++++++ internal/terraform/graph_walk_context.go | 4 ++++ 8 files changed, 46 insertions(+), 9 deletions(-) diff --git a/internal/terraform/context_eval.go b/internal/terraform/context_eval.go index 891824ea42d9..c3f54df240a1 100644 --- a/internal/terraform/context_eval.go +++ b/internal/terraform/context_eval.go @@ -100,7 +100,7 @@ func (c *Context) Eval(config *configs.Config, state *states.State, moduleAddr a // If we skipped walking the graph (due to errors) then we'll just // use a placeholder graph walker here, which'll refer to the // unmodified state. - walker = c.graphWalker(walkEval, walkOpts) + walker = c.graphWalker(graph, walkEval, walkOpts) } return evalScopeFromGraphWalk(walker, moduleAddr), diags diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go index 01e8092de199..a80fd5e4b1aa 100644 --- a/internal/terraform/context_plan.go +++ b/internal/terraform/context_plan.go @@ -749,12 +749,9 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o // See the documentation for these plan fields to learn what exactly they // are intended to mean. if !diags.HasErrors() { - if len(opts.Targets) == 0 { - // A plan without any targets should be complete if we didn't encounter - // errors while producing it. - // TODO: Once we support "deferred actions" for resources, the - // presence of any of those should cause the plan to be marked - // incomplete too. + if len(opts.Targets) == 0 && !walker.Deferrals.HaveAnyDeferrals() { + // A plan without any targets or deferred actions should be + // complete if we didn't encounter errors while producing it. log.Println("[TRACE] Plan is complete") plan.Complete = true } else { diff --git a/internal/terraform/context_walk.go b/internal/terraform/context_walk.go index 640dd06b84cf..5a0098116c7b 100644 --- a/internal/terraform/context_walk.go +++ b/internal/terraform/context_walk.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform/internal/moduletest/mocking" "github.com/hashicorp/terraform/internal/namedvals" "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/internal/plans/deferring" "github.com/hashicorp/terraform/internal/providers" "github.com/hashicorp/terraform/internal/refactoring" "github.com/hashicorp/terraform/internal/states" @@ -68,7 +69,7 @@ type graphWalkOpts struct { func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpts) (*ContextGraphWalker, tfdiags.Diagnostics) { log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - walker := c.graphWalker(operation, opts) + walker := c.graphWalker(graph, operation, opts) // Watch for a stop so we can call the provider Stop() API. 
watchStop, watchWait := c.watchStop(walker) @@ -83,7 +84,7 @@ func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpt return walker, diags } -func (c *Context) graphWalker(operation walkOperation, opts *graphWalkOpts) *ContextGraphWalker { +func (c *Context) graphWalker(graph *Graph, operation walkOperation, opts *graphWalkOpts) *ContextGraphWalker { var state *states.SyncState var refreshState *states.SyncState var prevRunState *states.SyncState @@ -158,6 +159,11 @@ func (c *Context) graphWalker(operation walkOperation, opts *graphWalkOpts) *Con } } + // We'll produce a derived graph that only includes the static resource + // blocks, since we need that for deferral tracking. + resourceGraph := graph.ResourceGraph() + deferred := deferring.NewDeferred(resourceGraph) + return &ContextGraphWalker{ Context: c, State: state, @@ -167,6 +173,7 @@ func (c *Context) graphWalker(operation walkOperation, opts *graphWalkOpts) *Con PrevRunState: prevRunState, Changes: changes.SyncWrapper(), NamedValues: namedvals.NewState(), + Deferrals: deferred, Checks: checkState, InstanceExpander: instances.NewExpander(), ExternalProviderConfigs: opts.ExternalProviderConfigs, diff --git a/internal/terraform/eval_context.go b/internal/terraform/eval_context.go index 91b842b255ef..ae784b779926 100644 --- a/internal/terraform/eval_context.go +++ b/internal/terraform/eval_context.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform/internal/moduletest/mocking" "github.com/hashicorp/terraform/internal/namedvals" "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/internal/plans/deferring" "github.com/hashicorp/terraform/internal/providers" "github.com/hashicorp/terraform/internal/provisioners" "github.com/hashicorp/terraform/internal/refactoring" @@ -176,6 +177,11 @@ type EvalContext interface { // EvalContext objects for a given configuration. InstanceExpander() *instances.Expander + // Deferrals returns a helper object for tracking deferred actions, which + // means that Terraform either cannot plan an action at all or cannot + // perform a planned action due to an upstream dependency being deferred. + Deferrals() *deferring.Deferred + // MoveResults returns a map describing the results of handling any // resource instance move statements prior to the graph walk, so that // the graph walk can then record that information appropriately in other diff --git a/internal/terraform/eval_context_builtin.go b/internal/terraform/eval_context_builtin.go index 04c897012547..b5b9863a1064 100644 --- a/internal/terraform/eval_context_builtin.go +++ b/internal/terraform/eval_context_builtin.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/terraform/internal/moduletest/mocking" "github.com/hashicorp/terraform/internal/namedvals" "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/internal/plans/deferring" "github.com/hashicorp/terraform/internal/providers" "github.com/hashicorp/terraform/internal/provisioners" "github.com/hashicorp/terraform/internal/refactoring" @@ -64,6 +65,9 @@ type BuiltinEvalContext struct { // stack configuration. ExternalProviderConfigs map[addrs.RootProviderConfig]providers.Interface + // DeferralsValue is the object returned by [BuiltinEvalContext.Deferrals]. 
+ DeferralsValue *deferring.Deferred + Hooks []Hook InputValue UIInput ProviderCache map[string]providers.Interface @@ -575,6 +579,10 @@ func (ctx *BuiltinEvalContext) NamedValues() *namedvals.State { return ctx.NamedValuesValue } +func (ctx *BuiltinEvalContext) Deferrals() *deferring.Deferred { + return ctx.DeferralsValue +} + func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { return ctx.ChangesValue } diff --git a/internal/terraform/eval_context_mock.go b/internal/terraform/eval_context_mock.go index ba6816e3751a..cabb21003064 100644 --- a/internal/terraform/eval_context_mock.go +++ b/internal/terraform/eval_context_mock.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform/internal/moduletest/mocking" "github.com/hashicorp/terraform/internal/namedvals" "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/internal/plans/deferring" "github.com/hashicorp/terraform/internal/providers" "github.com/hashicorp/terraform/internal/provisioners" "github.com/hashicorp/terraform/internal/refactoring" @@ -125,6 +126,9 @@ type MockEvalContext struct { NamedValuesCalled bool NamedValuesState *namedvals.State + DeferralsCalled bool + DeferralsState *deferring.Deferred + ChangesCalled bool ChangesChanges *plans.ChangesSync @@ -354,6 +358,11 @@ func (c *MockEvalContext) NamedValues() *namedvals.State { return c.NamedValuesState } +func (c *MockEvalContext) Deferrals() *deferring.Deferred { + c.DeferralsCalled = true + return c.DeferralsState +} + func (c *MockEvalContext) Changes() *plans.ChangesSync { c.ChangesCalled = true return c.ChangesChanges diff --git a/internal/terraform/evaluate.go b/internal/terraform/evaluate.go index 8157b1d1ab9e..1a5ee7ce564b 100644 --- a/internal/terraform/evaluate.go +++ b/internal/terraform/evaluate.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/terraform/internal/lang/marks" "github.com/hashicorp/terraform/internal/namedvals" "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/internal/plans/deferring" "github.com/hashicorp/terraform/internal/states" "github.com/hashicorp/terraform/internal/tfdiags" ) @@ -51,6 +52,11 @@ type Evaluator struct { // variables, local values, and output values. NamedValues *namedvals.State + // Deferrals tracks resources and modules that have had either their + // expansion or their specific planned actions deferred to a future + // plan/apply round. + Deferrals *deferring.Deferred + // Plugins is the library of available plugin components (providers and // provisioners) that we have available to help us evaluate expressions // that interact with plugin-provided objects. 
diff --git a/internal/terraform/graph_walk_context.go b/internal/terraform/graph_walk_context.go
index 2aba9116c4c0..7324b12defa8 100644
--- a/internal/terraform/graph_walk_context.go
+++ b/internal/terraform/graph_walk_context.go
@@ -17,6 +17,7 @@ import (
 	"github.com/hashicorp/terraform/internal/moduletest/mocking"
 	"github.com/hashicorp/terraform/internal/namedvals"
 	"github.com/hashicorp/terraform/internal/plans"
+	"github.com/hashicorp/terraform/internal/plans/deferring"
 	"github.com/hashicorp/terraform/internal/providers"
 	"github.com/hashicorp/terraform/internal/provisioners"
 	"github.com/hashicorp/terraform/internal/refactoring"
@@ -38,6 +39,7 @@ type ContextGraphWalker struct {
 	Checks           *checks.State       // Used for safe concurrent writes of checkable objects and their check results
 	NamedValues      *namedvals.State    // Tracks evaluation of input variables, local values, and output values
 	InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances
+	Deferrals        *deferring.Deferred // Tracks any deferred actions
 	Imports          []configs.Import
 	MoveResults      refactoring.MoveResults // Read-only record of earlier processing of move statements
 	Operation        walkOperation
@@ -101,6 +103,7 @@ func (w *ContextGraphWalker) EvalContext() EvalContext {
 		Plugins:                 w.Context.plugins,
 		Instances:               w.InstanceExpander,
 		NamedValues:             w.NamedValues,
+		Deferrals:               w.Deferrals,
 		PlanTimestamp:           w.PlanTimestamp,
 	}
 
@@ -122,6 +125,7 @@
 		ChangesValue:            w.Changes,
 		ChecksValue:             w.Checks,
 		NamedValuesValue:        w.NamedValues,
+		DeferralsValue:          w.Deferrals,
 		StateValue:              w.State,
 		RefreshStateValue:       w.RefreshState,
 		PrevRunStateValue:       w.PrevRunState,

From 347f723233ed85dbc9d20238ff9f6e0377d96ac2 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 7 Feb 2024 14:07:31 -0800
Subject: [PATCH 07/13] core: Resource nodes aware when unknown_instances
 experiment is on

Usually language experiments have single-module scope, but this
particular one can potentially force modules other than the one that
opted in to behave differently in order to produce correct results.

To minimize the risk of the new experiment code impacting those who are
not participating in the experiment, we'll want to branch into a new
DynamicExpand codepath only when at least one module is participating.
Graph nodes alone have a more tightly-scoped view that prevents them
from answering that question directly, so we'll temporarily ask the
ConfigTransformer to detect it up front and propagate the result into
all of the resource nodes.

This commit doesn't yet make use of the new temporary field; that'll
follow in a subsequent commit that introduces the new
partial-expansion-aware alternative DynamicExpand codepath.
---
 internal/terraform/node_resource_abstract.go |  7 ++++++
 internal/terraform/transform_config.go       | 23 ++++++++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/internal/terraform/node_resource_abstract.go b/internal/terraform/node_resource_abstract.go
index 9af4acb5d56e..37bb05669cb4 100644
--- a/internal/terraform/node_resource_abstract.go
+++ b/internal/terraform/node_resource_abstract.go
@@ -83,6 +83,13 @@ type NodeAbstractResource struct {
 	// generateConfigPath tells this node which file to write generated config
 	// into. If empty, then config should not be generated.
generateConfigPath string + + // TEMP: [ConfigTransformer] sets this to true when at least one module + // in the configuration has opted in to the unknown_instances experiment. + // See the field of the same name in [ConfigTransformer] for more details. + // (And if that field has been removed already, then this one should've + // been too!) + unknownInstancesExperimentEnabled bool } var ( diff --git a/internal/terraform/transform_config.go b/internal/terraform/transform_config.go index ead1adb53de6..878fbc85ce82 100644 --- a/internal/terraform/transform_config.go +++ b/internal/terraform/transform_config.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/configs" "github.com/hashicorp/terraform/internal/dag" + "github.com/hashicorp/terraform/internal/experiments" "github.com/hashicorp/terraform/internal/tfdiags" ) @@ -50,6 +51,14 @@ type ConfigTransformer struct { // try to delete the imported resource unless the config is updated // manually. generateConfigPathForImportTargets string + + // TEMP: [ConfigTransformer.Transform] sets this to true if at least one + // module in the configuration has the "unknown_instances" language + // experiment enabled, because this particular experiment has cross-module + // implications (a module call with unknown instances affects everything + // beneath it in the tree) but we want to avoid activating the experimental + // code in the common case where no module is using it at all. + unknownInstancesExperimentEnabled bool } func (t *ConfigTransformer) Transform(g *Graph) error { @@ -62,6 +71,16 @@ func (t *ConfigTransformer) Transform(g *Graph) error { return nil } + // TEMP: Before we go further, we'll decide whether we're going to activate + // the experimental new behavior for the "unknown_instances" experiment. + // See the docstring for [ConfigTransformer.unknownInstancesExperimentEnabled] + // for more details. + t.Config.DeepEach(func(c *configs.Config) { + if c.Module != nil && c.Module.ActiveExperiments.Has(experiments.UnknownInstances) { + t.unknownInstancesExperimentEnabled = true + } + }) + // Start the transformation process return t.transform(g, t.Config) } @@ -161,6 +180,10 @@ func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) er Module: path, }, importTargets: imports, + + // TEMP: See the docs for this field in [ConfigTransformer] for + // more information. + unknownInstancesExperimentEnabled: t.unknownInstancesExperimentEnabled, } var node dag.Vertex = abstract From eabcecbdfbdca888981aeb9acfc0d0f1a55469b3 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Feb 2024 16:33:14 -0800 Subject: [PATCH 08/13] terraform: nodeExpandModule must check experiments on moduleCtx Previously we were incorrectly checking for the experiment being enabled on the global EvalContext, but that doesn't make sense because experiments are explicitly module-specific. Now we'll ask the question separately for each module so we can use the module-scoped EvalContext. 
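For reference, the per-module opt-in that this check is looking for is the
module's own experiments declaration; the test fixture added later in this
series uses exactly this form:

    terraform {
      experiments = [unknown_instances]
    }

Because experiments are module-specific, a caller declaring this block does
not enable the experiment for the modules it calls; each module that needs
the experimental behavior has to declare it for itself.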
--- internal/terraform/node_module_expand.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/internal/terraform/node_module_expand.go b/internal/terraform/node_module_expand.go index 271703836ee0..ed528f7f0984 100644 --- a/internal/terraform/node_module_expand.go +++ b/internal/terraform/node_module_expand.go @@ -107,20 +107,21 @@ func (n *nodeExpandModule) Execute(globalCtx EvalContext, op walkOperation) (dia expander := globalCtx.InstanceExpander() _, call := n.Addr.Call() - // Allowing unknown values in count and for_each is currently only an - // experimental feature. This will hopefully become the default (and only) - // behavior in future, if the experiment is successful. - // - // If this is false then the codepaths that handle unknown values below - // become unreachable, because the evaluate functions will reject unknown - // values as an error. - allowUnknown := globalCtx.LanguageExperimentActive(experiments.UnknownInstances) - // nodeExpandModule itself does not have visibility into how its ancestors // were expanded, so we use the expander here to provide all possible paths // to our module, and register module instances with each of them. for _, module := range expander.ExpandModule(n.Addr.Parent()) { moduleCtx := evalContextForModuleInstance(globalCtx, module) + + // Allowing unknown values in count and for_each is currently only an + // experimental feature. This will hopefully become the default (and only) + // behavior in future, if the experiment is successful. + // + // If this is false then the codepaths that handle unknown values below + // become unreachable, because the evaluate functions will reject unknown + // values as an error. + allowUnknown := moduleCtx.LanguageExperimentActive(experiments.UnknownInstances) + switch { case n.ModuleCall.Count != nil: count, ctDiags := evaluateCountExpression(n.ModuleCall.Count, moduleCtx, allowUnknown) From 660e1e018406283a15f1185556b93ffc0f29635e Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Feb 2024 16:43:30 -0800 Subject: [PATCH 09/13] terraform: BuiltinEvalContext function eval in partial-expanded module An earlier commit changed the EvalContext model to support evaluation in both fully-expanded and partial-expanded modules, but the provider-contributed functions feature landed semi-concurrently with it and so inadvertently introduced a codepath that only worked in the fully-expanded case. This will now handle both situations, since all we really need is the addrs.Module, which we can obtain in both modes. --- internal/terraform/eval_context_builtin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/terraform/eval_context_builtin.go b/internal/terraform/eval_context_builtin.go index b5b9863a1064..a9ba7cbb8005 100644 --- a/internal/terraform/eval_context_builtin.go +++ b/internal/terraform/eval_context_builtin.go @@ -489,7 +489,7 @@ func (ctx *BuiltinEvalContext) evaluationExternalFunctions() lang.ExternalFuncs // by the module author. ret := lang.ExternalFuncs{} - cfg := ctx.Evaluator.Config.DescendentForInstance(ctx.Path()) + cfg := ctx.Evaluator.Config.Descendent(ctx.scope.evalContextScopeModule()) if cfg == nil { // It's weird to not have a configuration by this point, but we'll // tolerate it for robustness and just return no functions at all. 
From f0335acad4807b6674f3911f760c18ec9fa2eec3 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 7 Feb 2024 16:25:15 -0800
Subject: [PATCH 10/13] terraform: Evaluator returns DynamicVal for
 partial-expanded resource

For now this is just a weird up-front exception to minimize the impact on
the code that runs when not participating in the unknown_instances
experiment.

In future it would be nice to rework this GetResource method to rely on
the ResourceInstanceKeys result even in the known case, since that will
simplify this function and remove some duplicate code, but our priority
for now is minimizing the possibility that the code for this experiment
can impact users who are not participating in the experiment.
---
 internal/terraform/evaluate.go | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/internal/terraform/evaluate.go b/internal/terraform/evaluate.go
index 1a5ee7ce564b..9be98c34b67e 100644
--- a/internal/terraform/evaluate.go
+++ b/internal/terraform/evaluate.go
@@ -574,6 +574,34 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc
 		return cty.DynamicVal, diags
 	}
 
+	// Much of this function was written before we had factored out the
+	// handling of instance keys into the separate instance expander model,
+	// and so it does a bunch of instance-related work itself below. While
+	// the possibility of unknown instance keys remains experimental
+	// (behind the unknown_instances language experiment) we'll use this
+	// function only for detecting that experimental situation, but leave
+	// the rest of this function unchanged for now to minimize the chances
+	// of the experiment code affecting someone who isn't participating.
+	//
+	// TODO: If we decide to stabilize the unknown_instances experiment
+	// then it would be nice to finally rework this function to rely
+	// on the ResourceInstanceKeys result for _all_ of its work, rather
+	// than continuing to duplicate a bunch of the logic we've tried to
+	// encapsulate over there already.
+	if d.Operation == walkPlan {
+		if _, _, hasUnknownKeys := d.Evaluator.Instances.ResourceInstanceKeys(addr.Absolute(moduleAddr)); hasUnknownKeys {
+			// There really isn't anything interesting we can do in this situation,
+			// because it means we have an unknown for_each/count, in which case
+			// we can't even predict what the result type will be because it
+			// would be either an object or tuple type decided based on the instance
+			// keys.
+			// (We can't get in here for a single-instance resource because in that
+			// case we would know that there's only one key and it's addrs.NoKey,
+			// so we'll fall through to the other logic below.)
+			return cty.DynamicVal, diags
+		}
+	}
+
 	// Build the provider address from configuration, since we may not have
 	// state available in all cases.
 	// We need to build an abs provider address, but we can use a default

From 5d144f5c37c1a78f322bfba44a116a1c2e4ff2a7 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Thu, 1 Feb 2024 11:10:50 -0800
Subject: [PATCH 11/13] terraform: Handle partial-expanded resources during
 planning

When at least one module in the configuration is opted in to the
unknown_instances language experiment, we'll now divert into an
experimental adaptation of nodeExpandPlannableResource.DynamicExpand
which knows how to deal with partial-expanded modules and resources.
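As a rough sketch of the situation this handles, abridged from the
TestContext2Apply_deferredActionsResourceForEach fixture added in this
commit (which uses a mock provider whose resource type is named just
"test"), a resource whose for_each value isn't known during the first
plan looks like this:

    variable "each" {
      type = set(string)
    }

    resource "test" "b" {
      for_each = var.each # unknown in the first plan/apply round

      name = "b:${each.key}"
    }

While var.each is unknown we can't enumerate the instances of test.b at
all, so the plan graph has to represent it as a partial-expanded resource
prefix rather than as a set of concrete instances.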
Whenever a partial-expanded resource prefix is detected, we'll make a special graph node for it which goes through some of the motions of planning but does so only really for the side-effect of checking the configuration for dynamic errors. We do also save the result as a placeholder value for the object for completeness, although it's not really used for much yet because the partial-expansion tracker saves this primarily for reporting information about the deferred changes as extra context in the plan, and we're not implementing that yet. This isn't yet quite right because we're not preserving enough information from plan phase to apply phase to correctly handle references to the resources that had deferred actions. However, since this is currently just an experiment anyway, this is a useful interim milestone that shows at least that things are functional enough to converge on the correct result in the end. We'll correct the evaluation misses (as documented in the TestContext2Apply_deferredActionsResourceForEach test) in subsequent commits, which will require tracking a little more information in the saved plan so that we can re-populate all of the deferral metadata when we start the apply phase. --- internal/terraform/context_apply2_test.go | 484 ++++++++++++++++++ internal/terraform/node_resource_plan.go | 236 +++++++++ .../terraform/node_resource_plan_instance.go | 126 +++-- .../node_resource_plan_partialexp.go | 291 +++++++++++ 4 files changed, 1081 insertions(+), 56 deletions(-) create mode 100644 internal/terraform/node_resource_plan_partialexp.go diff --git a/internal/terraform/context_apply2_test.go b/internal/terraform/context_apply2_test.go index cd08cfd3a6b6..51424f2c99b9 100644 --- a/internal/terraform/context_apply2_test.go +++ b/internal/terraform/context_apply2_test.go @@ -14,6 +14,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty-debug/ctydebug" "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/internal/addrs" @@ -2637,6 +2638,489 @@ resource "test_object" "foo" { } } +func TestContext2Apply_deferredActionsResourceForEach(t *testing.T) { + // This test exercises a situation that requires two plan/apply + // rounds to achieve convergence when starting from an empty state. + // + // The scenario is slightly contrived to make the scenario easier + // to orchestrate: the unknown for_each value originates from an + // input variable rather than directly from a resource. This + // scenario _is_ realistic in a Terraform Stacks context, if we + // pretend that the input variable is being populated from an + // as-yet-unknown result from an upstream component. + + cfg := testModuleInline(t, map[string]string{ + "main.tf": ` + // TEMP: unknown for_each currently requires an experiment opt-in. + // We should remove this block if the experiment gets stabilized. 
+ terraform { + experiments = [unknown_instances] + } + + variable "each" { + type = set(string) + } + + resource "test" "a" { + name = "a" + } + + resource "test" "b" { + for_each = var.each + + name = "b:${each.key}" + upstream_names = [test.a.name] + } + + resource "test" "c" { + name = "c" + upstream_names = setunion( + [for v in test.b : v.name], + [test.a.name], + ) + } + + output "a" { + value = test.a + } + output "b" { + value = test.b + } + output "c" { + value = test.c + } + `, + }) + + var plannedVals sync.Map + var appliedVals sync.Map + p := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Required: true, + }, + "upstream_names": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + }, + }, + }, + PlanResourceChangeFn: func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + mapKey := "" + if v := req.Config.GetAttr("name"); v.IsKnown() { + mapKey = v.AsString() + } + plannedVals.Store(mapKey, req.ProposedNewState) + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + }, + ApplyResourceChangeFn: func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + mapKey := req.Config.GetAttr("name").AsString() // should always be known during apply + appliedVals.Store(mapKey, req.PlannedState) + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + }, + } + state := states.NewState() + reset := func(newState *states.State) { + if newState != nil { + state = newState + } + plannedVals = sync.Map{} + appliedVals = sync.Map{} + } + normalMap := func(m *sync.Map) map[any]any { + ret := make(map[any]any) + m.Range(func(key, value any) bool { + ret[key] = value + return true + }) + return ret + } + resourceInstancesActionsInPlan := func(p *plans.Plan) map[string]plans.Action { + ret := make(map[string]plans.Action) + for _, cs := range p.Changes.Resources { + // Anything that was deferred will not appear in the result at + // all. Non-deferred actions that don't actually need to do anything + // _will_ appear, but with action set to [plans.NoOp]. + ret[cs.Addr.String()] = cs.Action + } + return ret + } + outputValsInState := func(s *states.State) map[string]cty.Value { + ret := make(map[string]cty.Value) + for n, v := range s.RootOutputValues { + ret[n] = v.Value + } + return ret + } + cmpOpts := cmp.Options{ + ctydebug.CmpOptions, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + reset(states.NewState()) + + // Round 1: Should plan and apply changes only for test.a + { + t.Logf("--- First plan: var.each is unknown ---") + plan, diags := ctx.Plan(cfg, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "each": { + Value: cty.DynamicVal, + SourceType: ValueFromCaller, + }, + }, + }) + assertNoDiagnostics(t, diags) + checkPlanApplyable(t, plan) + if plan.Complete { + t.Fatal("plan is complete; should have deferred actions") + } + + gotPlanned := normalMap(&plannedVals) + wantPlanned := map[any]any{ + // We should've still got one plan request for each resource block, + // but the one for "b" doesn't yet know its own name, so is + // captured under "". 
+ "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + "": cty.ObjectVal(map[string]cty.Value{ + // this one is the placeholder for all possible instances of test.b, + // describing only what they all have in common. + // Terraform knows at least that all of the names start + // with "b:", even though the rest of the name is unknown. + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("b:"). + NotNull(). + NewValue(), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.UnknownVal(cty.Set(cty.String)).RefineNotNull(), + }), + } + if diff := cmp.Diff(wantPlanned, gotPlanned, cmpOpts); diff != "" { + t.Fatalf("wrong planned objects\n%s", diff) + } + + gotActions := resourceInstancesActionsInPlan(plan) + wantActions := map[string]plans.Action{ + // Only test.a has a planned action + "test.a": plans.Create, + // test.b was deferred because of its unknown for_each + // test.c was deferred because it depends on test.b + } + if diff := cmp.Diff(wantActions, gotActions, cmpOpts); diff != "" { + t.Fatalf("wrong actions in plan\n%s", diff) + } + // TODO: Once we are including information about the individual + // deferred actions in the plan, this would be a good place to + // assert that they are correct! + + reset(nil) + t.Logf("--- First apply: only dealing with test.a for now ---") + newState, diags := ctx.Apply(plan, cfg, &ApplyOpts{}) + assertNoDiagnostics(t, diags) + + gotApplied := normalMap(&appliedVals) + wantApplied := map[any]any{ + // Only test.a had non-deferred actions, so we only visited that + // one during the apply step. + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + } + if diff := cmp.Diff(wantApplied, gotApplied, cmpOpts); diff != "" { + t.Fatalf("wrong applied objects\n%s", diff) + } + + gotOutputs := outputValsInState(newState) + // FIXME: The system is currently producing incorrect results for + // output values that are derived from resources that had deferred + // actions, because we're not quite reconstructing all of the deferral + // state correctly during the apply phase. The commented-out wantOutputs + // below shows how this _ought_ to look, but we're accepting the + // incorrect answer for now so we can start go gather feedback on the + // experiment sooner, since the output value state at the interim + // steps isn't really that important for demonstrating the overall + // effect. We should fix this before stabilizing the experiment, though. + /* + wantOutputs := map[string]cty.Value{ + // A is fully resolved and known. + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + // We can't say anything about test.b until we know what its instance keys are. + "b": cty.DynamicVal, + // test.c evaluates to the placeholder value that shows what we're + // expecting this object to look like in the next round. + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.UnknownVal(cty.Set(cty.String)).RefineNotNull(), + }), + } + */ + wantOutputs := map[string]cty.Value{ + // A is fully resolved and known. 
+ "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + // Currently we produce an incorrect result for output value "b" + // because the expression evaluator doesn't realize it's supposed + // to be treating this as deferred during the apply phase, and + // so it incorrectly decides that there are no instances due to + // the lack of instances in the state. + "b": cty.EmptyObjectVal, + // Currently we produce an incorrect result for output value "c" + // because the expression evaluator doesn't realize it's supposed + // to be treating this as deferred during the apply phase, and + // so it incorrectly decides that there is instance due to + // the lack of instances in the state. + "c": cty.NullVal(cty.DynamicPseudoType), + } + if diff := cmp.Diff(gotOutputs, wantOutputs, cmpOpts); diff != "" { + t.Errorf("wrong output values\n%s", diff) + } + + // Save the new state to use in round 2, and also reset our + // provider-call-tracking maps. + t.Log("(round 1 complete)") + reset(newState) + } + + // Round 2: reach convergence by dealing with test.b and test.c + { + t.Logf("--- Second plan: var.each is now known, with two elements ---") + plan, diags := ctx.Plan(cfg, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "each": { + Value: cty.SetVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + }), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoDiagnostics(t, diags) + checkPlanCompleteAndApplyable(t, plan) + + gotPlanned := normalMap(&plannedVals) + wantPlanned := map[any]any{ + // test.a gets re-planned (to confirm that nothing has changed) + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + // test.b is now planned for real, once for each instance + "b:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:1"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "b:2": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:2"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + // test.c gets re-planned, so we can finalize its values + // based on the new results from test.b. + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b:1"), + cty.StringVal("b:2"), + }), + }), + } + if diff := cmp.Diff(wantPlanned, gotPlanned, cmpOpts); diff != "" { + t.Fatalf("wrong planned objects\n%s", diff) + } + + gotActions := resourceInstancesActionsInPlan(plan) + wantActions := map[string]plans.Action{ + // Since this plan is "complete", we expect to have a planned + // action for every resource instance, although test.a is + // no-op because nothing has changed for it since last round. + `test.a`: plans.NoOp, + `test.b["1"]`: plans.Create, + `test.b["2"]`: plans.Create, + `test.c`: plans.Create, + } + if diff := cmp.Diff(wantActions, gotActions, cmpOpts); diff != "" { + t.Fatalf("wrong actions in plan\n%s", diff) + } + + reset(nil) + t.Logf("--- Second apply: Getting everything else created ---") + newState, diags := ctx.Apply(plan, cfg, &ApplyOpts{}) + assertNoDiagnostics(t, diags) + + gotApplied := normalMap(&appliedVals) + wantApplied := map[any]any{ + // Since test.a is no-op, it isn't visited during apply. The + // other instances should all be applied, though. 
+ "b:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:1"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "b:2": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:2"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b:1"), + cty.StringVal("b:2"), + }), + }), + } + if diff := cmp.Diff(wantApplied, gotApplied, cmpOpts); diff != "" { + t.Fatalf("wrong applied objects\n%s", diff) + } + + gotOutputs := outputValsInState(newState) + wantOutputs := map[string]cty.Value{ + // Now everything should be fully resolved and known. + // A is fully resolved and known. + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:1"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "2": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:2"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b:1"), + cty.StringVal("b:2"), + }), + }), + } + if diff := cmp.Diff(gotOutputs, wantOutputs, cmpOpts); diff != "" { + t.Errorf("wrong output values\n%s", diff) + } + + t.Log("(round 2 complete)") + reset(newState) + } + + // Round 3: Confirm that everything is converged + { + t.Logf("--- Final plan: convergence-checking ---") + plan, diags := ctx.Plan(cfg, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "each": { + Value: cty.SetVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + }), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoDiagnostics(t, diags) + checkPlanComplete(t, plan) + if plan.Applyable { + t.Error("plan is applyable; should be non-applyable because there should be nothing left to do!") + } + + gotPlanned := normalMap(&plannedVals) + wantPlanned := map[any]any{ + // Everything gets re-planned to confirm that nothing has changed. 
+ "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + }), + "b:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:1"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "b:2": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:2"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b:1"), + cty.StringVal("b:2"), + }), + }), + } + if diff := cmp.Diff(wantPlanned, gotPlanned, cmpOpts); diff != "" { + t.Fatalf("wrong planned objects\n%s", diff) + } + + gotActions := resourceInstancesActionsInPlan(plan) + wantActions := map[string]plans.Action{ + // No changes needed + `test.a`: plans.NoOp, + `test.b["1"]`: plans.NoOp, + `test.b["2"]`: plans.NoOp, + `test.c`: plans.NoOp, + } + if diff := cmp.Diff(wantActions, gotActions, cmpOpts); diff != "" { + t.Fatalf("wrong actions in plan\n%s", diff) + } + + t.Log("(round 3 complete: all done!)") + } +} + func TestContext2Apply_forget(t *testing.T) { addrA := mustResourceInstanceAddr("test_object.a") m := testModuleInline(t, map[string]string{ diff --git a/internal/terraform/node_resource_plan.go b/internal/terraform/node_resource_plan.go index 18bcb2483bfa..2ed9ac4152c0 100644 --- a/internal/terraform/node_resource_plan.go +++ b/internal/terraform/node_resource_plan.go @@ -108,6 +108,30 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf expander := ctx.InstanceExpander() moduleInstances := expander.ExpandModule(n.Addr.Module) + // The possibility of partial-expanded modules and resources is + // currently guarded by a language experiment, and so to minimize the + // risk of that experiment impacting mainline behavior we currently + // branch off into an entirely-separate codepath in those situations, + // at the expense of duplicating some of the logic for behavior this + // method would normally handle. + // + // Normally language experiments are confined to only a single module, + // but this one has potential cross-module impact once enabled for at + // least one, and so this flag is true if _any_ module in the configuration + // has opted in to the experiment. Our intent is for this different + // codepath to produce the same results when there aren't any + // partial-expanded modules, but bugs might make that not true and so + // this is conservative to minimize the risk of breaking things for + // those who aren't participating in the experiment. + // + // TODO: If this experiment is stablized then we should aim to combine + // these two codepaths back together, so that the behavior is less likely + // to diverge under future maintenence. + if n.unknownInstancesExperimentEnabled { + pem := expander.UnknownModuleInstances(n.Addr.Module) + return n.dynamicExpandWithUnknownInstancesExperiment(ctx, moduleInstances, pem) + } + // Lock the state while we inspect it state := ctx.State().Lock() @@ -194,6 +218,218 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf return &g, diags } +// dynamicExpandWithUnknownInstancesExperiment is a temporary experimental +// variant of DynamicExpand that we use when at least one module is +// participating in the "unknown_instances" language experiment. 
+// +// This is not exactly in the typical spirit of language experiments in that +// the effect is not scoped only to the module where the opt-in is declared: +// if there are bugs in this method then they could potentially also affect +// resources in modules not directly participating. We're accepting that +// as a pragmatic compromise here since unknown expansion of a module call +// is inherently a cross-module concern. +// +// If we move forward with unknown instances as a stable feature then we +// should find a way to meld this logic with the main DynamicExpand logic, +// but it's separate for now to minimize the risk of the experiment impacting +// configurations that are not opted into it. +func (n *nodeExpandPlannableResource) dynamicExpandWithUnknownInstancesExperiment(globalCtx EvalContext, knownInsts []addrs.ModuleInstance, partialInsts addrs.Set[addrs.PartialExpandedModule]) (*Graph, tfdiags.Diagnostics) { + var g Graph + var diags tfdiags.Diagnostics + + // We need to resolve the expansions of the resource itself, separately + // for each of the dynamic module prefixes it appears under. + knownAddrs := addrs.MakeSet[addrs.AbsResourceInstance]() + partialExpandedAddrs := addrs.MakeSet[addrs.PartialExpandedResource]() + for _, moduleAddr := range knownInsts { + resourceAddr := n.Addr.Resource.Absolute(moduleAddr) + // The rest of our work here needs to know which module instance it's + // working in, so that it can evaluate expressions in the appropriate scope. + moduleCtx := evalContextForModuleInstance(globalCtx, resourceAddr.Module) + + // writeResourceState calculates the dynamic expansion of the given + // resource as a side-effect, along with its other work. + moreDiags := n.writeResourceState(moduleCtx, resourceAddr) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + continue + } + + // We can now ask for all of the individual resource instances that + // we know, or for those with not-yet-known expansion. + expander := moduleCtx.InstanceExpander() + _, knownInstKeys, haveUnknownKeys := expander.ResourceInstanceKeys(resourceAddr) + for _, instKey := range knownInstKeys { + instAddr := resourceAddr.Instance(instKey) + knownAddrs.Add(instAddr) + } + if haveUnknownKeys { + partialAddr := moduleAddr.UnexpandedResource(resourceAddr.Resource) + partialExpandedAddrs.Add(partialAddr) + } + } + for _, moduleAddr := range partialInsts { + // Resources that appear under partial-expanded module prefixes are + // also partial-expanded resource addresses. + partialAddr := moduleAddr.Resource(n.Addr.Resource) + partialExpandedAddrs.Add(partialAddr) + } + // If we accumulated any error diagnostics in our work so far then + // we'll just bail out at this point. + if diags.HasErrors() { + return nil, diags + } + + // We need to search the prior state for any resource instances that + // belong to module instances that are no longer declared in the + // configuration, which is one way a resource instance can be classified + // as an "orphan". + // + // However, if any instance is under a partial-expanded prefix then + // we can't know whether it's still desired or not, and so we'll need + // to defer dealing with it to a future plan/apply round. + // + // We need to compare with the resource instances we can find in the + // state, so we'll need to briefly hold the state lock while we inspect + // those. The following inline function limits the scope of the lock. 
+ orphanAddrs := addrs.MakeSet[addrs.AbsResourceInstance]() + maybeOrphanAddrs := addrs.MakeSet[addrs.AbsResourceInstance]() + func() { + ss := globalCtx.PrevRunState() + state := ss.Lock() + defer ss.Unlock() + + for _, res := range state.Resources(n.Addr) { + Instances: + for instKey := range res.Instances { + instAddr := res.Addr.Instance(instKey) + + for _, partialAddr := range partialExpandedAddrs { + if partialAddr.MatchesInstance(instAddr) { + // The instance is beneath a partial-expanded prefix, so + // we can't decide yet whether it's an orphan or not, + // but we'll still note it so we can make sure to + // refresh its state. + maybeOrphanAddrs.Add(instAddr) + continue Instances + } + } + if !knownAddrs.Has(instAddr) { + // If we get here then the instance is not under an + // partial-expanded prefix and is not in our set of + // fully-known desired state instances, and so it's + // an "orphan". + orphanAddrs.Add(instAddr) + } + } + } + }() + + // TEMP: The code that deals with some other language/workflow features + // is not yet updated to be able to handle partial-expanded resource + // address prefixes, to constrain the scope of the initial experimental + // implementation. We'll reject some of those cases with errors, just to + // be explicit that they don't work rather than just quietly doing + // something incomplete/broken/strange. + if len(partialExpandedAddrs) != 0 { + // Some other parts of the system aren't yet able to make sense of + // partial-expanded resource addresses, so we'll forbid them for + // now and improve on this in later iterations of the experiment. + if len(n.Targets) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot use resource targeting with unknown count or for_each", + "In the current phase of the unknown_instances language experiment, the -target=... planning option is not yet supported whenever unknown count or for_each are present.", + )) + } + if len(n.forceReplace) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot use forced replacement with unknown count or for_each", + "In the current phase of the unknown_instances language experiment, the -replace=... planning option is not yet supported whenever unknown count or for_each are present.", + )) + } + if diags.HasErrors() { + return nil, diags + } + } + + // At this point we have four different sets of resource instance + // addresses: + // - knownAddrs are definitely in the desired state. They may or may not + // also be in the previous run state. + // - partialExpandedAddrs are unbounded sets of instances that _might_ + // be in the desired state, but we can't know until a future round. + // - orphanAddrs are in the previous run state but definitely not in + // the desired state. + // - maybeOrphanAddrs are in the previous run state and we can't know + // whether they are in the desired state until a future round. + // + // Each resource instance in the union of all of the above sets needs to + // be represented as part of _some_ graph node, but we'll build them + // differently depending on which set they came from. + for _, addr := range knownAddrs { + log.Printf("[TRACE] nodeExpandPlannableResource: %s is definitely in the desired state", addr) + v := &NodePlannableResourceInstance{ + NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr), + skipRefresh: n.skipRefresh, + skipPlanChanges: n.skipPlanChanges, + forceReplace: n.forceReplace, + // TODO: replaceTriggeredBy? + // TODO: importTarget? 
+ // TODO: ForceCreateBeforeDestroy? + } + v.ResolvedProvider = n.ResolvedProvider + v.Config = n.Config + g.Add(v) + } + for _, addr := range partialExpandedAddrs { + log.Printf("[TRACE] nodeExpandPlannableResource: desired instances matching %s are not yet known", addr) + v := &nodePlannablePartialExpandedResource{ + addr: addr, + config: n.Config, + resolvedProvider: n.ResolvedProvider, + skipPlanChanges: n.skipPlanChanges, + } + g.Add(v) + } + for _, addr := range orphanAddrs { + log.Printf("[TRACE] nodeExpandPlannableResource: %s is in previous state but no longer desired", addr) + v := &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr), + skipRefresh: n.skipRefresh, + skipPlanChanges: n.skipPlanChanges, + // TODO: forgetResources? + // TODO: forgetModules? + } + v.ResolvedProvider = n.ResolvedProvider + v.Config = n.Config + g.Add(v) + } + for _, addr := range maybeOrphanAddrs { + // For any object in the previous run state where we cannot yet know + // if it's an orphan, we can't yet properly plan it but we still + // want to refresh it, in the same way we would if this were a + // refresh-only plan. + log.Printf("[TRACE] nodeExpandPlannableResource: %s is in previous state but unknown whether it's still desired", addr) + v := &NodePlannableResourceInstance{ + NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr), + skipRefresh: n.skipRefresh, + skipPlanChanges: true, // We never plan for a "maybe-orphan" + forceReplace: n.forceReplace, + // TODO: replaceTriggeredBy? + // TODO: importTarget? + // TODO: ForceCreateBeforeDestroy? + } + v.ResolvedProvider = n.ResolvedProvider + v.Config = n.Config + g.Add(v) + } + + addRootNodeToGraph(&g) + return &g, diags +} + // validateExpandedImportTargets checks that all expanded imports correspond to // a configured instance. func (n *nodeExpandPlannableResource) validateExpandedImportTargets() tfdiags.Diagnostics { diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index d00b9f9bb998..2419f2557279 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -286,67 +286,81 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) change.ActionReason = plans.ResourceInstanceReplaceByTriggers } - // We intentionally write the change before the subsequent checks, because - // all of the checks below this point are for problems caused by the - // context surrounding the change, rather than the change itself, and - // so it's helpful to still include the valid-in-isolation change as - // part of the plan as additional context in our error output. - // - // FIXME: it is currently important that we write resource changes to - // the plan (n.writeChange) before we write the corresponding state - // (n.writeResourceInstanceState). - // - // This is because the planned resource state will normally have the - // status of states.ObjectPlanned, which causes later logic to refer to - // the contents of the plan to retrieve the resource data. Because - // there is no shared lock between these two data structures, reversing - // the order of these writes will cause a brief window of inconsistency - // which can lead to a failed safety check. - // - // Future work should adjust these APIs such that it is impossible to - // update these two data structures incorrectly through any objects - // reachable via the terraform.EvalContext API. 
- diags = diags.Append(n.writeChange(ctx, change, "")) - if diags.HasErrors() { - return diags - } - diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.checkPreventDestroy(change)) - if diags.HasErrors() { - return diags - } + deferrals := ctx.Deferrals() + if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr) { + // We intentionally write the change before the subsequent checks, because + // all of the checks below this point are for problems caused by the + // context surrounding the change, rather than the change itself, and + // so it's helpful to still include the valid-in-isolation change as + // part of the plan as additional context in our error output. + // + // FIXME: it is currently important that we write resource changes to + // the plan (n.writeChange) before we write the corresponding state + // (n.writeResourceInstanceState). + // + // This is because the planned resource state will normally have the + // status of states.ObjectPlanned, which causes later logic to refer to + // the contents of the plan to retrieve the resource data. Because + // there is no shared lock between these two data structures, reversing + // the order of these writes will cause a brief window of inconsistency + // which can lead to a failed safety check. + // + // Future work should adjust these APIs such that it is impossible to + // update these two data structures incorrectly through any objects + // reachable via the terraform.EvalContext API. + diags = diags.Append(n.writeChange(ctx, change, "")) + if diags.HasErrors() { + return diags + } + diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) + if diags.HasErrors() { + return diags + } - // If this plan resulted in a NoOp, then apply won't have a chance to make - // any changes to the stored dependencies. Since this is a NoOp we know - // that the stored dependencies will have no effect during apply, and we can - // write them out now. - if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { - // the refresh state will be the final state for this resource, so - // finalize the dependencies here if they need to be updated. - instanceRefreshState.Dependencies = n.Dependencies - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + diags = diags.Append(n.checkPreventDestroy(change)) if diags.HasErrors() { return diags } - } - // Post-conditions might block completion. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - // (Note that some preconditions will end up being skipped during - // planning, because their conditions depend on values not yet known.) - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, n.ResourceInstanceAddr(), repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) + // If this plan resulted in a NoOp, then apply won't have a chance to make + // any changes to the stored dependencies. Since this is a NoOp we know + // that the stored dependencies will have no effect during apply, and we can + // write them out now. 
+ if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { + // the refresh state will be the final state for this resource, so + // finalize the dependencies here if they need to be updated. + instanceRefreshState.Dependencies = n.Dependencies + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + if diags.HasErrors() { + return diags + } + } + + // Post-conditions might block completion. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + // (Note that some preconditions will end up being skipped during + // planning, because their conditions depend on values not yet known.) + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr(), repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + } else { + // The deferrals tracker says that we must defer changes for + // this resource instance, presumably due to a dependency on an + // upstream object that was already deferred. Therefore we just + // report our own deferral (capturing a placeholder value in the + // deferral tracker) and don't add anything to the plan or + // working state. + // In this case, the expression evaluator should use the placeholder + // value registered here as the value of this resource instance, + // instead of using the plan. + deferrals.ReportResourceInstanceDeferred(n.Addr, change.Action, change.After) + } } else { // In refresh-only mode we need to evaluate the for-each expression in // order to supply the value to the pre- and post-condition check diff --git a/internal/terraform/node_resource_plan_partialexp.go b/internal/terraform/node_resource_plan_partialexp.go new file mode 100644 index 000000000000..3851f147570e --- /dev/null +++ b/internal/terraform/node_resource_plan_partialexp.go @@ -0,0 +1,291 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/internal/instances" + "github.com/hashicorp/terraform/internal/plans/objchange" + "github.com/hashicorp/terraform/internal/providers" + "github.com/hashicorp/terraform/internal/tfdiags" +) + +// nodePlannablePartialExpandedResource is a graph node that stands in for +// an unbounded set of potential resource instances that we don't yet know. +// +// Its job is to check the configuration as much as we can with the information +// that's available (so we can raise an error early if something is clearly +// wrong across _all_ potential instances) and to record a placeholder value +// for use when evaluating other objects that refer to this resource. +// +// This is the partial-expanded equivalent of NodePlannableResourceInstance. 
+type nodePlannablePartialExpandedResource struct { + addr addrs.PartialExpandedResource + config *configs.Resource + resolvedProvider addrs.AbsProviderConfig + skipPlanChanges bool +} + +var ( + _ graphNodeEvalContextScope = (*nodePlannablePartialExpandedResource)(nil) + _ GraphNodeConfigResource = (*nodePlannablePartialExpandedResource)(nil) + _ GraphNodeExecutable = (*nodePlannablePartialExpandedResource)(nil) +) + +// Name implements [dag.NamedVertex]. +func (n *nodePlannablePartialExpandedResource) Name() string { + return n.addr.String() +} + +// Path implements graphNodeEvalContextScope. +func (n *nodePlannablePartialExpandedResource) Path() evalContextScope { + if moduleAddr, ok := n.addr.ModuleInstance(); ok { + return evalContextModuleInstance{Addr: moduleAddr} + } else if moduleAddr, ok := n.addr.PartialExpandedModule(); ok { + return evalContextPartialExpandedModule{Addr: moduleAddr} + } else { + // Should not get here: at least one of the two cases above + // should always be true for any valid addrs.PartialExpandedResource + panic("addrs.PartialExpandedResource has neither a partial-expanded or a fully-expanded module instance address") + } +} + +// ResourceAddr implements GraphNodeConfigResource. +func (n *nodePlannablePartialExpandedResource) ResourceAddr() addrs.ConfigResource { + return n.addr.ConfigResource() +} + +// Execute implements GraphNodeExecutable. +func (n *nodePlannablePartialExpandedResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + // Because this node type implements [graphNodeEvalContextScope], the + // given EvalContext could either be for a fully-expanded module instance + // or an unbounded set of potential module instances sharing a common + // prefix. The logic here doesn't need to vary between the two because + // the differences are encapsulated in the EvalContext abstraction, + // but if you're unsure which of the two is being used then look for + // the following line in the logs to see if there's a [*] marker on + // any of the module instance steps, or if the [*] is applied only to + // the resource itself. + // + // Fully-expanded module example: + // + // module.foo["a"].type.name[*] + // + // Partial-expanded module example: + // + // module.foo[*].type.name[*] + // + log.Printf("[TRACE] nodePlannablePartialExpandedResource: checking all of %s", n.addr.String()) + + var placeholderVal cty.Value + var diags tfdiags.Diagnostics + switch n.addr.Resource().Mode { + case addrs.ManagedResourceMode: + placeholderVal, diags = n.managedResourceExecute(ctx) + case addrs.DataResourceMode: + placeholderVal, diags = n.dataResourceExecute(ctx) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.config.Mode)) + } + + // Registering this allows downstream resources that depend on this one + // to know that they need to defer themselves too, in order to preserve + // correct dependency order. + ctx.Deferrals().ReportResourceExpansionDeferred(n.addr, placeholderVal) + return diags +} + +func (n *nodePlannablePartialExpandedResource) managedResourceExecute(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cannot fully plan partial-expanded resources because we don't know + // what addresses they will have, but in this function we'll still go + // through many of the familiar motions of planning so that we can give + // feedback sooner if we can prove that the configuration is already + // invalid even with the partial information we have here. 
This is just + // to shorten the iterative journey, so nothing here actually contributes + // new actions to the plan. + + provider, providerSchema, err := getProvider(ctx, n.resolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + + diags = diags.Append(validateSelfRef(n.addr.Resource(), n.config.Config, providerSchema)) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.addr.Resource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.addr.Resource().Type)) + return cty.DynamicVal, diags + } + + // TODO: Normal managed resource planning + // (in [NodePlannableResourceInstance.managedResourceExecute]) deals with + // some additional things that we're just ignoring here for now. We should + // confirm whether it's really okay to ignore them here or if we ought + // to be partial-populating some results. + // + // Including but not necessarily limited to: + // - Somehow reacting if one or more of the possible resource instances + // is affected by an import block + // - Evaluating the preconditions/postconditions to see if they produce + // a definitive fail result even with the partial information. + + if n.skipPlanChanges { + // If we're supposed to be making a refresh-only plan then there's + // not really anything else to do here, since we can only refresh + // specific known resource instances (which another graph node should + // handle), so we'll just return early. + return cty.DynamicVal, diags + } + + // Because we don't know the instance keys yet, we'll be evaluating using + // suitable unknown values for count.index, each.key, and each.value + // so that anything that varies between the instances will be unknown + // but we can still check the arguments that they all have in common. + var keyData instances.RepetitionData + switch { + case n.config.ForEach != nil: + // We don't actually know the `for_each` type here, but we do at least + // know it's for_each. + keyData = instances.UnknownForEachRepetitionData(cty.DynamicPseudoType) + case n.config.Count != nil: + keyData = instances.UnknownCountRepetitionData + default: + // If we get here then we're presumably a single-instance resource + // inside a multi-instance module whose instances aren't known yet, + // and so we'll evaluate without any of the repetition symbols to + // still generate the usual errors if someone tries to use them here. 
+		keyData = instances.RepetitionData{
+			CountIndex: cty.NilVal,
+			EachKey:    cty.NilVal,
+			EachValue:  cty.NilVal,
+		}
+	}
+
+	configVal, _, configDiags := ctx.EvaluateBlock(n.config.Config, schema, nil, keyData)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+
+	unmarkedConfigVal, _ := configVal.UnmarkDeep()
+	validateResp := provider.ValidateResourceConfig(
+		providers.ValidateResourceConfigRequest{
+			TypeName: n.addr.Resource().Type,
+			Config:   unmarkedConfigVal,
+		},
+	)
+	diags = diags.Append(validateResp.Diagnostics.InConfigBody(n.config.Config, n.addr.String()))
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+
+	unmarkedConfigVal, unmarkedPaths := configVal.UnmarkDeepWithPaths()
+	priorVal := cty.NullVal(schema.ImpliedType()) // we don't have any specific prior value to use
+	proposedNewVal := objchange.ProposedNew(schema, priorVal, unmarkedConfigVal)
+
+	// The provider now gets to plan an imaginary substitute that represents
+	// all of the possible resource instances together. Correctly-implemented
+	// providers should handle the extra unknown values here just as if they
+	// had been unknown in an individual instance's configuration, but we can
+	// still find out if any of the known values are somehow invalid and
+	// learn a subset of the "computed" attribute values to save as part
+	// of our placeholder value for downstream checks.
+	resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
+		TypeName:         n.addr.Resource().Type,
+		Config:           unmarkedConfigVal,
+		PriorState:       priorVal,
+		ProposedNewState: proposedNewVal,
+	})
+	diags = diags.Append(resp.Diagnostics.InConfigBody(n.config.Config, n.addr.String()))
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+
+	plannedNewVal := resp.PlannedState
+	if plannedNewVal == cty.NilVal {
+		// Should never happen. Since real-world providers return via RPC, a nil
+		// is always a bug in the client-side stub. This is more likely caused
+		// by an incompletely-configured mock provider in tests, though.
+		panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", n.addr.String()))
+	}
+
+	for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Provider produced invalid plan",
+			fmt.Sprintf(
+				"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+				n.resolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.addr.String()),
+			),
+		))
+	}
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+
+	if errs := objchange.AssertPlanValid(schema, priorVal, unmarkedConfigVal, plannedNewVal); len(errs) > 0 {
+		if resp.LegacyTypeSystem {
+			// The shimming of the old type system in the legacy SDK is not precise
+			// enough to pass this consistency check, so we'll give it a pass here,
+			// but we will generate a warning about it so that we are more likely
+			// to notice in the logs if an inconsistency beyond the type system
+			// leads to a downstream provider failure.
+			var buf strings.Builder
+			fmt.Fprintf(&buf,
+				"[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:",
+				n.resolvedProvider.Provider, n.addr.String(),
+			)
+			for _, err := range errs {
+				fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
+			}
+			log.Print(buf.String())
+		} else {
+			for _, err := range errs {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Provider produced invalid plan",
+					fmt.Sprintf(
+						"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+						n.resolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.addr.String()),
+					),
+				))
+			}
+			return cty.DynamicVal, diags
+		}
+	}
+
+	// We need to combine the dynamic marks with the static marks implied by
+	// the provider's schema.
+	unmarkedPaths = dedupePathValueMarks(append(unmarkedPaths, schema.ValueMarks(plannedNewVal, nil)...))
+	if len(unmarkedPaths) > 0 {
+		plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths)
+	}
+
+	return plannedNewVal, diags
+}
+
+func (n *nodePlannablePartialExpandedResource) dataResourceExecute(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	// TODO: Ideally we should do an approximation of the normal data resource
+	// planning process similar to what we're doing for managed resources in
+	// managedResourceExecute, but we'll save that for a later phase of this
+	// experiment since managed resources are enough to start getting real
+	// experience with this new evaluation approach.
+	return cty.DynamicVal, diags
+}

From 2dab42bb45c088a3e3e90cb8eae4eacc8f1ed0a0 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Fri, 9 Feb 2024 16:35:49 -0800
Subject: [PATCH 12/13] terraform: Callers can force all actions to be deferred

When the modules runtime is being used inside the stacks runtime, it's
possible that a component could refer to another component that has
deferred changes in its plan. In that case, we do still want to plan the
downstream component (to give earlier feedback if there are obvious
problems with it) but we need to force all planned actions to be treated
as deferred so that we preserve the correct dependency ordering across
all objects described in a stack.

This commit only deals with the modules runtime handling that case.
We'll make the stacks runtime use it in a subsequent commit.
---
 internal/terraform/context_plan.go       | 24 ++++--
 internal/terraform/context_plan2_test.go | 97 ++++++++++++++++++++++++
 internal/terraform/context_walk.go       |  9 +++
 3 files changed, 122 insertions(+), 8 deletions(-)

diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go
index a80fd5e4b1aa..e43eacfb13d5 100644
--- a/internal/terraform/context_plan.go
+++ b/internal/terraform/context_plan.go
@@ -82,6 +82,13 @@ type PlanOpts struct {
 	// the actual graph.
 	ExternalReferences []*addrs.Reference
 
+	// ExternalDependencyDeferred, when set, indicates that the caller
+	// considers this configuration to depend on some other configuration
+	// that had at least one deferred change, and therefore everything in
+	// this configuration must have its changes deferred too so that the
+	// overall dependency ordering would be correct.
+	ExternalDependencyDeferred bool
+
 	// Overrides provides a set of override objects that should be applied
 	// during this plan.
 	Overrides *mocking.Overrides
@@ -664,14 +671,15 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o
 	providerFuncResults := providers.NewFunctionResultsTable(nil)
 
 	walker, walkDiags := c.walk(graph, walkOp, &graphWalkOpts{
-		Config:                  config,
-		InputState:              prevRunState,
-		ExternalProviderConfigs: externalProviderConfigs,
-		Changes:                 changes,
-		MoveResults:             moveResults,
-		Overrides:               opts.Overrides,
-		PlanTimeTimestamp:       timestamp,
-		ProviderFuncResults:     providerFuncResults,
+		Config:                     config,
+		InputState:                 prevRunState,
+		ExternalProviderConfigs:    externalProviderConfigs,
+		ExternalDependencyDeferred: opts.ExternalDependencyDeferred,
+		Changes:                    changes,
+		MoveResults:                moveResults,
+		Overrides:                  opts.Overrides,
+		PlanTimeTimestamp:          timestamp,
+		ProviderFuncResults:        providerFuncResults,
 	})
 	diags = diags.Append(walker.NonFatalDiagnostics)
 	diags = diags.Append(walkDiags)
diff --git a/internal/terraform/context_plan2_test.go b/internal/terraform/context_plan2_test.go
index 2cd27b146695..a6c307ed8436 100644
--- a/internal/terraform/context_plan2_test.go
+++ b/internal/terraform/context_plan2_test.go
@@ -13,6 +13,7 @@ import (
 
 	"github.com/davecgh/go-spew/spew"
 	"github.com/google/go-cmp/cmp"
+	"github.com/zclconf/go-cty-debug/ctydebug"
 	"github.com/zclconf/go-cty/cty"
 
 	// "github.com/hashicorp/hcl/v2"
@@ -4742,6 +4743,102 @@ func TestContext2Plan_externalProviders(t *testing.T) {
 	}
 }
 
+func TestContext2Apply_externalDependencyDeferred(t *testing.T) {
+	// This test deals with the situation where the stacks runtime knows
+	// that an upstream component already has deferred actions and so
+	// it's telling us that we need to artificially treat everything in
+	// the current configuration as deferred.
+
+	cfg := testModuleInline(t, map[string]string{
+		"main.tf": `
+			resource "test" "a" {
+				name = "a"
+			}
+
+			resource "test" "b" {
+				name = "b"
+				upstream_names = [test.a.name]
+			}
+
+			resource "test" "c" {
+				name = "c"
+				upstream_names = toset([
+					test.a.name,
+					test.b.name,
+				])
+			}
+		`,
+	})
+
+	p := &MockProvider{
+		GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{
+			ResourceTypes: map[string]providers.Schema{
+				"test": {
+					Block: &configschema.Block{
+						Attributes: map[string]*configschema.Attribute{
+							"name": {
+								Type:     cty.String,
+								Required: true,
+							},
+							"upstream_names": {
+								Type:     cty.Set(cty.String),
+								Optional: true,
+							},
+						},
+					},
+				},
+			},
+		},
+		PlanResourceChangeFn: func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
+			return providers.PlanResourceChangeResponse{
+				PlannedState: req.ProposedNewState,
+			}
+		},
+	}
+	resourceInstancesActionsInPlan := func(p *plans.Plan) map[string]plans.Action {
+		ret := make(map[string]plans.Action)
+		for _, cs := range p.Changes.Resources {
+			// Anything that was deferred will not appear in the result at
+			// all. Non-deferred actions that don't actually need to do anything
+			// _will_ appear, but with action set to [plans.NoOp].
+			ret[cs.Addr.String()] = cs.Action
+		}
+		return ret
+	}
+	cmpOpts := cmp.Options{
+		ctydebug.CmpOptions,
+	}
+
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+		},
+	})
+
+	plan, diags := ctx.Plan(cfg, states.NewState(), &PlanOpts{
+		Mode:                       plans.NormalMode,
+		ExternalDependencyDeferred: true,
+	})
+	assertNoDiagnostics(t, diags)
+	if plan.Applyable {
+		t.Fatal("plan is applyable; should not be, because there's nothing to do yet")
+	}
+	if plan.Complete {
+		t.Fatal("plan is complete; should have deferred actions")
+	}
+
+	gotActions := resourceInstancesActionsInPlan(plan)
+	wantActions := map[string]plans.Action{
+		// No actions at all, because everything was deferred!
+	}
+	if diff := cmp.Diff(wantActions, gotActions, cmpOpts); diff != "" {
+		t.Fatalf("wrong actions in plan\n%s", diff)
+	}
+	// TODO: Once we are including information about the individual
+	// deferred actions in the plan, this would be a good place to
+	// assert that they are correct!
+}
+
 func TestContext2Plan_removedResourceForgetBasic(t *testing.T) {
 	addrA := mustResourceInstanceAddr("test_object.a")
 	m := testModuleInline(t, map[string]string{
diff --git a/internal/terraform/context_walk.go b/internal/terraform/context_walk.go
index 5a0098116c7b..ccdaf7bf7044 100644
--- a/internal/terraform/context_walk.go
+++ b/internal/terraform/context_walk.go
@@ -44,6 +44,12 @@ type graphWalkOpts struct {
 	// always take into account what walk type it's dealing with.
 	ExternalProviderConfigs map[addrs.RootProviderConfig]providers.Interface
 
+	// ExternalDependencyDeferred indicates that something that this entire
+	// configuration depends on (outside the view of this modules runtime)
+	// has deferred changes, and therefore we must treat _all_ actions
+	// as deferred to produce the correct overall dependency ordering.
+	ExternalDependencyDeferred bool
+
 	// PlanTimeCheckResults should be populated during the apply phase with
 	// the snapshot of check results that was generated during the plan step.
 	//
@@ -163,6 +169,9 @@ func (c *Context) graphWalker(graph *Graph, operation walkOperation, opts *graph
 	// blocks, since we need that for deferral tracking.
 	resourceGraph := graph.ResourceGraph()
 	deferred := deferring.NewDeferred(resourceGraph)
+	if opts.ExternalDependencyDeferred {
+		deferred.SetExternalDependencyDeferred()
+	}
 
 	return &ContextGraphWalker{
 		Context: c,

From 268c6fb60176b37bd42042f8e399d94f99fe5a8d Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Fri, 9 Feb 2024 18:15:50 -0800
Subject: [PATCH 13/13] stackeval: Basic support for deferred-change propagation

This is the bare minimum functionality to ensure that we defer all
actions in any component that depends on a component that already had
deferred actions.

We will also eventually need to propagate out a signal to the caller for
whether the stack plan as a whole is complete or incomplete, but we'll
save that for later commits, since the stack orchestration in Terraform
Cloud will do the right thing regardless, aside from the cosmetic concern
that it won't yet know to show a message to the user saying that there
are deferred changes.
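As a rough illustration only (condensed from the component_instance.go
change below, not additional functionality), the planning codepath asks
each required component whether its plan is complete and then forwards
the result to the modules runtime:

    upstreamDeferred := false
    for _, depAddr := range c.call.RequiredComponents(ctx).Elems() {
        depStack := c.main.Stack(ctx, depAddr.Stack, PlanPhase)
        if depStack == nil {
            upstreamDeferred = true // conservative: unknown upstream state
            break
        }
        depComponent := depStack.Component(ctx, depAddr.Item)
        if depComponent == nil || !depComponent.PlanIsComplete(ctx) {
            upstreamDeferred = true
            break
        }
    }
    // ...which is then passed to the modules runtime as
    // terraform.PlanOpts.ExternalDependencyDeferred.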
---
 .../internal/stackeval/component.go           |  36 +++++++
 .../internal/stackeval/component_instance.go  |  52 +++++++--
 .../internal/stackeval/planning_test.go       | 100 ++++++++++++++++++
 .../deferred-changes-propagation.tf           |  32 ++++++
 .../deferred-changes-propagation.tfstack.hcl  |  35 ++++++
 5 files changed, 244 insertions(+), 11 deletions(-)
 create mode 100644 internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tf
 create mode 100644 internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tfstack.hcl

diff --git a/internal/stacks/stackruntime/internal/stackeval/component.go b/internal/stacks/stackruntime/internal/stackeval/component.go
index 3ae127749560..bfcd35a5f9ac 100644
--- a/internal/stacks/stackruntime/internal/stackeval/component.go
+++ b/internal/stacks/stackruntime/internal/stackeval/component.go
@@ -239,6 +239,42 @@ func (c *Component) ResultValue(ctx context.Context, phase EvalPhase) cty.Value
 	}
 }
 
+// PlanIsComplete can be called only during the planning phase, and returns
+// true only if all instances of this component have "complete" plans.
+//
+// A component instance plan is "incomplete" if it was either created with
+// resource targets set in its planning options or if the modules runtime
+// decided it needed to defer at least one action for a future round.
+func (c *Component) PlanIsComplete(ctx context.Context) bool {
+	if !c.main.Planning() {
+		panic("PlanIsComplete used when not in the planning phase")
+	}
+	insts := c.Instances(ctx, PlanPhase)
+	if insts == nil {
+		// Suggests that the configuration was not even valid enough to
+		// decide what the instances are, so we'll return false to be
+		// conservative and let the error be returned by a different path.
+		return false
+	}
+
+	for _, inst := range insts {
+		plan := inst.ModuleTreePlan(ctx)
+		if plan == nil {
+			// Seems that we weren't even able to create a plan for this
+			// one, so we'll just assume it was incomplete to be conservative,
+			// and assume that whatever errors caused this nil result will
+			// get returned by a different return path.
+			return false
+		}
+		if !plan.Complete {
+			return false
+		}
+	}
+	// If we get here without returning false then we can say that
+	// all of the instance plans are complete.
+	return true
+}
+
 // ExprReferenceValue implements Referenceable.
 func (c *Component) ExprReferenceValue(ctx context.Context, phase EvalPhase) cty.Value {
 	return c.ResultValue(ctx, phase)
diff --git a/internal/stacks/stackruntime/internal/stackeval/component_instance.go b/internal/stacks/stackruntime/internal/stackeval/component_instance.go
index 6c9d1b3c92e1..3ae87192bacd 100644
--- a/internal/stacks/stackruntime/internal/stackeval/component_instance.go
+++ b/internal/stacks/stackruntime/internal/stackeval/component_instance.go
@@ -529,15 +529,37 @@ func (c *ComponentInstance) CheckModuleTreePlan(ctx context.Context) (*plans.Pla
 		return nil, diags
 	}
 
+	// If any of our upstream components have incomplete plans then
+	// we need to force treating everything in this component as
+	// deferred so we can preserve the correct dependency ordering.
+	upstreamDeferred := false
+	for _, depAddr := range c.call.RequiredComponents(ctx).Elems() {
+		depStack := c.main.Stack(ctx, depAddr.Stack, PlanPhase)
+		if depStack == nil {
+			upstreamDeferred = true // to be conservative
+			break
+		}
+		depComponent := depStack.Component(ctx, depAddr.Item)
+		if depComponent == nil {
+			upstreamDeferred = true // to be conservative
+			break
+		}
+		if !depComponent.PlanIsComplete(ctx) {
+			upstreamDeferred = true
+			break
+		}
+	}
+
 	// NOTE: This ComponentInstance type only deals with component
 	// instances currently declared in the configuration. See
 	// [ComponentInstanceRemoved] for the model of a component instance
 	// that existed in the prior state but is not currently declared
 	// in the configuration.
 	plan, moreDiags := tfCtx.Plan(moduleTree, prevState, &terraform.PlanOpts{
-		Mode:              stackPlanOpts.PlanningMode,
-		SetVariables:      inputValues,
-		ExternalProviders: providerClients,
+		Mode:                       stackPlanOpts.PlanningMode,
+		SetVariables:               inputValues,
+		ExternalProviders:          providerClients,
+		ExternalDependencyDeferred: upstreamDeferred,
 
 		// This is set by some tests but should not be used in main code.
 		// (nil means to use the real time when tfCtx.Plan was called.)
@@ -748,14 +770,22 @@ func (c *ComponentInstance) ApplyModuleTreePlan(ctx context.Context, plan *plans
 		return noOpResult, diags
 	}
 
-	// NOTE: tfCtx.Apply tends to make changes to the given plan while it
-	// works, and so code after this point should not make any further use
-	// of either "modifiedPlan" or "plan" (since they share lots of the same
-	// pointers to mutable objects and so both can get modified together.)
-	newState, moreDiags := tfCtx.Apply(&modifiedPlan, moduleTree, &terraform.ApplyOpts{
-		ExternalProviders: providerClients,
-	})
-	diags = diags.Append(moreDiags)
+	var newState *states.State
+	if modifiedPlan.Applyable {
+		// NOTE: tfCtx.Apply tends to make changes to the given plan while it
+		// works, and so code after this point should not make any further use
+		// of either "modifiedPlan" or "plan" (since they share lots of the same
+		// pointers to mutable objects and so both can get modified together.)
+		newState, moreDiags = tfCtx.Apply(&modifiedPlan, moduleTree, &terraform.ApplyOpts{
+			ExternalProviders: providerClients,
+		})
+		diags = diags.Append(moreDiags)
+	} else {
+		// For a non-applyable plan, we just skip trying to apply it altogether
+		// and just propagate the prior state (including any refreshing we
+		// did during the plan phase) forward.
+		newState = modifiedPlan.PriorState
+	}
 
 	if newState != nil {
 		cic := &hooks.ComponentInstanceChange{
diff --git a/internal/stacks/stackruntime/internal/stackeval/planning_test.go b/internal/stacks/stackruntime/internal/stackeval/planning_test.go
index 8fe80f948b8d..123ad1ccbdfa 100644
--- a/internal/stacks/stackruntime/internal/stackeval/planning_test.go
+++ b/internal/stacks/stackruntime/internal/stackeval/planning_test.go
@@ -21,6 +21,7 @@ import (
 	"github.com/hashicorp/terraform/internal/promising"
 	"github.com/hashicorp/terraform/internal/providers"
 	"github.com/hashicorp/terraform/internal/stacks/stackaddrs"
+	"github.com/hashicorp/terraform/internal/stacks/stackplan"
 	"github.com/hashicorp/terraform/internal/stacks/stackstate"
 	"github.com/hashicorp/terraform/internal/stacks/stackstate/statekeys"
 	"github.com/hashicorp/terraform/internal/stacks/tfstackdata1"
@@ -581,6 +582,105 @@ func TestPlanning_RequiredComponents(t *testing.T) {
 	})
 }
 
+func TestPlanning_DeferredChangesPropagation(t *testing.T) {
+	// This test arranges for one component's plan to signal deferred changes,
+	// and checks that a downstream component's plan also has everything
+	// deferred even though it could potentially have been plannable in
+	// isolation, since we need to respect the dependency ordering between
+	// components.
+
+	cfg := testStackConfig(t, "planning", "deferred_changes_propagation")
+	main := NewForPlanning(cfg, stackstate.NewState(), PlanOpts{
+		PlanningMode: plans.NormalMode,
+		InputVariableValues: map[stackaddrs.InputVariable]ExternalInputValue{
+			// This causes the first component to have a module whose
+			// instance count isn't known yet.
+			{Name: "first_count"}: {
+				Value: cty.UnknownVal(cty.Number),
+			},
+		},
+		ProviderFactories: ProviderFactories{
+			addrs.NewBuiltInProvider("test"): func() (providers.Interface, error) {
+				return &terraform.MockProvider{
+					GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{
+						Provider: providers.Schema{
+							Block: &configschema.Block{},
+						},
+						ResourceTypes: map[string]providers.Schema{
+							"test": {
+								Block: &configschema.Block{},
+							},
+						},
+					},
+				}, nil
+			},
+		},
+	})
+	// TEMP: This test currently relies on the experimental module language
+	// feature of allowing unknown values in a resource's "count" argument.
+	// We should remove this if the experiment gets stabilized.
+	main.AllowLanguageExperiments(true)
+
+	componentFirstInstAddr := stackaddrs.AbsComponentInstance{
+		Stack: stackaddrs.RootStackInstance,
+		Item: stackaddrs.ComponentInstance{
+			Component: stackaddrs.Component{
+				Name: "first",
+			},
+		},
+	}
+	componentSecondInstAddr := stackaddrs.AbsComponentInstance{
+		Stack: stackaddrs.RootStackInstance,
+		Item: stackaddrs.ComponentInstance{
+			Component: stackaddrs.Component{
+				Name: "second",
+			},
+		},
+	}
+
+	componentPlanResourceActions := func(plan *stackplan.Component) map[string]plans.Action {
+		ret := make(map[string]plans.Action)
+		for _, elem := range plan.ResourceInstancePlanned.Elems {
+			ret[elem.Key.String()] = elem.Value.Action
+		}
+		return ret
+	}
+
+	inPromisingTask(t, func(ctx context.Context, t *testing.T) {
+		plan, diags := testPlan(t, main)
+		assertNoErrors(t, diags)
+
+		firstPlan := plan.Components.Get(componentFirstInstAddr)
+		if firstPlan.PlanComplete {
+			t.Error("first component has a complete plan; should be incomplete because it has deferred actions")
+		}
+		secondPlan := plan.Components.Get(componentSecondInstAddr)
+		if secondPlan.PlanComplete {
+			t.Error("second component has a complete plan; should be incomplete because everything in it should've been deferred")
+		}
+
+		gotFirstActions := componentPlanResourceActions(firstPlan)
+		wantFirstActions := map[string]plans.Action{
+			// Only test.a is planned, because test.b has unknown count
+			// and must therefore be deferred.
+			"test.a": plans.Create,
+		}
+		gotSecondActions := componentPlanResourceActions(secondPlan)
+		wantSecondActions := map[string]plans.Action{
+			// Nothing at all expected for the second, because all of its
+			// planned actions should've been deferred to respect the
+			// dependency on the first component.
+		}
+
+		if diff := cmp.Diff(wantFirstActions, gotFirstActions); diff != "" {
+			t.Errorf("wrong actions for first component\n%s", diff)
+		}
+		if diff := cmp.Diff(wantSecondActions, gotSecondActions); diff != "" {
+			t.Errorf("wrong actions for second component\n%s", diff)
+		}
+	})
+}
+
 func TestPlanning_NoWorkspaceNameRef(t *testing.T) {
 	// This test verifies that a reference to terraform.workspace is treated
 	// as invalid for modules used in a stacks context, because there's
diff --git a/internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tf b/internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tf
new file mode 100644
index 000000000000..1760edf92191
--- /dev/null
+++ b/internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tf
@@ -0,0 +1,32 @@
+
+terraform {
+  required_providers {
+    test = {
+      source = "terraform.io/builtin/test"
+    }
+  }
+
+  # TODO: Remove this if this experiment gets stabilized.
+  # If you're removing this, remember to also update the calling test so
+  # that it no longer enables the use of experiments, to ensure that we're
+  # really not depending on any experimental features.
+  experiments = [unknown_instances]
+}
+
+variable "instance_count" {
+  type = number
+}
+
+resource "test" "a" {
+  # This one has no intrinsic need to be deferred, but
+  # should still be deferred when an upstream component
+  # has a deferral.
+}
+
+resource "test" "b" {
+  count = var.instance_count
+}
+
+output "constant_one" {
+  value = 1
+}
diff --git a/internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tfstack.hcl b/internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tfstack.hcl
new file mode 100644
index 000000000000..d429a4d63e1a
--- /dev/null
+++ b/internal/stacks/stackruntime/internal/stackeval/testdata/sourcebundle/planning/deferred_changes_propagation/deferred-changes-propagation.tfstack.hcl
@@ -0,0 +1,35 @@
+
+required_providers {
+  test = {
+    source = "terraform.io/builtin/test"
+  }
+}
+
+provider "test" "main" {
+}
+
+variable "first_count" {
+  type = number
+}
+
+component "first" {
+  source = "./"
+
+  inputs = {
+    instance_count = var.first_count
+  }
+  providers = {
+    test = provider.test.main
+  }
+}
+
+component "second" {
+  source = "./"
+
+  inputs = {
+    instance_count = component.first.constant_one
+  }
+  providers = {
+    test = provider.test.main
+  }
+}
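+
+# Note: component "second" consumes component.first.constant_one, so the
+# deferred-changes-propagation test expects that when "first" has deferred
+# actions (its module has an unknown instance count), everything planned
+# for "second" is deferred as well, preserving the dependency ordering
+# between the two components.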