From afcac0f418e144d1037f698b4cf42fa8cd6fffd4 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Sun, 15 Dec 2024 10:46:47 +0100 Subject: [PATCH] chore: Implements Read in TPF data source (#2865) * enable data source test checks * ds model * same Read impl for singular ds as resource Read * Read in plural ds * use conversion.CopyModel to reduce duplicated code * check out is not nil * remove unneeded funcs * pluralModel * overrideUsingLegacySchema * fix condition to use legacy schema * remove use of config.AdvancedClusterV2Schema in tests * fix pinned_fcv * revert test changes * refactor ds to use readClustersDS and readClusterDS * enable ds test checks * refactor plural ds to use ListClusters info * leftover comment * revert temporarily overrideUsingLegacySchema * split convertClusterAddAdvConfig into getBasicClusterModel and updateModelAdvancedConfig * pagination comment in ListClusters * check if use_replication_spec_per_shard should be added in ds * bring changes from master in resource.go * CopyModel tests * have resource and datasource models together, renaming resource_schema files to schema * rename AsymmetricShardUnsupportedError to AsymmetricShardUnsupported * use AllPages * wait in applyAdvancedConfigurationChanges * await after every update * don't include accept_data_risks_and_force_replica_set_reconfig in ds * move ds schemas together with resource one * wait to apply adv_config changes in Create * leftover * skip TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema * leftover --- .golangci.yml | 2 +- .../common/conversion/model_generation.go | 41 +++++++ .../conversion/model_generation_test.go | 92 ++++++++++++++ .../resource_advanced_cluster_test.go | 2 + .../service/advancedclustertpf/data_source.go | 66 +++++++--- .../model_ClusterDescription20240805.go | 1 + .../advancedclustertpf/plural_data_source.go | 81 +++++++++--- .../service/advancedclustertpf/resource.go | 115 +++++++++++------- .../resource_compatibility_reuse.go | 15 +-- .../resource_compatiblity.go | 9 +- .../{resource_schema.go => schema.go} | 79 +++++++++++- ...resource_schema_test.go => schema_test.go} | 0 .../acc/advanced_cluster_schema_v2.go | 46 ++----- 13 files changed, 425 insertions(+), 124 deletions(-) create mode 100644 internal/common/conversion/model_generation.go create mode 100644 internal/common/conversion/model_generation_test.go rename internal/service/advancedclustertpf/{resource_schema.go => schema.go} (91%) rename internal/service/advancedclustertpf/{resource_schema_test.go => schema_test.go} (100%) diff --git a/.golangci.yml b/.golangci.yml index 83b0802fad..bc730e75ee 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -123,7 +123,7 @@ issues: - linters: - gocritic text: "^hugeParam: req is heavy" - - path: "_schema\\.go" # exclude rules for schema files as it's auto-genereated from OpenAPI spec + - path: "schema\\.go" # exclude rules for schema files as it's auto-genereated from OpenAPI spec text: "fieldalignment|hugeParam|var-naming|ST1003|S1007|exceeds the maximum|too long|regexpSimplify|nolint" run: timeout: 10m diff --git a/internal/common/conversion/model_generation.go b/internal/common/conversion/model_generation.go new file mode 100644 index 0000000000..f44c75624b --- /dev/null +++ b/internal/common/conversion/model_generation.go @@ -0,0 +1,41 @@ +package conversion + +import ( + "fmt" + "reflect" +) + +// CopyModel creates a new struct with the same values as the source struct. 
Fields in destination struct that are not in source are left with zero value. +func CopyModel[T any](src any) (*T, error) { + dest := new(T) + valSrc := reflect.ValueOf(src) + valDest := reflect.ValueOf(dest) + if valSrc.Kind() != reflect.Ptr || valDest.Kind() != reflect.Ptr { + return nil, fmt.Errorf("params must be pointers") + } + valSrc = valSrc.Elem() + valDest = valDest.Elem() + if valSrc.Kind() != reflect.Struct || valDest.Kind() != reflect.Struct { + return nil, fmt.Errorf("params must be pointers to structs") + } + typeSrc := valSrc.Type() + typeDest := valDest.Type() + for i := 0; i < typeDest.NumField(); i++ { + fieldDest := typeDest.Field(i) + name := fieldDest.Name + { + fieldSrc, found := typeSrc.FieldByName(name) + if !found { + continue + } + if fieldDest.Type != fieldSrc.Type { + return nil, fmt.Errorf("field has different type: %s", name) + } + } + if !valDest.Field(i).CanSet() { + return nil, fmt.Errorf("field can't be set, probably unexported: %s", name) + } + valDest.Field(i).Set(valSrc.FieldByName(name)) + } + return dest, nil +} diff --git a/internal/common/conversion/model_generation_test.go b/internal/common/conversion/model_generation_test.go new file mode 100644 index 0000000000..ea18d83d4e --- /dev/null +++ b/internal/common/conversion/model_generation_test.go @@ -0,0 +1,92 @@ +package conversion_test + +import ( + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCopyModel(t *testing.T) { + type destType struct { + AttrStr string + attrUnexported string + AttrInt int + } + + testCases := map[string]struct { + input any + expected any + expectedErrorStr string + }{ + "basic": { + input: &struct { + AttrStr string + AttrInt int + }{ + AttrStr: "val", + AttrInt: 1, + }, + expected: &destType{ + AttrStr: "val", + AttrInt: 1, + attrUnexported: "", + }, + }, + "missing field": { + input: &struct { + AttrStr string + }{ + AttrStr: "val", + }, + expected: &destType{ + AttrStr: "val", + }, + }, + "extra field": { + input: &struct { + AttrStr string + AttrExtra string + AttrInt int + }{ + AttrStr: "val", + AttrExtra: "extra", + AttrInt: 1, + }, + expected: &destType{ + AttrStr: "val", + AttrInt: 1, + }, + }, + "different type": { + input: &struct { + AttrStr bool + }{ + AttrStr: true, + }, + expectedErrorStr: "field has different type: AttrStr", + }, + "unexported": { + input: &struct { + attrUnexported string + }{ + attrUnexported: "val", + }, + expectedErrorStr: "field can't be set, probably unexported: attrUnexported", + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + dest, err := conversion.CopyModel[destType](tc.input) + if err == nil { + assert.Equal(t, tc.expected, dest) + assert.Equal(t, "", tc.expectedErrorStr) + } else { + require.ErrorContains(t, err, tc.expectedErrorStr) + assert.Nil(t, dest) + assert.Nil(t, tc.expected) + } + }) + } +} diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 5cf47fc748..d68ea5e23f 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -706,6 +706,8 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchemaToAsymmetricAd } func TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema(t *testing.T) { + // TODO: enable when 
datasource attribute use_replication_spec_per_shard is used + acc.SkipIfAdvancedClusterV2Schema(t) resource.ParallelTest(t, asymmetricShardedNewSchemaTestCase(t, true)) } diff --git a/internal/service/advancedclustertpf/data_source.go b/internal/service/advancedclustertpf/data_source.go index 0bb0905091..b95aff557c 100644 --- a/internal/service/advancedclustertpf/data_source.go +++ b/internal/service/advancedclustertpf/data_source.go @@ -2,12 +2,13 @@ package advancedclustertpf import ( "context" + "fmt" "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/datasource/schema" - "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" + "go.mongodb.org/atlas-sdk/v20241113003/admin" ) var _ datasource.DataSource = &ds{} @@ -26,22 +27,57 @@ type ds struct { } func (d *ds) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { - resp.Schema = conversion.DataSourceSchemaFromResource(ResourceSchema(ctx), &conversion.DataSourceSchemaRequest{ - RequiredFields: []string{"project_id", "name"}, - OverridenFields: map[string]schema.Attribute{ - "use_replication_spec_per_shard": schema.BoolAttribute{ // TODO: added as in current resource - Optional: true, - MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation - }, - }, - }) + resp.Schema = dataSourceSchema(ctx) } func (d *ds) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state TFModelDS + diags := &resp.Diagnostics + diags.Append(req.Config.Get(ctx, &state)...) + if diags.HasError() { + return + } + model := d.readCluster(ctx, diags, &state) + if model != nil { + diags.Append(resp.State.Set(ctx, model)...) + } } -// TODO: see if resource model can be used instead, probably different only in timeouts -type ModelDS struct { - ProjectID types.String `tfsdk:"project_id"` - Name types.String `tfsdk:"name"` +func (d *ds) readCluster(ctx context.Context, diags *diag.Diagnostics, modelDS *TFModelDS) *TFModelDS { + clusterName := modelDS.Name.ValueString() + projectID := modelDS.ProjectID.ValueString() + useReplicationSpecPerShard := modelDS.UseReplicationSpecPerShard.ValueBool() + api := d.Client.AtlasV2.ClustersApi + clusterResp, _, err := api.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + if admin.IsErrorCode(err, ErrorCodeClusterNotFound) { + return nil + } + diags.AddError("errorRead", fmt.Sprintf(errorRead, clusterName, err.Error())) + return nil + } + modelIn := &TFModel{ + ProjectID: modelDS.ProjectID, + Name: modelDS.Name, + } + // TODO: pass !UseReplicationSpecPerShard to overrideUsingLegacySchema + modelOut, extraInfo := getBasicClusterModel(ctx, diags, d.Client, clusterResp, modelIn) + if diags.HasError() { + return nil + } + if extraInfo.AsymmetricShardUnsupported && !useReplicationSpecPerShard { + diags.AddError("errorRead", "Please add `use_replication_spec_per_shard = true` to your data source configuration to enable asymmetric shard support. 
Refer to documentation for more details.") + return nil + } + updateModelAdvancedConfig(ctx, diags, d.Client, modelOut, nil, nil) + if diags.HasError() { + return nil + } + modelOutDS, err := conversion.CopyModel[TFModelDS](modelOut) + if err != nil { + diags.AddError(errorRead, fmt.Sprintf("error setting model: %s", err.Error())) + return nil + } + modelOutDS.UseReplicationSpecPerShard = modelDS.UseReplicationSpecPerShard // attrs not in resource model + return modelOutDS } diff --git a/internal/service/advancedclustertpf/model_ClusterDescription20240805.go b/internal/service/advancedclustertpf/model_ClusterDescription20240805.go index 8df35183b4..f77a7dfbc5 100644 --- a/internal/service/advancedclustertpf/model_ClusterDescription20240805.go +++ b/internal/service/advancedclustertpf/model_ClusterDescription20240805.go @@ -25,6 +25,7 @@ type ExtraAPIInfo struct { RootDiskSize *float64 ContainerIDs map[string]string UsingLegacySchema bool + AsymmetricShardUnsupported bool } func NewTFModel(ctx context.Context, input *admin.ClusterDescription20240805, timeout timeouts.Value, diags *diag.Diagnostics, apiInfo ExtraAPIInfo) *TFModel { diff --git a/internal/service/advancedclustertpf/plural_data_source.go b/internal/service/advancedclustertpf/plural_data_source.go index 5da1ba424f..6c8178345c 100644 --- a/internal/service/advancedclustertpf/plural_data_source.go +++ b/internal/service/advancedclustertpf/plural_data_source.go @@ -3,12 +3,15 @@ package advancedclustertpf import ( "context" "fmt" + "net/http" "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" + "go.mongodb.org/atlas-sdk/v20241113003/admin" ) var _ datasource.DataSource = &pluralDS{} @@ -27,26 +30,68 @@ type pluralDS struct { } func (d *pluralDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { - resp.Schema = conversion.PluralDataSourceSchemaFromResource(ResourceSchema(ctx), &conversion.PluralDataSourceSchemaRequest{ - RequiredFields: []string{"project_id"}, - OverridenRootFields: map[string]schema.Attribute{ - "use_replication_spec_per_shard": schema.BoolAttribute{ // TODO: added as in current resource - Optional: true, - MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation - }, - "include_deleted_with_retained_backups": schema.BoolAttribute{ // TODO: not in current resource, decide if keep - Optional: true, - MarkdownDescription: "Flag that indicates whether to return Clusters with retain backups.", - }, - }, - }) + resp.Schema = pluralDataSourceSchema(ctx) } func (d *pluralDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state TFModelPluralDS + diags := &resp.Diagnostics + diags.Append(req.Config.Get(ctx, &state)...) + if diags.HasError() { + return + } + model := d.readClusters(ctx, diags, &state) + if model != nil { + diags.Append(resp.State.Set(ctx, model)...) 
+ } } -type AdvancedClustersModel struct { - ProjectID types.String `tfsdk:"project_id"` - UseReplicationSpecPerShard types.Bool `tfsdk:"use_replication_spec_per_shard"` // TODO: added as in current resource - IncludeDeletedWithRetainedBackups types.Bool `tfsdk:"include_deleted_with_retained_backups"` // TODO: not in current resource, decide if keep +func (d *pluralDS) readClusters(ctx context.Context, diags *diag.Diagnostics, pluralModel *TFModelPluralDS) *TFModelPluralDS { + projectID := pluralModel.ProjectID.ValueString() + useReplicationSpecPerShard := pluralModel.UseReplicationSpecPerShard.ValueBool() + api := d.Client.AtlasV2.ClustersApi + params := admin.ListClustersApiParams{ + GroupId: projectID, + } + list, err := dsschema.AllPages(ctx, func(ctx context.Context, pageNum int) (dsschema.PaginateResponse[admin.ClusterDescription20240805], *http.Response, error) { + request := api.ListClustersWithParams(ctx, ¶ms) + request = request.PageNum(pageNum) + return request.Execute() + }) + if err != nil { + diags.AddError("errorList", fmt.Sprintf(errorList, projectID, err.Error())) + return nil + } + outs := &TFModelPluralDS{ + ProjectID: pluralModel.ProjectID, + UseReplicationSpecPerShard: pluralModel.UseReplicationSpecPerShard, + IncludeDeletedWithRetainedBackups: pluralModel.IncludeDeletedWithRetainedBackups, + } + for i := range list { + clusterResp := &list[i] + modelIn := &TFModel{ + ProjectID: pluralModel.ProjectID, + Name: types.StringValue(clusterResp.GetName()), + } + // TODO: pass !UseReplicationSpecPerShard to overrideUsingLegacySchema + modelOut, extraInfo := getBasicClusterModel(ctx, diags, d.Client, clusterResp, modelIn) + if diags.HasError() { + return nil + } + if extraInfo.AsymmetricShardUnsupported && !useReplicationSpecPerShard { + continue + } + updateModelAdvancedConfig(ctx, diags, d.Client, modelOut, nil, nil) + if diags.HasError() { + return nil + } + modelOutDS, err := conversion.CopyModel[TFModelDS](modelOut) + if err != nil { + diags.AddError(errorList, fmt.Sprintf("error setting model: %s", err.Error())) + return nil + } + modelOutDS.UseReplicationSpecPerShard = pluralModel.UseReplicationSpecPerShard // attrs not in resource model + outs.Results = append(outs.Results, modelOutDS) + } + return outs } diff --git a/internal/service/advancedclustertpf/resource.go b/internal/service/advancedclustertpf/resource.go index 3874df57cc..265705ab90 100644 --- a/internal/service/advancedclustertpf/resource.go +++ b/internal/service/advancedclustertpf/resource.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/update" @@ -29,6 +28,7 @@ const ( errorRead = "error reading advanced cluster (%s): %s" errorDelete = "error deleting advanced cluster (%s): %s" errorUpdate = "error updating advanced cluster (%s): %s" + errorList = "error reading advanced cluster list for project %s: %s" errorConfigUpdate = "error updating advanced cluster configuration options (%s): %s" errorConfigRead = "error reading advanced cluster configuration options (%s): %s" ErrorClusterSetting = "error setting `%s` for MongoDB 
Cluster (%s): %s" @@ -64,7 +64,7 @@ type rs struct { } func (r *rs) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = ResourceSchema(ctx) + resp.Schema = resourceSchema(ctx) conversion.UpdateSchemaDescription(&resp.Schema) } @@ -88,7 +88,7 @@ func (r *rs) Read(ctx context.Context, req resource.ReadRequest, resp *resource. if diags.HasError() { return } - model := r.readCluster(ctx, &state, &resp.State, diags, true) + model := r.readCluster(ctx, diags, &state, &resp.State) if model != nil { diags.Append(resp.State.Set(ctx, model)...) } @@ -113,13 +113,13 @@ func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resou diags.AddError("errorPatchPayload", err.Error()) return } - var cluster *admin.ClusterDescription20240805 + var clusterResp *admin.ClusterDescription20240805 if !update.IsZeroValues(patchReq) { upgradeRequest := getTenantUpgradeRequest(stateReq, patchReq) if upgradeRequest != nil { - cluster = r.applyTenantUpgrade(ctx, &plan, upgradeRequest, diags) + clusterResp = r.applyTenantUpgrade(ctx, &plan, upgradeRequest, diags) } else { - cluster = r.applyClusterChanges(ctx, diags, &state, &plan, patchReq) + clusterResp = r.applyClusterChanges(ctx, diags, &state, &plan, patchReq) } if diags.HasError() { return @@ -129,20 +129,22 @@ func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resou if diags.HasError() { return } - var stateAdvConfig *types.Object - if advConfigChanged || cluster == nil { - // Cluster changes to UPDATING state after updating advanced configuration - cluster = AwaitChanges(ctx, r.Client.AtlasV2.ClustersApi, &plan.Timeouts, diags, plan.ProjectID.ValueString(), plan.Name.ValueString(), changeReasonUpdate) + modelOut := &state + if clusterResp != nil { + modelOut, _ = getBasicClusterModel(ctx, diags, r.Client, clusterResp, &plan) if diags.HasError() { return } - } else { - stateAdvConfig = &state.AdvancedConfiguration } - model := r.convertClusterAddAdvConfig(ctx, legacyAdvConfig, advConfig, cluster, &plan, stateAdvConfig, diags) - if model != nil { - diags.Append(resp.State.Set(ctx, model)...) + if advConfigChanged { + updateModelAdvancedConfig(ctx, diags, r.Client, modelOut, legacyAdvConfig, advConfig) + if diags.HasError() { + return + } + } else { + modelOut.AdvancedConfiguration = state.AdvancedConfiguration } + diags.Append(resp.State.Set(ctx, modelOut)...) 
} func (r *rs) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { @@ -201,12 +203,12 @@ func (r *rs) createCluster(ctx context.Context, plan *TFModel, diags *diag.Diagn diags.AddError("errorCreate", fmt.Sprintf(errorCreate, err.Error())) return nil } - cluster := AwaitChanges(ctx, api, &plan.Timeouts, diags, projectID, clusterName, changeReasonCreate) + clusterResp := AwaitChanges(ctx, api, &plan.Timeouts, diags, projectID, clusterName, changeReasonCreate) if diags.HasError() { return nil } if pauseAfter { - cluster = r.updateAndWait(ctx, &pauseRequest, diags, plan) + clusterResp = r.updateAndWait(ctx, &pauseRequest, diags, plan) } var legacyAdvConfig *admin20240530.ClusterDescriptionProcessArgs legacyAdvConfigUpdate := NewAtlasReqAdvancedConfigurationLegacy(ctx, &plan.AdvancedConfiguration, diags) @@ -217,6 +219,10 @@ func (r *rs) createCluster(ctx context.Context, plan *TFModel, diags *diag.Diagn diags.AddError("errorUpdateeAdvConfigLegacy", fmt.Sprintf(errorCreate, err.Error())) return nil } + _ = AwaitChanges(ctx, r.Client.AtlasV2.ClustersApi, &plan.Timeouts, diags, projectID, clusterName, changeReasonCreate) + if diags.HasError() { + return nil + } } advConfigUpdate := NewAtlasReqAdvancedConfiguration(ctx, &plan.AdvancedConfiguration, diags) @@ -228,25 +234,46 @@ func (r *rs) createCluster(ctx context.Context, plan *TFModel, diags *diag.Diagn diags.AddError("errorUpdateAdvConfig", fmt.Sprintf(errorCreate, err.Error())) return nil } + _ = AwaitChanges(ctx, r.Client.AtlasV2.ClustersApi, &plan.Timeouts, diags, projectID, clusterName, changeReasonCreate) + if diags.HasError() { + return nil + } + } + modelOut, _ := getBasicClusterModel(ctx, diags, r.Client, clusterResp, plan) + if diags.HasError() { + return nil } - return r.convertClusterAddAdvConfig(ctx, legacyAdvConfig, advConfig, cluster, plan, nil, diags) + updateModelAdvancedConfig(ctx, diags, r.Client, modelOut, legacyAdvConfig, advConfig) + if diags.HasError() { + return nil + } + return modelOut } -func (r *rs) readCluster(ctx context.Context, model *TFModel, state *tfsdk.State, diags *diag.Diagnostics, allowNotFound bool) *TFModel { - clusterName := model.Name.ValueString() - projectID := model.ProjectID.ValueString() +func (r *rs) readCluster(ctx context.Context, diags *diag.Diagnostics, modelIn *TFModel, state *tfsdk.State) *TFModel { + clusterName := modelIn.Name.ValueString() + projectID := modelIn.ProjectID.ValueString() api := r.Client.AtlasV2.ClustersApi readResp, _, err := api.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { - if admin.IsErrorCode(err, ErrorCodeClusterNotFound) && allowNotFound { + if admin.IsErrorCode(err, ErrorCodeClusterNotFound) { state.RemoveResource(ctx) return nil } diags.AddError("errorRead", fmt.Sprintf(errorRead, clusterName, err.Error())) return nil } - return r.convertClusterAddAdvConfig(ctx, nil, nil, readResp, model, nil, diags) + modelOut, _ := getBasicClusterModel(ctx, diags, r.Client, readResp, modelIn) + if diags.HasError() { + return nil + } + updateModelAdvancedConfig(ctx, diags, r.Client, modelOut, nil, nil) + if diags.HasError() { + return nil + } + return modelOut } + func (r *rs) applyAdvancedConfigurationChanges(ctx context.Context, diags *diag.Diagnostics, state, plan *TFModel) (legacy *admin20240530.ClusterDescriptionProcessArgs, latest *admin.ClusterDescriptionProcessArgs20240805, changed bool) { var ( api = r.Client.AtlasV2.ClustersApi @@ -262,7 +289,11 @@ func (r *rs) applyAdvancedConfigurationChanges(ctx 
context.Context, diags *diag. advConfig, _, err = api.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, patchReqProcessArgs).Execute() if err != nil { diags.AddError("errorUpdateAdvancedConfig", fmt.Sprintf(errorConfigUpdate, clusterName, err.Error())) - return legacyAdvConfig, advConfig, changed + return nil, nil, false + } + _ = AwaitChanges(ctx, r.Client.AtlasV2.ClustersApi, &plan.Timeouts, diags, projectID, clusterName, changeReasonUpdate) + if diags.HasError() { + return nil, nil, false } } patchReqProcessArgsLegacy := update.PatchPayloadTpf(ctx, diags, &state.AdvancedConfiguration, &plan.AdvancedConfiguration, NewAtlasReqAdvancedConfigurationLegacy) @@ -271,6 +302,11 @@ func (r *rs) applyAdvancedConfigurationChanges(ctx context.Context, diags *diag. legacyAdvConfig, _, err = r.Client.AtlasV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, patchReqProcessArgsLegacy).Execute() if err != nil { diags.AddError("errorUpdateAdvancedConfigLegacy", fmt.Sprintf(errorConfigUpdate, clusterName, err.Error())) + return nil, nil, false + } + _ = AwaitChanges(ctx, r.Client.AtlasV2.ClustersApi, &plan.Timeouts, diags, projectID, clusterName, changeReasonUpdate) + if diags.HasError() { + return nil, nil, false } } return legacyAdvConfig, advConfig, changed @@ -348,7 +384,6 @@ func (r *rs) updateLegacyReplicationSpecs(ctx context.Context, state, plan *TFMo return false } api20240530 := r.Client.AtlasV220240530.ClustersApi - api20240530.UpdateCluster(ctx, plan.ProjectID.ValueString(), plan.Name.ValueString(), legacyPatch) _, _, err := api20240530.UpdateCluster(ctx, plan.ProjectID.ValueString(), plan.Name.ValueString(), legacyPatch).Execute() if err != nil { diags.AddError("errorUpdateLegacy", fmt.Sprintf(errorUpdate, plan.Name.ValueString(), err.Error())) @@ -368,6 +403,7 @@ func (r *rs) updateAndWait(ctx context.Context, patchReq *admin.ClusterDescripti } return AwaitChanges(ctx, r.Client.AtlasV2.ClustersApi, &tfModel.Timeouts, diags, projectID, clusterName, changeReasonUpdate) } + func (r *rs) updateAndWaitLegacy(ctx context.Context, patchReq *admin20240805.ClusterDescription20240805, diags *diag.Diagnostics, plan *TFModel) *admin.ClusterDescription20240805 { api20240805 := r.Client.AtlasV220240805.ClustersApi projectID := plan.ProjectID.ValueString() @@ -393,29 +429,20 @@ func (r *rs) applyTenantUpgrade(ctx context.Context, plan *TFModel, upgradeReque return AwaitChanges(ctx, api, &plan.Timeouts, diags, projectID, clusterName, changeReasonUpdate) } -func (r *rs) convertClusterAddAdvConfig(ctx context.Context, legacyAdvConfig *admin20240530.ClusterDescriptionProcessArgs, advConfig *admin.ClusterDescriptionProcessArgs20240805, cluster *admin.ClusterDescription20240805, modelIn *TFModel, oldAdvConfig *types.Object, diags *diag.Diagnostics) *TFModel { - apiInfo := resolveAPIInfo(ctx, modelIn, diags, cluster, r.Client) +func getBasicClusterModel(ctx context.Context, diags *diag.Diagnostics, client *config.MongoDBClient, clusterResp *admin.ClusterDescription20240805, modelIn *TFModel) (*TFModel, *ExtraAPIInfo) { + apiInfo := resolveAPIInfo(ctx, modelIn, diags, clusterResp, client) if diags.HasError() { - return nil + return nil, nil } - modelOut := NewTFModel(ctx, cluster, modelIn.Timeouts, diags, *apiInfo) + modelOut := NewTFModel(ctx, clusterResp, modelIn.Timeouts, diags, *apiInfo) if diags.HasError() { - return nil - } - if admin.IsNil(oldAdvConfig) { - legacyAdvConfig, advConfig = readUnsetAdvancedConfiguration(ctx, r.Client, modelOut, legacyAdvConfig, 
advConfig, diags) - AddAdvancedConfig(ctx, modelOut, advConfig, legacyAdvConfig, diags) - if diags.HasError() { - return nil - } - } else { - modelOut.AdvancedConfiguration = *oldAdvConfig + return nil, nil } overrideAttributesWithPrevStateValue(modelIn, modelOut) - return modelOut + return modelOut, apiInfo } -func readUnsetAdvancedConfiguration(ctx context.Context, client *config.MongoDBClient, model *TFModel, legacyAdvConfig *admin20240530.ClusterDescriptionProcessArgs, advConfig *admin.ClusterDescriptionProcessArgs20240805, diags *diag.Diagnostics) (*admin20240530.ClusterDescriptionProcessArgs, *admin.ClusterDescriptionProcessArgs20240805) { +func updateModelAdvancedConfig(ctx context.Context, diags *diag.Diagnostics, client *config.MongoDBClient, model *TFModel, legacyAdvConfig *admin20240530.ClusterDescriptionProcessArgs, advConfig *admin.ClusterDescriptionProcessArgs20240805) { api := client.AtlasV2.ClustersApi api20240530 := client.AtlasV220240530.ClustersApi projectID := model.ProjectID.ValueString() @@ -425,15 +452,15 @@ func readUnsetAdvancedConfiguration(ctx context.Context, client *config.MongoDBC legacyAdvConfig, _, err = api20240530.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { diags.AddError("errorReadAdvConfigLegacy", fmt.Sprintf(errorRead, clusterName, err.Error())) - return nil, nil + return } } if advConfig == nil { advConfig, _, err = api.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { diags.AddError("errorReadAdvConfig", fmt.Sprintf(errorRead, clusterName, err.Error())) - return nil, nil + return } } - return legacyAdvConfig, advConfig + AddAdvancedConfig(ctx, model, advConfig, legacyAdvConfig, diags) } diff --git a/internal/service/advancedclustertpf/resource_compatibility_reuse.go b/internal/service/advancedclustertpf/resource_compatibility_reuse.go index b827444670..e3dd10df5e 100644 --- a/internal/service/advancedclustertpf/resource_compatibility_reuse.go +++ b/internal/service/advancedclustertpf/resource_compatibility_reuse.go @@ -105,22 +105,23 @@ func getAdvancedClusterContainerID(containers []admin.CloudProviderContainer, cl return "" } -func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName string, api admin20240530.ClustersApi) (map[string]string, error) { - clusterOldAPI, _, err := api.GetCluster(ctx, projectID, clusterName).Execute() +func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName string, api admin20240530.ClustersApi) (zoneNameSpecIDs map[string]string, asymmetricShardUnsupported bool, err error) { + var clusterOldAPI *admin20240530.AdvancedClusterDescription + clusterOldAPI, _, err = api.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { if apiError, ok := admin20240530.AsError(err); ok { if apiError.GetErrorCode() == "ASYMMETRIC_SHARD_UNSUPPORTED" { - return nil, nil // if its the case of an asymmetric shard an error is expected in old API, replication_specs.*.id attribute will not be populated + return nil, true, nil // an error is expected in old API in case of an asymmetric shard. In that case, replication_specs.*.id attribute will not be populated. 
} } - return nil, fmt.Errorf("error reading advanced cluster with 2023-02-01 API (%s): %s", clusterName, err) + return nil, false, fmt.Errorf("error reading advanced cluster with 2023-02-01 API (%s): %s", clusterName, err) } specs := clusterOldAPI.GetReplicationSpecs() - result := make(map[string]string, len(specs)) + zoneNameSpecIDs = make(map[string]string, len(specs)) for _, spec := range specs { - result[spec.GetZoneName()] = spec.GetId() + zoneNameSpecIDs[spec.GetZoneName()] = spec.GetId() } - return result, nil + return zoneNameSpecIDs, false, nil } func convertHardwareSpecToOldSDK(hwspec *admin.HardwareSpec20240805) *admin20240530.HardwareSpec { diff --git a/internal/service/advancedclustertpf/resource_compatiblity.go b/internal/service/advancedclustertpf/resource_compatiblity.go index e3ac60561b..85130bb473 100644 --- a/internal/service/advancedclustertpf/resource_compatiblity.go +++ b/internal/service/advancedclustertpf/resource_compatiblity.go @@ -40,7 +40,7 @@ func findNumShardsUpdates(ctx context.Context, state, plan *TFModel, diags *diag func resolveAPIInfo(ctx context.Context, plan *TFModel, diags *diag.Diagnostics, clusterLatest *admin.ClusterDescription20240805, client *config.MongoDBClient) *ExtraAPIInfo { rootDiskSize := conversion.NilForUnknown(plan.DiskSizeGB, plan.DiskSizeGB.ValueFloat64Pointer()) projectID := plan.ProjectID.ValueString() - zoneNameSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, plan.Name.ValueString(), client.AtlasV220240530.ClustersApi) + zoneNameSpecIDs, asymmetricShardUnsupported, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, plan.Name.ValueString(), client.AtlasV220240530.ClustersApi) if err != nil { diags.AddError("getReplicationSpecIDsFromOldAPI", err.Error()) return nil @@ -59,6 +59,7 @@ func resolveAPIInfo(ctx context.Context, plan *TFModel, diags *diag.Diagnostics, ZoneNameNumShards: numShardsMap(ctx, plan.ReplicationSpecs, diags), RootDiskSize: rootDiskSize, ZoneNameReplicationSpecIDs: zoneNameSpecIDs, + AsymmetricShardUnsupported: asymmetricShardUnsupported, } } @@ -140,6 +141,9 @@ func repSpecNoIDs(repspec admin.ReplicationSpec20240805) *admin.ReplicationSpec2 func numShardsCounts(ctx context.Context, input types.List, diags *diag.Diagnostics) []int64 { elements := make([]TFReplicationSpecsModel, len(input.Elements())) + if len(elements) == 0 { + return nil + } if localDiags := input.ElementsAs(ctx, &elements, false); len(localDiags) > 0 { diags.Append(localDiags...) return nil @@ -162,6 +166,9 @@ func usingLegacySchema(ctx context.Context, input types.List, diags *diag.Diagno func numShardsMap(ctx context.Context, input types.List, diags *diag.Diagnostics) map[string]int64 { elements := make([]TFReplicationSpecsModel, len(input.Elements())) + if len(elements) == 0 { + return nil + } if localDiags := input.ElementsAs(ctx, &elements, false); len(localDiags) > 0 { diags.Append(localDiags...) 
return nil diff --git a/internal/service/advancedclustertpf/resource_schema.go b/internal/service/advancedclustertpf/schema.go similarity index 91% rename from internal/service/advancedclustertpf/resource_schema.go rename to internal/service/advancedclustertpf/schema.go index 001745c89a..547390cd18 100644 --- a/internal/service/advancedclustertpf/resource_schema.go +++ b/internal/service/advancedclustertpf/schema.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" + dsschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" @@ -15,10 +16,11 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/schemafunc" ) -func ResourceSchema(ctx context.Context) schema.Schema { +func resourceSchema(ctx context.Context) schema.Schema { return schema.Schema{ Version: 1, Attributes: map[string]schema.Attribute{ @@ -351,6 +353,40 @@ func ResourceSchema(ctx context.Context) schema.Schema { } } +func dataSourceSchema(ctx context.Context) dsschema.Schema { + return conversion.DataSourceSchemaFromResource(resourceSchema(ctx), &conversion.DataSourceSchemaRequest{ + RequiredFields: []string{"project_id", "name"}, + OverridenFields: dataSourceOverridenFields(), + }) +} + +func pluralDataSourceSchema(ctx context.Context) dsschema.Schema { + return conversion.PluralDataSourceSchemaFromResource(resourceSchema(ctx), &conversion.PluralDataSourceSchemaRequest{ + RequiredFields: []string{"project_id"}, + OverridenRootFields: map[string]dsschema.Attribute{ + "use_replication_spec_per_shard": dsschema.BoolAttribute{ // TODO: added as in current resource + Optional: true, + MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation + }, + "include_deleted_with_retained_backups": dsschema.BoolAttribute{ // TODO: not in current resource, decide if keep + Optional: true, + MarkdownDescription: "Flag that indicates whether to return Clusters with retain backups.", + }, + }, + OverridenFields: dataSourceOverridenFields(), + }) +} + +func dataSourceOverridenFields() map[string]dsschema.Attribute { + return map[string]dsschema.Attribute{ + "use_replication_spec_per_shard": dsschema.BoolAttribute{ // TODO: added as in current resource + Optional: true, + MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation + }, + "accept_data_risks_and_force_replica_set_reconfig": nil, + } +} + func AutoScalingSchema() schema.SingleNestedAttribute { return schema.SingleNestedAttribute{ Computed: true, @@ -537,6 +573,47 @@ type TFModel struct { PitEnabled types.Bool `tfsdk:"pit_enabled"` } +// TFModelDS differs from TFModel: removes timeouts, accept_data_risks_and_force_replica_set_reconfig; adds use_replication_spec_per_shard. 
+type TFModelDS struct { + DiskSizeGB types.Float64 `tfsdk:"disk_size_gb"` + Labels types.Set `tfsdk:"labels"` + ReplicationSpecs types.List `tfsdk:"replication_specs"` + Tags types.Set `tfsdk:"tags"` + ReplicaSetScalingStrategy types.String `tfsdk:"replica_set_scaling_strategy"` + Name types.String `tfsdk:"name"` + AdvancedConfiguration types.Object `tfsdk:"advanced_configuration"` + BiConnectorConfig types.Object `tfsdk:"bi_connector_config"` + RootCertType types.String `tfsdk:"root_cert_type"` + ClusterType types.String `tfsdk:"cluster_type"` + MongoDBMajorVersion types.String `tfsdk:"mongo_db_major_version"` + ConfigServerType types.String `tfsdk:"config_server_type"` + VersionReleaseSystem types.String `tfsdk:"version_release_system"` + ConnectionStrings types.Object `tfsdk:"connection_strings"` + StateName types.String `tfsdk:"state_name"` + MongoDBVersion types.String `tfsdk:"mongo_db_version"` + CreateDate types.String `tfsdk:"create_date"` + EncryptionAtRestProvider types.String `tfsdk:"encryption_at_rest_provider"` + ProjectID types.String `tfsdk:"project_id"` + ClusterID types.String `tfsdk:"cluster_id"` + ConfigServerManagementMode types.String `tfsdk:"config_server_management_mode"` + PinnedFCV types.Object `tfsdk:"pinned_fcv"` + UseReplicationSpecPerShard types.Bool `tfsdk:"use_replication_spec_per_shard"` + RedactClientLogData types.Bool `tfsdk:"redact_client_log_data"` + GlobalClusterSelfManagedSharding types.Bool `tfsdk:"global_cluster_self_managed_sharding"` + BackupEnabled types.Bool `tfsdk:"backup_enabled"` + RetainBackupsEnabled types.Bool `tfsdk:"retain_backups_enabled"` + Paused types.Bool `tfsdk:"paused"` + TerminationProtectionEnabled types.Bool `tfsdk:"termination_protection_enabled"` + PitEnabled types.Bool `tfsdk:"pit_enabled"` +} + +type TFModelPluralDS struct { + ProjectID types.String `tfsdk:"project_id"` + Results []*TFModelDS `tfsdk:"results"` + UseReplicationSpecPerShard types.Bool `tfsdk:"use_replication_spec_per_shard"` // TODO: added as in current resource + IncludeDeletedWithRetainedBackups types.Bool `tfsdk:"include_deleted_with_retained_backups"` // TODO: not in current resource, decide if keep +} + type TFBiConnectorModel struct { ReadPreference types.String `tfsdk:"read_preference"` Enabled types.Bool `tfsdk:"enabled"` diff --git a/internal/service/advancedclustertpf/resource_schema_test.go b/internal/service/advancedclustertpf/schema_test.go similarity index 100% rename from internal/service/advancedclustertpf/resource_schema_test.go rename to internal/service/advancedclustertpf/schema_test.go diff --git a/internal/testutil/acc/advanced_cluster_schema_v2.go b/internal/testutil/acc/advanced_cluster_schema_v2.go index 9c89c1d2f6..1bb7fc0ae0 100644 --- a/internal/testutil/acc/advanced_cluster_schema_v2.go +++ b/internal/testutil/acc/advanced_cluster_schema_v2.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/hcl" "github.com/zclconf/go-cty/cty" @@ -16,67 +15,35 @@ import ( ) func TestCheckResourceAttrSchemaV2(isAcc bool, name, key, value string) resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return testCheckFuncAlwaysPass - } return resource.TestCheckResourceAttr(name, 
AttrNameToSchemaV2(isAcc, key), value) } func TestCheckResourceAttrSetSchemaV2(isAcc bool, name, key string) resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return testCheckFuncAlwaysPass - } return resource.TestCheckResourceAttrSet(name, AttrNameToSchemaV2(isAcc, key)) } func TestCheckResourceAttrWithSchemaV2(isAcc bool, name, key string, checkValueFunc resource.CheckResourceAttrWithFunc) resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return testCheckFuncAlwaysPass - } return resource.TestCheckResourceAttrWith(name, AttrNameToSchemaV2(isAcc, key), checkValueFunc) } func TestCheckTypeSetElemNestedAttrsSchemaV2(isAcc bool, name, key string, values map[string]string) resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return testCheckFuncAlwaysPass - } return resource.TestCheckTypeSetElemNestedAttrs(name, AttrNameToSchemaV2(isAcc, key), values) } -func testCheckFuncAlwaysPass(*terraform.State) error { - return nil -} - func AddAttrChecksSchemaV2(isAcc bool, name string, checks []resource.TestCheckFunc, mapChecks map[string]string) []resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return []resource.TestCheckFunc{} - } return AddAttrChecks(name, checks, ConvertToSchemaV2AttrsMap(isAcc, mapChecks)) } func AddAttrSetChecksSchemaV2(isAcc bool, name string, checks []resource.TestCheckFunc, attrNames ...string) []resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return []resource.TestCheckFunc{} - } return AddAttrSetChecks(name, checks, ConvertToSchemaV2AttrsSet(isAcc, attrNames)...) } func AddAttrChecksPrefixSchemaV2(isAcc bool, name string, checks []resource.TestCheckFunc, mapChecks map[string]string, prefix string, skipNames ...string) []resource.TestCheckFunc { - if skipChecks(isAcc, name) { - return []resource.TestCheckFunc{} - } return AddAttrChecksPrefix(name, checks, ConvertToSchemaV2AttrsMap(isAcc, mapChecks), prefix, skipNames...) 
} -func skipChecks(isAcc bool, name string) bool { - if !config.AdvancedClusterV2Schema() || !isAcc { - return false - } - return strings.HasPrefix(name, "data.mongodbatlas_advanced_cluster") -} - func ConvertToSchemaV2AttrsMap(isAcc bool, attrsMap map[string]string) map[string]string { - if !config.AdvancedClusterV2Schema() || !isAcc { + if skipSchemaV2Work(isAcc) { return attrsMap } ret := make(map[string]string, len(attrsMap)) @@ -87,7 +54,7 @@ func ConvertToSchemaV2AttrsMap(isAcc bool, attrsMap map[string]string) map[strin } func ConvertToSchemaV2AttrsSet(isAcc bool, attrsSet []string) []string { - if !config.AdvancedClusterV2Schema() || !isAcc { + if skipSchemaV2Work(isAcc) { return attrsSet } ret := make([]string, 0, len(attrsSet)) @@ -104,10 +71,11 @@ var tpfSingleNestedAttrs = []string{ "auto_scaling", // includes analytics_auto_scaling "advanced_configuration", "bi_connector_config", + "pinned_fcv", } func AttrNameToSchemaV2(isAcc bool, name string) string { - if !config.AdvancedClusterV2Schema() || !isAcc { + if skipSchemaV2Work(isAcc) { return name } for _, singleAttrName := range tpfSingleNestedAttrs { @@ -118,7 +86,7 @@ func AttrNameToSchemaV2(isAcc bool, name string) string { func ConvertAdvancedClusterToSchemaV2(t *testing.T, isAcc bool, def string) string { t.Helper() - if !config.AdvancedClusterV2Schema() || !isAcc { + if skipSchemaV2Work(isAcc) { return def } parse := hcl.GetDefParser(t, def) @@ -139,6 +107,10 @@ func ConvertAdvancedClusterToSchemaV2(t *testing.T, isAcc bool, def string) stri return string(content) } +func skipSchemaV2Work(isAcc bool) bool { + return !config.AdvancedClusterV2Schema() || !isAcc +} + func AssertEqualHCL(t *testing.T, expected, actual string, msgAndArgs ...interface{}) { t.Helper() assert.Equal(t, hcl.CanonicalHCL(t, expected), hcl.CanonicalHCL(t, actual), msgAndArgs...)
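
For reference, a minimal sketch of how the new conversion.CopyModel helper introduced in this patch could be exercised from code living inside the provider module (the conversion package is internal, so it is not importable from other modules). The resourceModel and dataSourceModel structs and the copyExample function below are hypothetical stand-ins for TFModel and TFModelDS, not part of the change itself:

package example

import (
	"fmt"

	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
)

// Hypothetical stand-ins for TFModel (source) and TFModelDS (destination).
type resourceModel struct {
	ProjectID string
	Name      string
	Paused    bool
}

type dataSourceModel struct {
	ProjectID                  string
	Name                       string
	Paused                     bool
	UseReplicationSpecPerShard bool // not present in the source struct, so it keeps its zero value
}

func copyExample() error {
	src := &resourceModel{ProjectID: "p1", Name: "cluster0", Paused: true}
	// Fields matched by name and type are copied; extra destination fields stay at their zero value,
	// mirroring how the data sources copy TFModel into TFModelDS in data_source.go and plural_data_source.go.
	dst, err := conversion.CopyModel[dataSourceModel](src)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", *dst)
	return nil
}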