Merge branch 'master' into CLOUDP-287995_fail_index_key_too_long_workaround
EspenAlbert committed Dec 16, 2024
2 parents 9cad988 + afcac0f commit d87b478
Showing 17 changed files with 436 additions and 129 deletions.
2 changes: 1 addition & 1 deletion .golangci.yml
@@ -123,7 +123,7 @@ issues:
  - linters:
      - gocritic
    text: "^hugeParam: req is heavy"
  - path: "_schema\\.go" # exclude rules for schema files as it's auto-genereated from OpenAPI spec
  - path: "schema\\.go" # exclude rules for schema files as it's auto-genereated from OpenAPI spec
    text: "fieldalignment|hugeParam|var-naming|ST1003|S1007|exceeds the maximum|too long|regexpSimplify|nolint"
run:
  timeout: 10m
41 changes: 41 additions & 0 deletions internal/common/conversion/model_generation.go
@@ -0,0 +1,41 @@
package conversion

import (
    "fmt"
    "reflect"
)

// CopyModel creates a new struct with the same values as the source struct. Fields in destination struct that are not in source are left with zero value.
func CopyModel[T any](src any) (*T, error) {
    dest := new(T)
    valSrc := reflect.ValueOf(src)
    valDest := reflect.ValueOf(dest)
    if valSrc.Kind() != reflect.Ptr || valDest.Kind() != reflect.Ptr {
        return nil, fmt.Errorf("params must be pointers")
    }
    valSrc = valSrc.Elem()
    valDest = valDest.Elem()
    if valSrc.Kind() != reflect.Struct || valDest.Kind() != reflect.Struct {
        return nil, fmt.Errorf("params must be pointers to structs")
    }
    typeSrc := valSrc.Type()
    typeDest := valDest.Type()
    for i := 0; i < typeDest.NumField(); i++ {
        fieldDest := typeDest.Field(i)
        name := fieldDest.Name
        {
            fieldSrc, found := typeSrc.FieldByName(name)
            if !found {
                continue
            }
            if fieldDest.Type != fieldSrc.Type {
                return nil, fmt.Errorf("field has different type: %s", name)
            }
        }
        if !valDest.Field(i).CanSet() {
            return nil, fmt.Errorf("field can't be set, probably unexported: %s", name)
        }
        valDest.Field(i).Set(valSrc.FieldByName(name))
    }
    return dest, nil
}
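Editor's note: a minimal usage sketch of the new CopyModel helper, for context only. It is not part of the commit; the srcModel/destModel types and the example function are made up for illustration.

package conversion_example // illustrative only

import (
    "fmt"

    "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
)

type srcModel struct {
    Name  string
    Count int
    Extra string // present only in src: silently skipped
}

type destModel struct {
    Name  string
    Count int
    Notes string // present only in dest: left at its zero value
}

func example() error {
    src := &srcModel{Name: "cluster0", Count: 3, Extra: "ignored"}
    dest, err := conversion.CopyModel[destModel](src) // dest is *destModel
    if err != nil {
        return err // type mismatches, or unexported dest fields that also exist in src, produce an error
    }
    fmt.Println(dest.Name, dest.Count, dest.Notes) // "cluster0 3 " (Notes stays empty)
    return nil
}
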
92 changes: 92 additions & 0 deletions internal/common/conversion/model_generation_test.go
@@ -0,0 +1,92 @@
package conversion_test

import (
    "testing"

    "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestCopyModel(t *testing.T) {
    type destType struct {
        AttrStr string
        attrUnexported string
        AttrInt int
    }

    testCases := map[string]struct {
        input any
        expected any
        expectedErrorStr string
    }{
        "basic": {
            input: &struct {
                AttrStr string
                AttrInt int
            }{
                AttrStr: "val",
                AttrInt: 1,
            },
            expected: &destType{
                AttrStr: "val",
                AttrInt: 1,
                attrUnexported: "",
            },
        },
        "missing field": {
            input: &struct {
                AttrStr string
            }{
                AttrStr: "val",
            },
            expected: &destType{
                AttrStr: "val",
            },
        },
        "extra field": {
            input: &struct {
                AttrStr string
                AttrExtra string
                AttrInt int
            }{
                AttrStr: "val",
                AttrExtra: "extra",
                AttrInt: 1,
            },
            expected: &destType{
                AttrStr: "val",
                AttrInt: 1,
            },
        },
        "different type": {
            input: &struct {
                AttrStr bool
            }{
                AttrStr: true,
            },
            expectedErrorStr: "field has different type: AttrStr",
        },
        "unexported": {
            input: &struct {
                attrUnexported string
            }{
                attrUnexported: "val",
            },
            expectedErrorStr: "field can't be set, probably unexported: attrUnexported",
        },
    }
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            dest, err := conversion.CopyModel[destType](tc.input)
            if err == nil {
                assert.Equal(t, tc.expected, dest)
                assert.Equal(t, "", tc.expectedErrorStr)
            } else {
                require.ErrorContains(t, err, tc.expectedErrorStr)
                assert.Nil(t, dest)
                assert.Nil(t, tc.expected)
            }
        })
    }
}
@@ -706,6 +706,8 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchemaToAsymmetricAd
}

func TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema(t *testing.T) {
    // TODO: enable when datasource attribute use_replication_spec_per_shard is used
    acc.SkipIfAdvancedClusterV2Schema(t)
    resource.ParallelTest(t, asymmetricShardedNewSchemaTestCase(t, true))
}

66 changes: 51 additions & 15 deletions internal/service/advancedclustertpf/data_source.go
@@ -2,12 +2,13 @@ package advancedclustertpf

import (
    "context"
    "fmt"

    "github.com/hashicorp/terraform-plugin-framework/datasource"
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
    "github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
    "go.mongodb.org/atlas-sdk/v20241113003/admin"
)

var _ datasource.DataSource = &ds{}
@@ -26,22 +27,57 @@ type ds struct {
}

func (d *ds) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
    resp.Schema = conversion.DataSourceSchemaFromResource(ResourceSchema(ctx), &conversion.DataSourceSchemaRequest{
        RequiredFields: []string{"project_id", "name"},
        OverridenFields: map[string]schema.Attribute{
            "use_replication_spec_per_shard": schema.BoolAttribute{ // TODO: added as in current resource
                Optional: true,
                MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation
            },
        },
    })
    resp.Schema = dataSourceSchema(ctx)
}

func (d *ds) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
    var state TFModelDS
    diags := &resp.Diagnostics
    diags.Append(req.Config.Get(ctx, &state)...)
    if diags.HasError() {
        return
    }
    model := d.readCluster(ctx, diags, &state)
    if model != nil {
        diags.Append(resp.State.Set(ctx, model)...)
    }
}

// TODO: see if resource model can be used instead, probably different only in timeouts
type ModelDS struct {
    ProjectID types.String `tfsdk:"project_id"`
    Name types.String `tfsdk:"name"`
func (d *ds) readCluster(ctx context.Context, diags *diag.Diagnostics, modelDS *TFModelDS) *TFModelDS {
    clusterName := modelDS.Name.ValueString()
    projectID := modelDS.ProjectID.ValueString()
    useReplicationSpecPerShard := modelDS.UseReplicationSpecPerShard.ValueBool()
    api := d.Client.AtlasV2.ClustersApi
    clusterResp, _, err := api.GetCluster(ctx, projectID, clusterName).Execute()
    if err != nil {
        if admin.IsErrorCode(err, ErrorCodeClusterNotFound) {
            return nil
        }
        diags.AddError("errorRead", fmt.Sprintf(errorRead, clusterName, err.Error()))
        return nil
    }
    modelIn := &TFModel{
        ProjectID: modelDS.ProjectID,
        Name: modelDS.Name,
    }
    // TODO: pass !UseReplicationSpecPerShard to overrideUsingLegacySchema
    modelOut, extraInfo := getBasicClusterModel(ctx, diags, d.Client, clusterResp, modelIn)
    if diags.HasError() {
        return nil
    }
    if extraInfo.AsymmetricShardUnsupported && !useReplicationSpecPerShard {
        diags.AddError("errorRead", "Please add `use_replication_spec_per_shard = true` to your data source configuration to enable asymmetric shard support. Refer to documentation for more details.")
        return nil
    }
    updateModelAdvancedConfig(ctx, diags, d.Client, modelOut, nil, nil)
    if diags.HasError() {
        return nil
    }
    modelOutDS, err := conversion.CopyModel[TFModelDS](modelOut)
    if err != nil {
        diags.AddError(errorRead, fmt.Sprintf("error setting model: %s", err.Error()))
        return nil
    }
    modelOutDS.UseReplicationSpecPerShard = modelDS.UseReplicationSpecPerShard // attrs not in resource model
    return modelOutDS
}
@@ -25,6 +25,7 @@ type ExtraAPIInfo struct {
    RootDiskSize *float64
    ContainerIDs map[string]string
    UsingLegacySchema bool
    AsymmetricShardUnsupported bool
}

func NewTFModel(ctx context.Context, input *admin.ClusterDescription20240805, timeout timeouts.Value, diags *diag.Diagnostics, apiInfo ExtraAPIInfo) *TFModel {
81 changes: 63 additions & 18 deletions internal/service/advancedclustertpf/plural_data_source.go
@@ -3,12 +3,15 @@ package advancedclustertpf
import (
    "context"
    "fmt"
    "net/http"

    "github.com/hashicorp/terraform-plugin-framework/datasource"
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-framework/types"
    "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
    "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema"
    "github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
    "go.mongodb.org/atlas-sdk/v20241113003/admin"
)

var _ datasource.DataSource = &pluralDS{}
@@ -27,26 +30,68 @@ type pluralDS struct {
}

func (d *pluralDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
    resp.Schema = conversion.PluralDataSourceSchemaFromResource(ResourceSchema(ctx), &conversion.PluralDataSourceSchemaRequest{
        RequiredFields: []string{"project_id"},
        OverridenRootFields: map[string]schema.Attribute{
            "use_replication_spec_per_shard": schema.BoolAttribute{ // TODO: added as in current resource
                Optional: true,
                MarkdownDescription: "use_replication_spec_per_shard", // TODO: add documentation
            },
            "include_deleted_with_retained_backups": schema.BoolAttribute{ // TODO: not in current resource, decide if keep
                Optional: true,
                MarkdownDescription: "Flag that indicates whether to return Clusters with retain backups.",
            },
        },
    })
    resp.Schema = pluralDataSourceSchema(ctx)
}

func (d *pluralDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
    var state TFModelPluralDS
    diags := &resp.Diagnostics
    diags.Append(req.Config.Get(ctx, &state)...)
    if diags.HasError() {
        return
    }
    model := d.readClusters(ctx, diags, &state)
    if model != nil {
        diags.Append(resp.State.Set(ctx, model)...)
    }
}

type AdvancedClustersModel struct {
    ProjectID types.String `tfsdk:"project_id"`
    UseReplicationSpecPerShard types.Bool `tfsdk:"use_replication_spec_per_shard"` // TODO: added as in current resource
    IncludeDeletedWithRetainedBackups types.Bool `tfsdk:"include_deleted_with_retained_backups"` // TODO: not in current resource, decide if keep
func (d *pluralDS) readClusters(ctx context.Context, diags *diag.Diagnostics, pluralModel *TFModelPluralDS) *TFModelPluralDS {
    projectID := pluralModel.ProjectID.ValueString()
    useReplicationSpecPerShard := pluralModel.UseReplicationSpecPerShard.ValueBool()
    api := d.Client.AtlasV2.ClustersApi
    params := admin.ListClustersApiParams{
        GroupId: projectID,
    }
    list, err := dsschema.AllPages(ctx, func(ctx context.Context, pageNum int) (dsschema.PaginateResponse[admin.ClusterDescription20240805], *http.Response, error) {
        request := api.ListClustersWithParams(ctx, &params)
        request = request.PageNum(pageNum)
        return request.Execute()
    })
    if err != nil {
        diags.AddError("errorList", fmt.Sprintf(errorList, projectID, err.Error()))
        return nil
    }
    outs := &TFModelPluralDS{
        ProjectID: pluralModel.ProjectID,
        UseReplicationSpecPerShard: pluralModel.UseReplicationSpecPerShard,
        IncludeDeletedWithRetainedBackups: pluralModel.IncludeDeletedWithRetainedBackups,
    }
    for i := range list {
        clusterResp := &list[i]
        modelIn := &TFModel{
            ProjectID: pluralModel.ProjectID,
            Name: types.StringValue(clusterResp.GetName()),
        }
        // TODO: pass !UseReplicationSpecPerShard to overrideUsingLegacySchema
        modelOut, extraInfo := getBasicClusterModel(ctx, diags, d.Client, clusterResp, modelIn)
        if diags.HasError() {
            return nil
        }
        if extraInfo.AsymmetricShardUnsupported && !useReplicationSpecPerShard {
            continue
        }
        updateModelAdvancedConfig(ctx, diags, d.Client, modelOut, nil, nil)
        if diags.HasError() {
            return nil
        }
        modelOutDS, err := conversion.CopyModel[TFModelDS](modelOut)
        if err != nil {
            diags.AddError(errorList, fmt.Sprintf("error setting model: %s", err.Error()))
            return nil
        }
        modelOutDS.UseReplicationSpecPerShard = pluralModel.UseReplicationSpecPerShard // attrs not in resource model
        outs.Results = append(outs.Results, modelOutDS)
    }
    return outs
}
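Editor's note: for readers unfamiliar with the pagination helper used in readClusters above, here is a rough sketch of what a generic page collector like dsschema.AllPages might look like. The PaginateResponse interface (GetResults/GetTotalCount) and the stop conditions are assumptions for illustration, not the repository's actual implementation.

package dsschema_sketch // illustrative only

import (
    "context"
    "net/http"
)

// PaginateResponse is the assumed shape of one page of API results.
type PaginateResponse[T any] interface {
    GetResults() []T
    GetTotalCount() int
}

// AllPages calls the API page by page (starting at page 1) and flattens the results.
func AllPages[T any](ctx context.Context, call func(ctx context.Context, pageNum int) (PaginateResponse[T], *http.Response, error)) ([]T, error) {
    var all []T
    for pageNum := 1; ; pageNum++ {
        resp, _, err := call(ctx, pageNum)
        if err != nil {
            return nil, err
        }
        results := resp.GetResults()
        all = append(all, results...)
        // Stop when a page comes back empty or everything reported by the API has been collected.
        if len(results) == 0 || len(all) >= resp.GetTotalCount() {
            return all, nil
        }
    }
}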