Skip to content

Commit

Permalink
feat: Adds `global_cluster_self_managed_sharding` to `mongodbatlas_advanced_cluster` resource (#2348)
Browse files Browse the repository at this point in the history

* add attribute global_cluster_self_managed_sharding

* changelog

* tests

* example

* doc

* apply feedback [skip ci]

* add clarification [skip ci]

* apply feedback
  • Loading branch information
lantoli authored Jun 19, 2024
1 parent 36aa748 commit 0085b85
Show file tree
Hide file tree
Showing 10 changed files with 176 additions and 40 deletions.
11 changes: 11 additions & 0 deletions .changelog/2348.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
```release-note:enhancement
resource/mongodbatlas_advanced_cluster: Adds attribute `global_cluster_self_managed_sharding`
```

```release-note:enhancement
data-source/mongodbatlas_advanced_cluster: Adds attribute `global_cluster_self_managed_sharding`
```

```release-note:enhancement
data-source/mongodbatlas_advanced_clusters: Adds attribute `global_cluster_self_managed_sharding`
```
10 changes: 7 additions & 3 deletions examples/mongodbatlas_advanced_cluster/global-cluster/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,13 @@ provider "mongodbatlas" {
}

resource "mongodbatlas_advanced_cluster" "cluster" {
project_id = mongodbatlas_project.project.id
name = var.cluster_name
cluster_type = "GEOSHARDED"
project_id = mongodbatlas_project.project.id
name = var.cluster_name
cluster_type = "GEOSHARDED"

# uncomment next line to use self-managed sharding, see doc for more info
# global_cluster_self_managed_sharding = true

backup_enabled = true

replication_specs { # zone n1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,10 @@ func DataSource() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"global_cluster_self_managed_sharding": {
Type: schema.TypeBool,
Computed: true,
},
},
}
}
Expand Down Expand Up @@ -320,6 +324,9 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.
if err := d.Set("version_release_system", cluster.GetVersionReleaseSystem()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "version_release_system", clusterName, err))
}
if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err))
}

processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute()
if err != nil {
Expand Down
45 changes: 25 additions & 20 deletions internal/service/advancedcluster/data_source_advanced_clusters.go
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,10 @@ func PluralDataSource() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"global_cluster_self_managed_sharding": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},
Expand Down Expand Up @@ -273,26 +277,27 @@ func flattenAdvancedClusters(ctx context.Context, connV2 *admin.APIClient, clust
}

result := map[string]any{
"advanced_configuration": flattenProcessArgs(processArgs),
"backup_enabled": cluster.GetBackupEnabled(),
"bi_connector_config": flattenBiConnectorConfig(cluster.GetBiConnector()),
"cluster_type": cluster.GetClusterType(),
"create_date": conversion.TimePtrToStringPtr(cluster.CreateDate),
"connection_strings": flattenConnectionStrings(cluster.GetConnectionStrings()),
"disk_size_gb": cluster.GetDiskSizeGB(),
"encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(),
"labels": flattenLabels(cluster.GetLabels()),
"tags": conversion.FlattenTags(cluster.GetTags()),
"mongo_db_major_version": cluster.GetMongoDBMajorVersion(),
"mongo_db_version": cluster.GetMongoDBVersion(),
"name": cluster.GetName(),
"paused": cluster.GetPaused(),
"pit_enabled": cluster.GetPitEnabled(),
"replication_specs": replicationSpecs,
"root_cert_type": cluster.GetRootCertType(),
"state_name": cluster.GetStateName(),
"termination_protection_enabled": cluster.GetTerminationProtectionEnabled(),
"version_release_system": cluster.GetVersionReleaseSystem(),
"advanced_configuration": flattenProcessArgs(processArgs),
"backup_enabled": cluster.GetBackupEnabled(),
"bi_connector_config": flattenBiConnectorConfig(cluster.GetBiConnector()),
"cluster_type": cluster.GetClusterType(),
"create_date": conversion.TimePtrToStringPtr(cluster.CreateDate),
"connection_strings": flattenConnectionStrings(cluster.GetConnectionStrings()),
"disk_size_gb": cluster.GetDiskSizeGB(),
"encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(),
"labels": flattenLabels(cluster.GetLabels()),
"tags": conversion.FlattenTags(cluster.GetTags()),
"mongo_db_major_version": cluster.GetMongoDBMajorVersion(),
"mongo_db_version": cluster.GetMongoDBVersion(),
"name": cluster.GetName(),
"paused": cluster.GetPaused(),
"pit_enabled": cluster.GetPitEnabled(),
"replication_specs": replicationSpecs,
"root_cert_type": cluster.GetRootCertType(),
"state_name": cluster.GetStateName(),
"termination_protection_enabled": cluster.GetTerminationProtectionEnabled(),
"version_release_system": cluster.GetVersionReleaseSystem(),
"global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(),
}
results = append(results, result)
}
Expand Down
16 changes: 16 additions & 0 deletions internal/service/advancedcluster/resource_advanced_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -317,6 +317,11 @@ func Resource() *schema.Resource {
Optional: true,
Description: "Submit this field alongside your topology reconfiguration to request a new regional outage resistant topology",
},
"global_cluster_self_managed_sharding": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(3 * time.Hour),
Expand Down Expand Up @@ -409,6 +414,9 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
if v, ok := d.GetOk("version_release_system"); ok {
params.VersionReleaseSystem = conversion.StringPtr(v.(string))
}
if v, ok := d.GetOk("global_cluster_self_managed_sharding"); ok {
params.GlobalClusterSelfManagedSharding = conversion.Pointer(v.(bool))
}

// Validate oplog_size_mb to show the error before the cluster is created.
if oplogSizeMB, ok := d.GetOkExists("advanced_configuration.0.oplog_size_mb"); ok {
Expand Down Expand Up @@ -573,6 +581,10 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "accept_data_risks_and_force_replica_set_reconfig", clusterName, err))
}

if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err))
}

processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute()
if err != nil {
return diag.FromErr(fmt.Errorf(errorConfigRead, clusterName, err))
Expand Down Expand Up @@ -685,6 +697,10 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
cluster.VersionReleaseSystem = conversion.StringPtr(d.Get("version_release_system").(string))
}

if d.HasChange("global_cluster_self_managed_sharding") {
cluster.GlobalClusterSelfManagedSharding = conversion.Pointer(d.Get("global_cluster_self_managed_sharding").(bool))
}

if d.HasChange("accept_data_risks_and_force_replica_set_reconfig") {
if strTime := d.Get("accept_data_risks_and_force_replica_set_reconfig").(string); strTime != "" {
t, ok := conversion.StringToTime(strTime)
Expand Down
116 changes: 104 additions & 12 deletions internal/service/advancedcluster/resource_advanced_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,13 +40,16 @@ func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.#"),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.region_configs.#"),
resource.TestCheckResourceAttrSet(resourceName, "termination_protection_enabled"),
resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "false"),
resource.TestCheckResourceAttr(dataSourceName, "name", clusterName),
resource.TestCheckResourceAttr(dataSourceName, "termination_protection_enabled", "false"),
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "false"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.#"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.replication_specs.#"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.termination_protection_enabled"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.global_cluster_self_managed_sharding"),
),
},
{
Expand All @@ -58,12 +61,16 @@ func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) {
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.#"),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.region_configs.#"),
resource.TestCheckResourceAttr(resourceName, "labels.#", "0"),
resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "false"),
resource.TestCheckResourceAttr(dataSourceName, "name", clusterNameUpdated),
resource.TestCheckResourceAttr(dataSourceName, "termination_protection_enabled", "false"),
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "false"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.#"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.replication_specs.#"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.termination_protection_enabled"),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.global_cluster_self_managed_sharding"),
),
},
{
Expand Down Expand Up @@ -547,11 +554,9 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t

func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *testing.T) {
var (
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
projectName = acc.RandomProjectName()
clusterName = acc.RandomClusterName() // No ProjectIDExecution to avoid cross-region limits because multi-region
numShards = "1"
numShardsUpdated = "2"
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region
clusterName = acc.RandomClusterName()
)

resource.ParallelTest(t, resource.TestCase{
Expand All @@ -560,21 +565,27 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *tes
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, numShards, numShards),
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
Check: resource.ComposeTestCheckFunc(
checkExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
resource.TestCheckResourceAttr(resourceName, "replication_specs.0.num_shards", "1"),
resource.TestCheckResourceAttr(resourceName, "replication_specs.1.num_shards", "1"),
resource.TestCheckResourceAttr(dataSourceName, "name", clusterName),
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.0.num_shards", "1"),
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.1.num_shards", "1"),
),
},
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, numShardsUpdated, numShards),
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 2, 1, false),
Check: resource.ComposeTestCheckFunc(
checkExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "name", clusterName),
resource.TestCheckResourceAttr(resourceName, "replication_specs.0.num_shards", "2"),
resource.TestCheckResourceAttr(resourceName, "replication_specs.1.num_shards", "1"),
resource.TestCheckResourceAttr(dataSourceName, "name", clusterName),
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.0.num_shards", "2"),
resource.TestCheckResourceAttr(dataSourceName, "replication_specs.1.num_shards", "1"),
),
},
},
Expand Down Expand Up @@ -656,6 +667,53 @@ func TestAccClusterAdvancedCluster_withTags(t *testing.T) {
})
}

// TestAccClusterAdvancedClusterConfig_selfManagedSharding verifies that a
// GEOSHARDED cluster can be created with global_cluster_self_managed_sharding
// enabled, that both the resource and the data source report the flag, and
// that attempting to disable it afterwards fails with the Atlas API error.
func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) {
	// No ProjectIDExecution to avoid cross-region limits because multi-region.
	var (
		orgID       = os.Getenv("MONGODB_ATLAS_ORG_ID")
		projectName = acc.RandomProjectName()
		clusterName = acc.RandomClusterName()
	)

	// Same topology twice; only the self-managed-sharding flag differs.
	selfManagedConfig := configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, true)
	atlasManagedConfig := configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acc.PreCheckBasic(t) },
		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
		CheckDestroy:             acc.CheckDestroyCluster,
		Steps: []resource.TestStep{
			{
				// Create with self-managed sharding enabled and check the
				// attribute on both the resource and the data source.
				Config: selfManagedConfig,
				Check: resource.ComposeTestCheckFunc(
					checkExists(resourceName),
					resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "true"),
					resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "true"),
				),
			},
			{
				// Flipping the flag back must be rejected by the API, as the
				// error code name indicates the setting cannot be modified.
				Config:      atlasManagedConfig,
				ExpectError: regexp.MustCompile("CANNOT_MODIFY_GLOBAL_CLUSTER_MANAGEMENT_SETTING"),
			},
		},
	})
}

// TestAccClusterAdvancedClusterConfig_selfManagedShardingIncorrectType checks
// that setting global_cluster_self_managed_sharding on a non-GEOSHARDED
// (REPLICASET) cluster is rejected with the expected Atlas API error code.
func TestAccClusterAdvancedClusterConfig_selfManagedShardingIncorrectType(t *testing.T) {
	projectID := acc.ProjectIDExecution(t)
	clusterName := acc.RandomClusterName()

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acc.PreCheckBasic(t) },
		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
		CheckDestroy:             acc.CheckDestroyCluster,
		Steps: []resource.TestStep{
			{
				// The config enables self-managed sharding on a REPLICASET
				// cluster, which the API can never accept.
				Config:      configIncorrectTypeGobalClusterSelfManagedSharding(projectID, clusterName),
				ExpectError: regexp.MustCompile("CANNOT_SET_SELF_MANAGED_SHARDING_FOR_NON_GLOBAL_CLUSTER"),
			},
		},
	})
}

func checkExists(resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[resourceName]
Expand Down Expand Up @@ -788,6 +846,34 @@ func configSingleProvider(projectID, name string) string {
`, projectID, name)
}

func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string {
return fmt.Sprintf(`
resource "mongodbatlas_advanced_cluster" "test" {
project_id = %[1]q
name = %[2]q
cluster_type = "REPLICASET"
global_cluster_self_managed_sharding = true # invalid, can only by used with GEOSHARDED clusters
replication_specs {
region_configs {
electable_specs {
instance_size = "M10"
node_count = 3
}
analytics_specs {
instance_size = "M10"
node_count = 1
}
provider_name = "AWS"
priority = 7
region_name = "US_WEST_2"
}
}
}
`, projectID, name)
}

func configMultiCloud(orgID, projectName, name string) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
Expand Down Expand Up @@ -1063,7 +1149,7 @@ func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p
`, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize())
}

func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, numShardsSecondZone string) string {
func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
org_id = %[1]q
Expand All @@ -1076,10 +1162,11 @@ func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, num
backup_enabled = false
mongo_db_major_version = "7.0"
cluster_type = "GEOSHARDED"
global_cluster_self_managed_sharding = %[6]t
replication_specs {
zone_name = "zone n1"
num_shards = %[4]q
num_shards = %[4]d
region_configs {
electable_specs {
Expand All @@ -1098,7 +1185,7 @@ func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, num
replication_specs {
zone_name = "zone n2"
num_shards = %[5]q
num_shards = %[5]d
region_configs {
electable_specs {
Expand All @@ -1115,5 +1202,10 @@ func configMultiZoneWithShards(orgID, projectName, name, numShardsFirstZone, num
}
}
}
`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone)
data "mongodbatlas_advanced_cluster" "test" {
project_id = mongodbatlas_advanced_cluster.test.project_id
name = mongodbatlas_advanced_cluster.test.name
}
`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone, selfManagedSharding)
}
4 changes: 2 additions & 2 deletions internal/service/cluster/resource_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1127,8 +1127,8 @@ func TestAccCluster_basicGCPRegionNameWesternUS(t *testing.T) {
func TestAccCluster_basicGCPRegionNameUSWest2(t *testing.T) {
var (
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
projectName = acc.RandomProjectName()
clusterName = acc.RandomClusterName() // No ProjectIDExecution to avoid cross-region limits because no AWS
projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because no AWS
clusterName = acc.RandomClusterName()
regionName = "US_WEST_2"
)

Expand Down
2 changes: 1 addition & 1 deletion website/docs/d/advanced_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ In addition to all arguments above, the following attributes are exported:
* `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster.
* `version_release_system` - Release cadence that Atlas uses for this cluster.
* `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details.

* `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true).

### bi_connector_config

Expand Down
2 changes: 1 addition & 1 deletion website/docs/d/advanced_clusters.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ In addition to all arguments above, the following attributes are exported:
* `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster.
* `version_release_system` - Release cadence that Atlas uses for this cluster.
* `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details.

* `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true).

### bi_connector_config

Expand Down
Loading

0 comments on commit 0085b85

Please sign in to comment.