diff --git a/examples/atlas-advanced-cluster/tenant-upgrade/README.md b/examples/atlas-advanced-cluster/tenant-upgrade/README.md new file mode 100644 index 0000000000..5051df631b --- /dev/null +++ b/examples/atlas-advanced-cluster/tenant-upgrade/README.md @@ -0,0 +1,32 @@ +# MongoDB Atlas Provider -- Advanced Cluster Tenant Upgrade +This example creates a project and cluster. It is intended to show how to upgrade from shared, aka tenant, to dedicated tier. + +Variables Required: +- `atlas_org_id`: ID of the Atlas organization +- `public_key`: Atlas public key +- `private_key`: Atlas private key +- `provider_name`: Name of provider to use for cluster (TENANT, AWS, GCP) +- `backing_provider_name`: If provider_name is tenant, the backing provider (AWS, GCP) +- `provider_instance_size_name`: Size of the cluster (Shared: M0, M2, M5, Dedicated: M10+.) + +For this example, first we'll start out on the shared tier, then upgrade to a dedicated tier. + +Utilize the following to execute a working example, replacing the org id, public and private key with your values: + +Apply with the following `terraform.tfvars` to first create a shared tier cluster: +``` +atlas_org_id = "627a9687f7f7f7f774de306f14" +public_key = +private_key = +provider_name = "TENANT" +backing_provider_name = "AWS" +provider_instance_size_name = "M2" +``` + +Apply with the following `terraform.tfvars` to upgrade the shared tier cluster you just created to dedicated tier: +``` +atlas_org_id = "627a9687f7f7f7f774de306f14" +public_key = +private_key = +provider_name = "GCP" +provider_instance_size_name = "M10" \ No newline at end of file diff --git a/examples/atlas-advanced-cluster/tenant-upgrade/main.tf b/examples/atlas-advanced-cluster/tenant-upgrade/main.tf new file mode 100644 index 0000000000..8ee986e7e2 --- /dev/null +++ b/examples/atlas-advanced-cluster/tenant-upgrade/main.tf @@ -0,0 +1,29 @@ +provider "mongodbatlas" { + public_key = var.public_key + private_key = var.private_key +} + +resource 
"mongodbatlas_advanced_cluster" "cluster" { + project_id = mongodbatlas_project.project.id + name = "ClusterToUpgrade" + cluster_type = "REPLICASET" + + replication_specs { + num_shards = 1 + + region_configs { + electable_specs { + instance_size = var.provider_instance_size_name + } + provider_name = var.provider_name + backing_provider_name = var.backing_provider_name + region_name = "US_EAST_1" + priority = 7 + } + } +} + +resource "mongodbatlas_project" "project" { + name = "TenantUpgradeTest" + org_id = var.atlas_org_id +} \ No newline at end of file diff --git a/examples/atlas-advanced-cluster/tenant-upgrade/variables.tf b/examples/atlas-advanced-cluster/tenant-upgrade/variables.tf new file mode 100644 index 0000000000..2986570c19 --- /dev/null +++ b/examples/atlas-advanced-cluster/tenant-upgrade/variables.tf @@ -0,0 +1,22 @@ +variable "atlas_org_id" { + description = "Atlas organization id" + default = "" +} +variable "public_key" { + description = "Public API key to authenticate to Atlas" +} +variable "private_key" { + description = "Private API key to authenticate to Atlas" +} +variable "provider_name" { + description = "Atlas cluster provider name" + default = "AWS" +} +variable "backing_provider_name" { + description = "Atlas cluster backing provider name" + default = null +} +variable "provider_instance_size_name" { + description = "Atlas cluster provider instance name" + default = "M10" +} \ No newline at end of file diff --git a/examples/atlas-advanced-cluster/tenant-upgrade/versions.tf b/examples/atlas-advanced-cluster/tenant-upgrade/versions.tf new file mode 100644 index 0000000000..92fca3b63d --- /dev/null +++ b/examples/atlas-advanced-cluster/tenant-upgrade/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + mongodbatlas = { + source = "mongodb/mongodbatlas" + } + } + required_version = ">= 0.13" +} \ No newline at end of file diff --git a/examples/atlas-cluster/tenant-upgrade/README.md 
b/examples/atlas-cluster/tenant-upgrade/README.md new file mode 100644 index 0000000000..34c91e11a5 --- /dev/null +++ b/examples/atlas-cluster/tenant-upgrade/README.md @@ -0,0 +1,33 @@ +# MongoDB Atlas Provider -- Cluster Tenant Upgrade +This example creates a project and cluster. It is intended to show how to upgrade from shared, aka tenant, to dedicated tier. + +Variables Required: +- `atlas_org_id`: ID of the Atlas organization +- `public_key`: Atlas public key +- `private_key`: Atlas private key +- `provider_name`: Name of provider to use for cluster (TENANT, AWS, GCP) +- `backing_provider_name`: If provider_name is tenant, the backing provider (AWS, GCP) +- `provider_instance_size_name`: Size of the cluster (Shared: M0, M2, M5, Dedicated: M10+.) + +For this example, first we'll start out on the shared tier, then upgrade to a dedicated tier. + + +Utilize the following to execute a working example, replacing the org id, public and private key with your values: + +Apply with the following `terraform.tfvars` to first create a shared tier cluster: +``` +atlas_org_id = "627a9687f7f7f7f774de306f14" +public_key = +private_key = +provider_name = "TENANT" +backing_provider_name = "AWS" +provider_instance_size_name = "M2" +``` + +Apply with the following `terraform.tfvars` to upgrade the shared tier cluster you just created to dedicated tier: +``` +atlas_org_id = "627a9687f7f7f7f774de306f14" +public_key = +private_key = +provider_name = "GCP" +provider_instance_size_name = "M10" \ No newline at end of file diff --git a/examples/atlas-cluster/tenant-upgrade/main.tf b/examples/atlas-cluster/tenant-upgrade/main.tf new file mode 100644 index 0000000000..6896f35e6d --- /dev/null +++ b/examples/atlas-cluster/tenant-upgrade/main.tf @@ -0,0 +1,19 @@ +provider "mongodbatlas" { + public_key = var.public_key + private_key = var.private_key +} + +resource "mongodbatlas_cluster" "cluster" { + project_id = mongodbatlas_project.project.id + name = "ClusterToUpgrade" + cluster_type = 
"REPLICASET" + provider_name = var.provider_name + backing_provider_name = var.backing_provider_name + provider_region_name = "US_EAST_1" + provider_instance_size_name = var.provider_instance_size_name +} + +resource "mongodbatlas_project" "project" { + name = "TenantUpgradeTest" + org_id = var.atlas_org_id +} diff --git a/examples/atlas-cluster/tenant-upgrade/variables.tf b/examples/atlas-cluster/tenant-upgrade/variables.tf new file mode 100644 index 0000000000..2986570c19 --- /dev/null +++ b/examples/atlas-cluster/tenant-upgrade/variables.tf @@ -0,0 +1,22 @@ +variable "atlas_org_id" { + description = "Atlas organization id" + default = "" +} +variable "public_key" { + description = "Public API key to authenticate to Atlas" +} +variable "private_key" { + description = "Private API key to authenticate to Atlas" +} +variable "provider_name" { + description = "Atlas cluster provider name" + default = "AWS" +} +variable "backing_provider_name" { + description = "Atlas cluster backing provider name" + default = null +} +variable "provider_instance_size_name" { + description = "Atlas cluster provider instance name" + default = "M10" +} \ No newline at end of file diff --git a/examples/atlas-cluster/tenant-upgrade/versions.tf b/examples/atlas-cluster/tenant-upgrade/versions.tf new file mode 100644 index 0000000000..92fca3b63d --- /dev/null +++ b/examples/atlas-cluster/tenant-upgrade/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + mongodbatlas = { + source = "mongodb/mongodbatlas" + } + } + required_version = ">= 0.13" +} \ No newline at end of file diff --git a/mongodbatlas/resource_mongodbatlas_advanced_cluster.go b/mongodbatlas/resource_mongodbatlas_advanced_cluster.go index b1d90e1d19..0abab8f047 100644 --- a/mongodbatlas/resource_mongodbatlas_advanced_cluster.go +++ b/mongodbatlas/resource_mongodbatlas_advanced_cluster.go @@ -21,6 +21,8 @@ import ( matlas "go.mongodb.org/atlas/mongodbatlas" ) +type acCtxKey string + const ( 
errorClusterAdvancedCreate = "error creating MongoDB ClusterAdvanced: %s" errorClusterAdvancedRead = "error reading MongoDB ClusterAdvanced (%s): %s" @@ -31,11 +33,13 @@ const ( errorAdvancedClusterAdvancedConfRead = "error reading Advanced Configuration Option form MongoDB Cluster (%s): %s" ) +var upgradeRequestCtxKey acCtxKey = "upgradeRequest" + func resourceMongoDBAtlasAdvancedCluster() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceMongoDBAtlasAdvancedClusterCreate, ReadWithoutTimeout: resourceMongoDBAtlasAdvancedClusterRead, - UpdateWithoutTimeout: resourceMongoDBAtlasAdvancedClusterUpdate, + UpdateWithoutTimeout: resourceMongoDBAtlasAdvancedClusterUpdateOrUpgrade, DeleteWithoutTimeout: resourceMongoDBAtlasAdvancedClusterDelete, Importer: &schema.ResourceImporter{ StateContext: resourceMongoDBAtlasAdvancedClusterImportState, @@ -511,6 +515,42 @@ func resourceMongoDBAtlasAdvancedClusterRead(ctx context.Context, d *schema.Reso return nil } +func resourceMongoDBAtlasAdvancedClusterUpdateOrUpgrade(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + if upgradeRequest := getUpgradeRequest(d); upgradeRequest != nil { + upgradeCtx := context.WithValue(ctx, upgradeRequestCtxKey, upgradeRequest) + return resourceMongoDBAtlasAdvancedClusterUpgrade(upgradeCtx, d, meta) + } + + return resourceMongoDBAtlasAdvancedClusterUpdate(ctx, d, meta) +} + +func resourceMongoDBAtlasAdvancedClusterUpgrade(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*MongoDBClient).Atlas + ids := decodeStateID(d.Id()) + projectID := ids["project_id"] + clusterName := ids["cluster_name"] + + upgradeRequest := ctx.Value(upgradeRequestCtxKey).(*matlas.Cluster) + + if upgradeRequest == nil { + return diag.FromErr(fmt.Errorf("upgrade called without %s in ctx", string(upgradeRequestCtxKey))) + } + + upgradeResponse, _, err := upgradeCluster(ctx, conn, upgradeRequest, projectID, clusterName) + + if 
err != nil { + return diag.FromErr(fmt.Errorf(errorClusterAdvancedUpdate, clusterName, err)) + } + + d.SetId(encodeStateID(map[string]string{ + "cluster_id": upgradeResponse.ID, + "project_id": projectID, + "cluster_name": clusterName, + })) + + return resourceMongoDBAtlasAdvancedClusterRead(ctx, d, meta) +} + func resourceMongoDBAtlasAdvancedClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { // Get client connection. conn := meta.(*MongoDBClient).Atlas @@ -1079,6 +1119,38 @@ func replicationSpecsHashSet(v interface{}) int { return schema.HashString(buf.String()) } +func getUpgradeRequest(d *schema.ResourceData) *matlas.Cluster { + if !d.HasChange("replication_specs") { + return nil + } + + cs, us := d.GetChange("replication_specs") + currentSpecs := expandAdvancedReplicationSpecs(cs.(*schema.Set).List()) + updatedSpecs := expandAdvancedReplicationSpecs(us.(*schema.Set).List()) + + if len(currentSpecs) != 1 || len(updatedSpecs) != 1 || len(currentSpecs[0].RegionConfigs) != 1 || len(updatedSpecs[0].RegionConfigs) != 1 { + return nil + } + + currentRegion := currentSpecs[0].RegionConfigs[0] + updatedRegion := updatedSpecs[0].RegionConfigs[0] + currentSize := currentRegion.ElectableSpecs.InstanceSize + + if currentRegion.ElectableSpecs.InstanceSize == updatedRegion.ElectableSpecs.InstanceSize || !(currentSize == "M0" || + currentSize == "M2" || + currentSize == "M5") { + return nil + } + + return &matlas.Cluster{ + ProviderSettings: &matlas.ProviderSettings{ + ProviderName: updatedRegion.ProviderName, + InstanceSizeName: updatedRegion.ElectableSpecs.InstanceSize, + RegionName: updatedRegion.RegionName, + }, + } +} + func updateAdvancedCluster(ctx context.Context, conn *matlas.Client, request *matlas.AdvancedCluster, projectID, name string) (*matlas.AdvancedCluster, *matlas.Response, error) { cluster, resp, err := conn.AdvancedClusters.Update(ctx, projectID, name, request) if err != nil { diff --git 
a/mongodbatlas/resource_mongodbatlas_cluster.go b/mongodbatlas/resource_mongodbatlas_cluster.go index 980f25c1bc..47d208c5c2 100644 --- a/mongodbatlas/resource_mongodbatlas_cluster.go +++ b/mongodbatlas/resource_mongodbatlas_cluster.go @@ -161,7 +161,6 @@ func resourceMongoDBAtlasCluster() *schema.Resource { }, "provider_name": { Type: schema.TypeString, - ForceNew: true, Required: true, }, "pit_enabled": { @@ -353,6 +352,7 @@ func resourceMongoDBAtlasCluster() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"LTS", "CONTINUOUS"}, false), }, }, + CustomizeDiff: resourceClusterCustomizeDiff, } } @@ -900,27 +900,42 @@ func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceDa } } - // Has changes - if !reflect.DeepEqual(cluster, matlas.Cluster{}) { + var didUnpauseCluster = false + + if isUpgradeRequired(d) { + updatedCluster, _, err := upgradeCluster(ctx, conn, cluster, projectID, clusterName) + + if err != nil { + return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) + } + + d.SetId(encodeStateID(map[string]string{ + "cluster_id": updatedCluster.ID, + "project_id": projectID, + "cluster_name": updatedCluster.Name, + "provider_name": updatedCluster.ProviderSettings.ProviderName, + })) + } else if !reflect.DeepEqual(cluster, matlas.Cluster{}) { err := resource.RetryContext(ctx, 3*time.Hour, func() *resource.RetryError { _, _, err := updateCluster(ctx, conn, cluster, projectID, clusterName) - if err != nil { - var target *matlas.ErrorResponse - if errors.As(err, &target) && target.ErrorCode == "CANNOT_UPDATE_PAUSED_CLUSTER" { - clusterRequest := &matlas.Cluster{ - Paused: pointy.Bool(false), - } - _, _, err := updateCluster(ctx, conn, clusterRequest, projectID, clusterName) - if err != nil { - return resource.NonRetryableError(fmt.Errorf(errorClusterUpdate, clusterName, err)) - } - } - if errors.As(err, &target) && target.HTTPCode == 400 { - return resource.NonRetryableError(fmt.Errorf(errorClusterUpdate, 
clusterName, err)) + + if didErrOnPausedCluster(err) { + clusterRequest := &matlas.Cluster{ + Paused: pointy.Bool(false), } + + _, _, err = updateCluster(ctx, conn, clusterRequest, projectID, clusterName) + + didUnpauseCluster = true } + + if err != nil { + return resource.NonRetryableError(fmt.Errorf(errorClusterUpdate, clusterName, err)) + } + return nil }) + if err != nil { return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) } @@ -942,7 +957,7 @@ func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceDa } } - if d.Get("paused").(bool) { + if didUnpauseCluster { clusterRequest := &matlas.Cluster{ Paused: pointy.Bool(true), } @@ -956,6 +971,16 @@ func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceDa return resourceMongoDBAtlasClusterRead(ctx, d, meta) } +func didErrOnPausedCluster(err error) bool { + if err == nil { + return false + } + + var target *matlas.ErrorResponse + + return errors.As(err, &target) && target.ErrorCode == "CANNOT_UPDATE_PAUSED_CLUSTER" +} + func resourceMongoDBAtlasClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { // Get client connection. 
conn := meta.(*MongoDBClient).Atlas @@ -1116,6 +1141,7 @@ func expandProviderSetting(d *schema.ResourceData) (*matlas.ProviderSettings, er instanceSize = getInstanceSizeToInt(d.Get("provider_instance_size_name").(string)) compute *matlas.Compute autoScalingEnabled = d.Get("auto_scaling_compute_enabled").(bool) + providerName = cast.ToString(d.Get("provider_name")) ) if minInstanceSize != 0 && autoScalingEnabled { @@ -1140,12 +1166,15 @@ func expandProviderSetting(d *schema.ResourceData) (*matlas.ProviderSettings, er } providerSettings := &matlas.ProviderSettings{ - BackingProviderName: cast.ToString(d.Get("backing_provider_name")), - InstanceSizeName: cast.ToString(d.Get("provider_instance_size_name")), - ProviderName: cast.ToString(d.Get("provider_name")), - RegionName: region, - VolumeType: cast.ToString(d.Get("provider_volume_type")), - DiskTypeName: cast.ToString(d.Get("provider_disk_type_name")), + InstanceSizeName: cast.ToString(d.Get("provider_instance_size_name")), + ProviderName: providerName, + RegionName: region, + VolumeType: cast.ToString(d.Get("provider_volume_type")), + DiskTypeName: cast.ToString(d.Get("provider_disk_type_name")), + } + + if providerName == "TENANT" { + providerSettings.BackingProviderName = cast.ToString(d.Get("backing_provider_name")) } if autoScalingEnabled { @@ -1172,8 +1201,10 @@ func expandProviderSetting(d *schema.ResourceData) (*matlas.ProviderSettings, er } func flattenProviderSettings(d *schema.ResourceData, settings *matlas.ProviderSettings, clusterName string) { - if err := d.Set("backing_provider_name", settings.BackingProviderName); err != nil { - log.Printf(errorClusterSetting, "backing_provider_name", clusterName, err) + if settings.ProviderName == "TENANT" { + if err := d.Set("backing_provider_name", settings.BackingProviderName); err != nil { + log.Printf(errorClusterSetting, "backing_provider_name", clusterName, err) + } } if settings.DiskIOPS != nil && *settings.DiskIOPS != 0 { @@ -1209,6 +1240,16 @@ func 
flattenProviderSettings(d *schema.ResourceData, settings *matlas.ProviderSe } } +func isUpgradeRequired(d *schema.ResourceData) bool { + currentSize, updatedSize := d.GetChange("provider_instance_size_name") + + if currentSize == updatedSize { + return false + } + + return currentSize == "M0" || currentSize == "M2" || currentSize == "M5" +} + func expandReplicationSpecs(d *schema.ResourceData) ([]matlas.ReplicationSpec, error) { rSpecs := make([]matlas.ReplicationSpec, 0) @@ -1417,6 +1458,22 @@ func resourceClusterRefreshFunc(ctx context.Context, name, projectID string, cli } } +func resourceClusterCustomizeDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + var err error + currentProvider, updatedProvider := d.GetChange("provider_name") + + willProviderChange := currentProvider != updatedProvider + willLeaveTenant := willProviderChange && currentProvider == "TENANT" + + if willLeaveTenant { + err = d.SetNewComputed("backing_provider_name") + } else if willProviderChange { + err = d.ForceNew("provider_name") + } + + return err +} + func formatMongoDBMajorVersion(val interface{}) string { if strings.Contains(val.(string), ".") { return val.(string) @@ -1668,3 +1725,29 @@ func updateCluster(ctx context.Context, conn *matlas.Client, request *matlas.Clu return cluster, resp, nil } + +func upgradeCluster(ctx context.Context, conn *matlas.Client, request *matlas.Cluster, projectID, name string) (*matlas.Cluster, *matlas.Response, error) { + request.Name = name + + cluster, resp, err := conn.Clusters.Upgrade(ctx, projectID, request) + if err != nil { + return nil, nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, + Target: []string{"IDLE"}, + Refresh: resourceClusterRefreshFunc(ctx, name, projectID, conn), + Timeout: 3 * time.Hour, + MinTimeout: 30 * time.Second, + Delay: 1 * time.Minute, + } + + // Wait, catching any errors + _, err = stateConf.WaitForStateContext(ctx) + if err != 
nil { + return nil, nil, err + } + + return cluster, resp, nil +} diff --git a/website/docs/r/advanced_cluster.html.markdown b/website/docs/r/advanced_cluster.html.markdown index 5c39f82532..d58500d2f7 100644 --- a/website/docs/r/advanced_cluster.html.markdown +++ b/website/docs/r/advanced_cluster.html.markdown @@ -15,6 +15,8 @@ More information on considerations for using advanced clusters please see [Consi ~> **IMPORTANT:**
• The primary difference between [`mongodbatlas_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/cluster) and [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster) is that `mongodbatlas_advanced_cluster` supports multi-cloud clusters. We recommend new users start with the `mongodbatlas_advanced_cluster` resource. +
• Upgrading the shared tier is supported. Any change from a shared tier cluster, aka tenant, to a different instance size will be considered a tenant upgrade. When upgrading from the shared tier, change the `provider_name` from "TENANT" to your preferred provider (AWS, GCP or Azure) and remove the variable `backing_provider_name`. See the [Example Tenant Cluster Upgrade](#example-tenant-cluster-upgrade) below. Note that a shared tier cluster can only be upgraded to a single-provider cluster of size M10 or greater. +
• WARNING WHEN UPGRADING TENANT/SHARED CLUSTERS!!! When upgrading from the shared tier *only* the upgrade changes will be applied. This is done in order to avoid a corrupt state file in the event that the upgrade succeeds, but subsequent updates fail within the same `terraform apply`. In order to apply any other cluster changes, run a secondary `terraform apply` after the upgrade succeeds. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. -> **NOTE:** A network container is created for a advanced cluster to reside in if one does not yet exist in the project. To use this automatically created container with another resource, such as peering, the `container_id` is exported after creation. @@ -69,6 +71,27 @@ resource "mongodbatlas_advanced_cluster" "test" { } ``` +### Example Tenant Cluster Upgrade + +```terraform +resource "mongodbatlas_advanced_cluster" "test" { + project_id = "PROJECT ID" + name = "NAME OF CLUSTER" + cluster_type = "REPLICASET" + + replication_specs { + region_configs { + electable_specs { + instance_size = "M10" + } + provider_name = "AWS" + region_name = "US_EAST_1" + priority = 7 + } + } +} +``` + ### Example Multicloud. ```terraform diff --git a/website/docs/r/cluster.html.markdown b/website/docs/r/cluster.html.markdown index 442f571f75..0b891f5faa 100644 --- a/website/docs/r/cluster.html.markdown +++ b/website/docs/r/cluster.html.markdown @@ -17,7 +17,17 @@ description: |- ~> **IMPORTANT:**
• New Users: If you are not already using `mongodbatlas_cluster` for your deployment we recommend starting with the [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster). `mongodbatlas_advanced_cluster` has all the same functionality as `mongodbatlas_cluster` but also supports multi-cloud clusters.
• Free tier cluster creation (M0) is supported. -
• Shared tier clusters (M0, M2, M5) cannot be upgraded to higher tiers via API or by this Provider. WARNING! If you attempt to upgrade from an existing shared tier cluster that you manage with this Provider to a dedicated cluster (M10+) Terraform will see it as a request to destroy the shared tier cluster and as a request to create a dedicated tier cluster, i.e. Terraform will not see it as a request to upgrade. If you accept the plan in this case the shared tier cluster would be destroyed and you would lose the data on that cluster. Do not attempt to upgrade from the shared to dedicated tier via this Provider, it is not supported! +
• Shared tier clusters (M0, M2, M5) can be upgraded to dedicated tiers (M10+) via this provider. WARNING WHEN UPGRADING TENANT/SHARED CLUSTERS!!! Any change from shared tier to a different instance size will be considered a tenant upgrade. When upgrading from shared tier to dedicated, simply change the `provider_name` from "TENANT" to your preferred provider (AWS, GCP, AZURE) and remove the variable `backing_provider_name`. For example, if you have an existing tenant/shared cluster and want to upgrade, your Terraform config should be changed from: +``` +provider_instance_size_name = "M0" +provider_name = "TENANT" +backing_provider_name = "AWS" +``` +To: +``` +provider_instance_size_name = "M10" +provider_name = "AWS" +```
• Changes to cluster configurations can affect costs. Before making changes, please see [Billing](https://docs.atlas.mongodb.com/billing/).
• If your Atlas project contains a custom role that uses actions introduced in a specific MongoDB version, you cannot create a cluster with a MongoDB version less than that version unless you delete the custom role.