refactor StateChangeConf
lantoli committed Sep 18, 2024
1 parent 511768a commit 16f12f3
Showing 2 changed files with 22 additions and 71 deletions.
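In short: the inline retry.StateChangeConf blocks in internal/service/cluster are replaced with the shared advancedcluster.CreateStateChangeConfig and advancedcluster.DeleteStateChangeConfig helpers, which poll cluster state through the v2 Atlas SDK client (connV2) rather than the legacy matlas client. updateCluster and upgradeCluster gain a connV2 parameter so they can use the shared helpers, and newAtlasUpdate drops its now-unused legacy conn parameter.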
14 changes: 3 additions & 11 deletions internal/service/cluster/new_atlas.go
@@ -4,12 +4,11 @@ import (
 	"context"
 	"time"
 
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster"
 	"go.mongodb.org/atlas-sdk/v20240805004/admin"
-	matlas "go.mongodb.org/atlas/mongodbatlas"
 )
 
-func newAtlasUpdate(ctx context.Context, timeout time.Duration, conn *matlas.Client, connV2 *admin.APIClient, projectID, clusterName string, redactClientLogData bool) error {
+func newAtlasUpdate(ctx context.Context, timeout time.Duration, connV2 *admin.APIClient, projectID, clusterName string, redactClientLogData bool) error {
 	current, err := newAtlasGet(ctx, connV2, projectID, clusterName)
 	if err != nil {
 		return err
@@ -23,14 +22,7 @@ func newAtlasUpdate(ctx context.Context, timeout time.Duration, conn *matlas.Client, connV2 *admin.APIClient, projectID, clusterName string, redactClientLogData bool) error {
 	if _, _, err = connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil {
 		return err
 	}
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"CREATING", "UPDATING", "REPAIRING"},
-		Target:     []string{"IDLE"},
-		Refresh:    ResourceClusterRefreshFunc(ctx, clusterName, projectID, conn),
-		Timeout:    timeout,
-		MinTimeout: 30 * time.Second,
-		Delay:      1 * time.Minute,
-	}
+	stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, clusterName, timeout)
 	if _, err = stateConf.WaitForStateContext(ctx); err != nil {
 		return err
 	}
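The shared helper these call sites now use lives in internal/service/advancedcluster, and its body is not part of this diff. Below is a minimal sketch of what it plausibly looks like, reconstructed from the inline retry.StateChangeConf blocks it replaces; the exact Pending list, the Delay/MinTimeout values, and the connV2-based refresh function are assumptions, not the provider's actual implementation.

// Sketch only: assumed shape of the shared create/update wait helper.
package advancedcluster

import (
	"context"
	"net/http"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
	"go.mongodb.org/atlas-sdk/v20240805004/admin"
)

// CreateStateChangeConfig builds the wait configuration that call sites pass
// to WaitForStateContext. The Pending list here is the union of the diverging
// inline lists being deleted (resourceCreate also waited on REPEATING/PENDING).
func CreateStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, clusterName string, timeout time.Duration) retry.StateChangeConf {
	return retry.StateChangeConf{
		Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
		Target:     []string{"IDLE"},
		Refresh:    refreshFunc(ctx, connV2, projectID, clusterName), // hypothetical v2-SDK refresh
		Timeout:    timeout,
		MinTimeout: 30 * time.Second,
		Delay:      1 * time.Minute,
	}
}

// refreshFunc is a hypothetical stand-in for a v2-SDK refresh that replaces
// the legacy matlas-based ResourceClusterRefreshFunc.
func refreshFunc(ctx context.Context, connV2 *admin.APIClient, projectID, clusterName string) retry.StateRefreshFunc {
	return func() (any, string, error) {
		cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute()
		if err != nil {
			if resp != nil && resp.StatusCode == http.StatusNotFound {
				return "", "DELETED", nil // a missing cluster is the terminal state for delete waits
			}
			return nil, "", err
		}
		return cluster, cluster.GetStateName(), nil
	}
}

Centralizing the config also removes drift between call sites, which had diverged on Pending states and on timings (MinTimeout 1m / Delay 3m in resourceCreate versus 30s / 1m elsewhere).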
79 changes: 19 additions & 60 deletions internal/service/cluster/resource_cluster.go
@@ -12,6 +12,7 @@ import (
 	"strings"
 	"time"
 
+	"go.mongodb.org/atlas-sdk/v20240805004/admin"
 	matlas "go.mongodb.org/atlas/mongodbatlas"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@@ -545,18 +546,8 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	}
 
 	timeout := d.Timeout(schema.TimeoutCreate)
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
-		Target:     []string{"IDLE"},
-		Refresh:    ResourceClusterRefreshFunc(ctx, clusterName, projectID, conn),
-		Timeout:    timeout,
-		MinTimeout: 1 * time.Minute,
-		Delay:      3 * time.Minute,
-	}
-
-	// Wait, catching any errors
-	_, err = stateConf.WaitForStateContext(ctx)
-	if err != nil {
+	stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, clusterName, timeout)
+	if _, err = stateConf.WaitForStateContext(ctx); err != nil {
 		return diag.FromErr(fmt.Errorf(errorClusterCreate, err))
 	}
 
Expand All @@ -582,14 +573,14 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
Paused: conversion.Pointer(v),
}

_, _, err = updateCluster(ctx, conn, clusterRequest, projectID, clusterName, timeout)
_, _, err = updateCluster(ctx, conn, connV2, clusterRequest, projectID, clusterName, timeout)
if err != nil {
return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err))
}
}

if v, ok := d.GetOk("redact_client_log_data"); ok {
if err := newAtlasUpdate(ctx, d.Timeout(schema.TimeoutCreate), conn, connV2, projectID, clusterName, v.(bool)); err != nil {
if err := newAtlasUpdate(ctx, d.Timeout(schema.TimeoutCreate), connV2, projectID, clusterName, v.(bool)); err != nil {
return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err))
}
}
@@ -962,7 +953,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	}
 
 	if isUpgradeRequired(d) {
-		updatedCluster, _, err := upgradeCluster(ctx, conn, cluster, projectID, clusterName, timeout)
+		updatedCluster, _, err := upgradeCluster(ctx, conn, connV2, cluster, projectID, clusterName, timeout)
 
 		if err != nil {
 			return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err))
@@ -976,14 +967,14 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 		}))
 	} else if !reflect.DeepEqual(cluster, clusterChangeDetect) {
 		err := retry.RetryContext(ctx, timeout, func() *retry.RetryError {
-			_, _, err := updateCluster(ctx, conn, cluster, projectID, clusterName, timeout)
+			_, _, err := updateCluster(ctx, conn, connV2, cluster, projectID, clusterName, timeout)
 
 			if didErrOnPausedCluster(err) {
 				clusterRequest := &matlas.Cluster{
 					Paused: conversion.Pointer(false),
 				}
 
-				_, _, err = updateCluster(ctx, conn, clusterRequest, projectID, clusterName, timeout)
+				_, _, err = updateCluster(ctx, conn, connV2, clusterRequest, projectID, clusterName, timeout)
 			}
 
 			if err != nil {
@@ -1003,15 +994,15 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 			Paused: conversion.Pointer(true),
 		}
 
-		_, _, err := updateCluster(ctx, conn, clusterRequest, projectID, clusterName, timeout)
+		_, _, err := updateCluster(ctx, conn, connV2, clusterRequest, projectID, clusterName, timeout)
 		if err != nil {
 			return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err))
 		}
 	}
 
 	if d.HasChange("redact_client_log_data") {
 		if v, ok := d.GetOk("redact_client_log_data"); ok {
-			if err := newAtlasUpdate(ctx, d.Timeout(schema.TimeoutUpdate), conn, connV2, projectID, clusterName, v.(bool)); err != nil {
+			if err := newAtlasUpdate(ctx, d.Timeout(schema.TimeoutUpdate), connV2, projectID, clusterName, v.(bool)); err != nil {
 				return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err))
 			}
 		}
Expand Down Expand Up @@ -1052,8 +1043,8 @@ func didErrOnPausedCluster(err error) bool {
}

func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
// Get client connection.
conn := meta.(*config.MongoDBClient).Atlas
connV2 := meta.(*config.MongoDBClient).AtlasV2
ids := conversion.DecodeStateID(d.Id())
projectID := ids["project_id"]
clusterName := ids["cluster_name"]
@@ -1070,20 +1061,8 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 		return diag.FromErr(fmt.Errorf(errorClusterDelete, clusterName, err))
 	}
 
-	log.Println("[INFO] Waiting for MongoDB Cluster to be destroyed")
-
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"},
-		Target:     []string{"DELETED"},
-		Refresh:    ResourceClusterRefreshFunc(ctx, clusterName, projectID, conn),
-		Timeout:    d.Timeout(schema.TimeoutDelete),
-		MinTimeout: 30 * time.Second,
-		Delay:      1 * time.Minute, // Wait 30 secs before starting
-	}
-
-	// Wait, catching any errors
-	_, err = stateConf.WaitForStateContext(ctx)
-	if err != nil {
+	stateConf := advancedcluster.DeleteStateChangeConfig(ctx, connV2, projectID, clusterName, d.Timeout(schema.TimeoutDelete))
+	if _, err = stateConf.WaitForStateContext(ctx); err != nil {
 		return diag.FromErr(fmt.Errorf(errorClusterDelete, clusterName, err))
 	}
 
@@ -1213,24 +1192,14 @@ func isEqualProviderAutoScalingMaxInstanceSize(k, old, newStr string, d *schema.ResourceData) bool {
 	return true
 }
 
-func updateCluster(ctx context.Context, conn *matlas.Client, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) {
+func updateCluster(ctx context.Context, conn *matlas.Client, connV2 *admin.APIClient, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) {
 	cluster, resp, err := conn.Clusters.Update(ctx, projectID, name, request)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"CREATING", "UPDATING", "REPAIRING"},
-		Target:     []string{"IDLE"},
-		Refresh:    ResourceClusterRefreshFunc(ctx, name, projectID, conn),
-		Timeout:    timeout,
-		MinTimeout: 30 * time.Second,
-		Delay:      1 * time.Minute,
-	}
-
-	// Wait, catching any errors
-	_, err = stateConf.WaitForStateContext(ctx)
-	if err != nil {
+	stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, name, timeout)
+	if _, err = stateConf.WaitForStateContext(ctx); err != nil {
 		return nil, nil, err
 	}
 
@@ -1344,26 +1313,16 @@ func ResourceClusterRefreshFunc(ctx context.Context, name, projectID string, conn *matlas.Client) retry.StateRefreshFunc {
 	}
 }
 
-func upgradeCluster(ctx context.Context, conn *matlas.Client, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) {
+func upgradeCluster(ctx context.Context, conn *matlas.Client, connV2 *admin.APIClient, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) {
 	request.Name = name
 
 	cluster, resp, err := conn.Clusters.Upgrade(ctx, projectID, request)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"CREATING", "UPDATING", "REPAIRING"},
-		Target:     []string{"IDLE"},
-		Refresh:    ResourceClusterRefreshFunc(ctx, name, projectID, conn),
-		Timeout:    timeout,
-		MinTimeout: 30 * time.Second,
-		Delay:      1 * time.Minute,
-	}
-
-	// Wait, catching any errors
-	_, err = stateConf.WaitForStateContext(ctx)
-	if err != nil {
+	stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, name, timeout)
+	if _, err = stateConf.WaitForStateContext(ctx); err != nil {
 		return nil, nil, err
 	}
 
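For the delete path, resourceDelete above now calls advancedcluster.DeleteStateChangeConfig. A minimal sketch of its assumed shape, mirroring the inline block it replaces and reusing the imports and the hypothetical refreshFunc from the earlier sketch (again an assumption, not the provider's actual code):

// Sketch only: assumed shape of the delete-side wait helper.
func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, clusterName string, timeout time.Duration) retry.StateChangeConf {
	return retry.StateChangeConf{
		Pending:    []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"},
		Target:     []string{"DELETED"},
		Refresh:    refreshFunc(ctx, connV2, projectID, clusterName), // refreshFunc maps a 404 to "DELETED"
		Timeout:    timeout,
		MinTimeout: 30 * time.Second,
		Delay:      1 * time.Minute,
	}
}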
