INTMDB-15: Added parameter advanced conf for cluster datasource (#646)
* added advanced configuration in datasource and docs of cluster(s)

* fixes typo

* fixes test

* fixes test

* fix tests

* fix tests

Co-authored-by: Edgar Lopez <[email protected]>
coderGo93 and Edgar Lopez authored Jan 12, 2022
1 parent a125b02 commit 3f67fba
Showing 6 changed files with 282 additions and 0 deletions.
60 changes: 60 additions & 0 deletions mongodbatlas/data_source_mongodbatlas_cluster.go
@@ -21,6 +21,7 @@ func dataSourceMongoDBAtlasCluster() *schema.Resource {
Type: schema.TypeString,
Required: true,
},
"advanced_configuration": clusterAdvancedConfigurationSchemaComputed(),
"auto_scaling_disk_gb_enabled": {
Type: schema.TypeBool,
Computed: true,
@@ -444,6 +445,18 @@ func dataSourceMongoDBAtlasClusterRead(ctx context.Context, d *schema.ResourceDa
return diag.FromErr(fmt.Errorf(errorClusterSetting, "version_release_system", clusterName, err))
}

/*
Get the advanced configuration options and set them in the Terraform state
*/
processArgs, _, err := conn.Clusters.GetProcessArgs(ctx, projectID, clusterName)
if err != nil {
return diag.FromErr(fmt.Errorf(errorAdvancedConfRead, clusterName, err))
}

if err := d.Set("advanced_configuration", flattenProcessArgs(processArgs)); err != nil {
return diag.FromErr(fmt.Errorf(errorClusterSetting, "advanced_configuration", clusterName, err))
}

// Get the snapshot policy and set the data
snapshotBackupPolicy, err := flattenCloudProviderSnapshotBackupPolicy(ctx, d, conn, projectID, clusterName)
if err != nil {
@@ -458,3 +471,50 @@ func dataSourceMongoDBAtlasClusterRead(ctx context.Context, d *schema.ResourceDa

return nil
}

func clusterAdvancedConfigurationSchemaComputed() *schema.Schema {
    return &schema.Schema{
        Type:     schema.TypeList,
        Computed: true,
        Elem: &schema.Resource{
            Schema: map[string]*schema.Schema{
                "default_read_concern": {
                    Type:     schema.TypeString,
                    Computed: true,
                },
                "default_write_concern": {
                    Type:     schema.TypeString,
                    Computed: true,
                },
                "fail_index_key_too_long": {
                    Type:     schema.TypeBool,
                    Computed: true,
                },
                "javascript_enabled": {
                    Type:     schema.TypeBool,
                    Computed: true,
                },
                "minimum_enabled_tls_protocol": {
                    Type:     schema.TypeString,
                    Computed: true,
                },
                "no_table_scan": {
                    Type:     schema.TypeBool,
                    Computed: true,
                },
                "oplog_size_mb": {
                    Type:     schema.TypeInt,
                    Computed: true,
                },
                "sample_size_bi_connector": {
                    Type:     schema.TypeInt,
                    Computed: true,
                },
                "sample_refresh_interval_bi_connector": {
                    Type:     schema.TypeInt,
                    Computed: true,
                },
            },
        },
    }
}
92 changes: 92 additions & 0 deletions mongodbatlas/data_source_mongodbatlas_cluster_test.go
@@ -7,6 +7,7 @@ import (

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/mwielbut/pointy"
matlas "go.mongodb.org/atlas/mongodbatlas"
)

@@ -57,6 +58,52 @@ func TestAccDataSourceMongoDBAtlasCluster_basic(t *testing.T) {
})
}

func TestAccDataSourceMongoDBAtlasCluster_advancedConf(t *testing.T) {
var (
cluster matlas.Cluster
resourceName = "mongodbatlas_cluster.test"
dataSourceName = "data.mongodbatlas_cluster.test"
projectID = os.Getenv("MONGODB_ATLAS_PROJECT_ID")
name = acctest.RandomWithPrefix("test-acc")
)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckMongoDBAtlasClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccDataSourceMongoDBAtlasClusterConfigAdvancedConf(projectID, name, false, &matlas.ProcessArgs{
FailIndexKeyTooLong: pointy.Bool(true),
JavascriptEnabled: pointy.Bool(true),
MinimumEnabledTLSProtocol: "TLS1_1",
NoTableScan: pointy.Bool(false),
OplogSizeMB: pointy.Int64(1000),
SampleRefreshIntervalBIConnector: pointy.Int64(310),
SampleSizeBIConnector: pointy.Int64(110),
}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMongoDBAtlasClusterExists(resourceName, &cluster),
testAccCheckMongoDBAtlasClusterAttributes(&cluster, name),
resource.TestCheckResourceAttrSet(resourceName, "project_id"),
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "disk_size_gb", "10"),
resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.#"),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.regions_config.#"),
resource.TestCheckResourceAttrSet(resourceName, "project_id"),
resource.TestCheckResourceAttr(dataSourceName, "name", name),
resource.TestCheckResourceAttr(dataSourceName, "disk_size_gb", "10"),
resource.TestCheckResourceAttr(dataSourceName, "pit_enabled", "false"),
resource.TestCheckResourceAttrSet(dataSourceName, "mongo_uri"),
resource.TestCheckResourceAttrSet(dataSourceName, "replication_specs.#"),
resource.TestCheckResourceAttr(dataSourceName, "version_release_system", "LTS"),
),
},
},
})
}

func testAccDataSourceMongoDBAtlasClusterConfig(projectID, name, backupEnabled, autoScalingEnabled, scaleDownEnabled, minSizeName, maxSizeName string) string {
return fmt.Sprintf(`
resource "mongodbatlas_cluster" "basic_ds" {
@@ -102,3 +149,48 @@ func testAccDataSourceMongoDBAtlasClusterConfig(projectID, name, backupEnabled,
}
`, projectID, name, backupEnabled, autoScalingEnabled, scaleDownEnabled, minSizeName, maxSizeName)
}

func testAccDataSourceMongoDBAtlasClusterConfigAdvancedConf(projectID, name string, autoscalingEnabled bool, p *matlas.ProcessArgs) string {
return fmt.Sprintf(`
resource "mongodbatlas_cluster" "test" {
project_id = %[1]q
name = %[2]q
disk_size_gb = 10
cluster_type = "REPLICASET"
replication_specs {
num_shards = 1
regions_config {
region_name = "US_EAST_2"
electable_nodes = 3
priority = 7
read_only_nodes = 0
}
}
provider_name = "AWS"
provider_instance_size_name = "M10"
backup_enabled = false
auto_scaling_disk_gb_enabled = %[3]t
mongo_db_major_version = "4.0"
advanced_configuration {
fail_index_key_too_long = %[4]t
javascript_enabled = %[5]t
minimum_enabled_tls_protocol = %[6]q
no_table_scan = %[7]t
oplog_size_mb = %[8]d
sample_size_bi_connector = %[9]d
sample_refresh_interval_bi_connector = %[10]d
}
}
data "mongodbatlas_cluster" "test" {
project_id = mongodbatlas_cluster.test.project_id
name = mongodbatlas_cluster.test.name
}
`, projectID, name, autoscalingEnabled,
*p.FailIndexKeyTooLong, *p.JavascriptEnabled, p.MinimumEnabledTLSProtocol, *p.NoTableScan,
*p.OplogSizeMB, *p.SampleSizeBIConnector, *p.SampleRefreshIntervalBIConnector)
}
5 changes: 5 additions & 0 deletions mongodbatlas/data_source_mongodbatlas_clusters.go
@@ -29,6 +29,7 @@ func dataSourceMongoDBAtlasClusters() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"advanced_configuration": clusterAdvancedConfigurationSchemaComputed(),
"auto_scaling_disk_gb_enabled": {
Type: schema.TypeBool,
Computed: true,
@@ -352,7 +353,11 @@ func flattenClusters(ctx context.Context, d *schema.ResourceData, conn *matlas.C
log.Printf("[WARN] Error setting `snapshot_backup_policy` for the cluster(%s): %s", clusters[i].ID, err)
}

processArgs, _, err := conn.Clusters.GetProcessArgs(ctx, clusters[i].GroupID, clusters[i].Name)
if err != nil {
    log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", clusters[i].ID, err)
}

result := map[string]interface{}{
"advanced_configuration": flattenProcessArgs(processArgs),
"auto_scaling_compute_enabled": clusters[i].AutoScaling.Compute.Enabled,
"auto_scaling_compute_scale_down_enabled": clusters[i].AutoScaling.Compute.ScaleDownEnabled,
"auto_scaling_disk_gb_enabled": clusters[i].BackupEnabled,
89 changes: 89 additions & 0 deletions mongodbatlas/data_source_mongodbatlas_clusters_test.go
@@ -7,6 +7,7 @@ import (

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/mwielbut/pointy"
matlas "go.mongodb.org/atlas/mongodbatlas"
)

@@ -51,6 +52,50 @@ func TestAccDataSourceMongoDBAtlasClusters_basic(t *testing.T) {
})
}

func TestAccDataSourceMongoDBAtlasClusters_advancedConf(t *testing.T) {
var (
cluster matlas.Cluster
resourceName = "mongodbatlas_cluster.test"
dataSourceName = "data.mongodbatlas_clusters.test"
projectID = os.Getenv("MONGODB_ATLAS_PROJECT_ID")
name = fmt.Sprintf("test-acc-%s", acctest.RandString(10))
)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckMongoDBAtlasClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccDataSourceMongoDBAtlasClustersConfigAdvancedConf(projectID, name, false, &matlas.ProcessArgs{
FailIndexKeyTooLong: pointy.Bool(true),
JavascriptEnabled: pointy.Bool(true),
MinimumEnabledTLSProtocol: "TLS1_1",
NoTableScan: pointy.Bool(false),
OplogSizeMB: pointy.Int64(1000),
SampleRefreshIntervalBIConnector: pointy.Int64(310),
SampleSizeBIConnector: pointy.Int64(110),
}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMongoDBAtlasClusterExists(resourceName, &cluster),
testAccCheckMongoDBAtlasClusterAttributes(&cluster, name),
resource.TestCheckResourceAttrSet(resourceName, "project_id"),
resource.TestCheckResourceAttr(resourceName, "name", name),
resource.TestCheckResourceAttr(resourceName, "disk_size_gb", "10"),
resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.#"),
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.regions_config.#"),
resource.TestCheckResourceAttrSet(resourceName, "project_id"),
resource.TestCheckResourceAttrSet(dataSourceName, "results.#"),
resource.TestCheckResourceAttrSet(dataSourceName, "results.0.replication_specs.#"),
resource.TestCheckResourceAttrSet(dataSourceName, "results.0.name"),
resource.TestCheckResourceAttr(dataSourceName, "results.0.version_release_system", "LTS"),
),
},
},
})
}

func testAccDataSourceMongoDBAtlasClustersConfig(projectID, name, backupEnabled, autoScalingEnabled, scaleDownEnabled, minSizeName, maxSizeName string) string {
return fmt.Sprintf(`
resource "mongodbatlas_cluster" "basic_ds" {
@@ -96,3 +141,47 @@ func testAccDataSourceMongoDBAtlasClustersConfig(projectID, name, backupEnabled,
}
`, projectID, name, backupEnabled, autoScalingEnabled, scaleDownEnabled, minSizeName, maxSizeName)
}

func testAccDataSourceMongoDBAtlasClustersConfigAdvancedConf(projectID, name string, autoscalingEnabled bool, p *matlas.ProcessArgs) string {
return fmt.Sprintf(`
resource "mongodbatlas_cluster" "test" {
project_id = %[1]q
name = %[2]q
disk_size_gb = 10
cluster_type = "REPLICASET"
replication_specs {
num_shards = 1
regions_config {
region_name = "US_EAST_2"
electable_nodes = 3
priority = 7
read_only_nodes = 0
}
}
provider_name = "AWS"
provider_instance_size_name = "M10"
backup_enabled = false
auto_scaling_disk_gb_enabled = %[3]t
mongo_db_major_version = "4.0"
advanced_configuration {
fail_index_key_too_long = %[4]t
javascript_enabled = %[5]t
minimum_enabled_tls_protocol = %[6]q
no_table_scan = %[7]t
oplog_size_mb = %[8]d
sample_size_bi_connector = %[9]d
sample_refresh_interval_bi_connector = %[10]d
}
}
data "mongodbatlas_clusters" "test" {
project_id = mongodbatlas_cluster.test.project_id
}
`, projectID, name, autoscalingEnabled,
*p.FailIndexKeyTooLong, *p.JavascriptEnabled, p.MinimumEnabledTLSProtocol, *p.NoTableScan,
*p.OplogSizeMB, *p.SampleSizeBIConnector, *p.SampleRefreshIntervalBIConnector)
}
18 changes: 18 additions & 0 deletions website/docs/d/cluster.html.markdown
@@ -122,6 +122,8 @@ In addition to all arguments above, the following attributes are exported:

* `version_release_system` - Release cadence that Atlas uses for this cluster.

* `advanced_configuration` - Advanced configuration options applied to the cluster. See [Advanced Configuration](#advanced-configuration) below for details and a usage sketch.

### BI Connector

Indicates BI Connector for Atlas configuration.
@@ -194,6 +196,22 @@ Contains a key-value pair that tags that the cluster was created by a Terraform
* `snapshot_backup_policy.#.policies.#.policy_item.#.retention_unit` - The unit of time in which snapshot retention is measured (days, weeks, months).
* `snapshot_backup_policy.#.policies.#.policy_item.#.retention_value` - The number of days, weeks, or months the snapshot is retained.

#### Advanced Configuration

* `default_read_concern` - [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/).
* `default_write_concern` - [Default level of acknowledgment requested from MongoDB for write operations](https://docs.mongodb.com/manual/reference/write-concern/) set for this cluster. MongoDB 4.4 clusters default to [1](https://docs.mongodb.com/manual/reference/write-concern/).
* `fail_index_key_too_long` - When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them.
* `javascript_enabled` - When true, the cluster allows execution of operations that perform server-side execution of JavaScript. When false, the cluster disables execution of those operations.
* `minimum_enabled_tls_protocol` - Minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections. Valid values are:

- TLS1_0
- TLS1_1
- TLS1_2

* `no_table_scan` - When true, the cluster disables the execution of any query that requires a collection scan to return results. When false, the cluster allows the execution of those operations.
* `oplog_size_mb` - The custom oplog size of the cluster. A missing value indicates that the cluster uses the default oplog size calculated by Atlas.
* `sample_size_bi_connector` - Number of documents per database to sample when gathering schema information. Defaults to 100. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
* `sample_refresh_interval_bi_connector` - Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. The specified value must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
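
The attribute can be consumed like any other computed list. A minimal sketch, assuming a `var.project_id` variable and a cluster named `my-cluster` (both placeholders, not part of this change):

```terraform
data "mongodbatlas_cluster" "example" {
  project_id = var.project_id # assumed to be defined elsewhere
  name       = "my-cluster"   # placeholder cluster name
}

# advanced_configuration is a single-element computed list, so index it with [0]
output "cluster_minimum_tls" {
  value = data.mongodbatlas_cluster.example.advanced_configuration[0].minimum_enabled_tls_protocol
}

output "cluster_javascript_enabled" {
  value = data.mongodbatlas_cluster.example.advanced_configuration[0].javascript_enabled
}
```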


See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/)
18 changes: 18 additions & 0 deletions website/docs/d/clusters.html.markdown
@@ -123,6 +123,7 @@ In addition to all arguments above, the following attributes are exported:

* `version_release_system` - Release cadence that Atlas uses for this cluster.

* `advanced_configuration` - Advanced configuration options applied to the cluster. See [Advanced Configuration](#advanced-configuration) below for details and a usage sketch.

### BI Connector

@@ -195,5 +196,22 @@ Contains a key-value pair that tags that the cluster was created by a Terraform
* `snapshot_backup_policy.#.policies.#.policy_item.#.retention_unit` - The unit of time in which snapshot retention is measured (days, weeks, months).
* `snapshot_backup_policy.#.policies.#.policy_item.#.retention_value` - The number of days, weeks, or months the snapshot is retained.

#### Advanced Configuration

* `default_read_concern` - [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/).
* `default_write_concern` - [Default level of acknowledgment requested from MongoDB for write operations](https://docs.mongodb.com/manual/reference/write-concern/) set for this cluster. MongoDB 4.4 clusters default to [1](https://docs.mongodb.com/manual/reference/write-concern/).
* `fail_index_key_too_long` - When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them.
* `javascript_enabled` - When true, the cluster allows execution of operations that perform server-side execution of JavaScript. When false, the cluster disables execution of those operations.
* `minimum_enabled_tls_protocol` - Minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections. Valid values are:

- TLS1_0
- TLS1_1
- TLS1_2

* `no_table_scan` - When true, the cluster disables the execution of any query that requires a collection scan to return results. When false, the cluster allows the execution of those operations.
* `oplog_size_mb` - The custom oplog size of the cluster. A missing value indicates that the cluster uses the default oplog size calculated by Atlas.
* `sample_size_bi_connector` - Number of documents per database to sample when gathering schema information. Defaults to 100. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
* `sample_refresh_interval_bi_connector` - Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. The specified value must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
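
For the plural data source, each element of `results` exposes its own `advanced_configuration` list. A minimal sketch, again assuming a placeholder `var.project_id`, that maps every cluster in the project to its configured oplog size:

```terraform
data "mongodbatlas_clusters" "example" {
  project_id = var.project_id # assumed to be defined elsewhere
}

# Build a map of cluster name => oplog size (MB) from the computed results list
output "oplog_size_by_cluster" {
  value = {
    for c in data.mongodbatlas_clusters.example.results :
    c.name => c.advanced_configuration[0].oplog_size_mb
  }
}
```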


See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/)
