From 87114b2e7da55513dc2288c53af7bd32fae47662 Mon Sep 17 00:00:00 2001 From: njucz Date: Fri, 11 Sep 2020 11:12:55 +0800 Subject: [PATCH] new resource `azurerm_synapse_sql_pool` (#8095) the third resource of #7406 it needs to take a long time to run test for PointInTime restore (8h according to the doc) and recovery (1 day), so I don't add test case for these two create mode. I have tested it offline and there is no problem --- .../services/sql/parse/sql_database.go | 47 ++ .../services/sql/parse/sql_database_test.go | 88 ++++ .../services/sql/resource_arm_sql_database.go | 37 +- .../services/sql/validate/sql_database.go | 22 + .../services/synapse/client/client.go | 26 +- .../synapse/parse/synapse_sql_pool.go | 37 ++ .../synapse/parse/synapse_sql_pool_test.go | 79 ++++ .../internal/services/synapse/registration.go | 1 + .../synapse/synapse_sql_pool_resource.go | 415 ++++++++++++++++++ .../tests/synapse_sql_pool_resource_test.go | 231 ++++++++++ .../synapse/validate/synapse_sql_pool.go | 42 ++ .../synapse/validate/synapse_sql_pool_test.go | 48 ++ website/azurerm.erb | 4 + website/docs/r/synapse_sql_pool.html.markdown | 101 +++++ 14 files changed, 1149 insertions(+), 29 deletions(-) create mode 100644 azurerm/internal/services/sql/parse/sql_database.go create mode 100644 azurerm/internal/services/sql/parse/sql_database_test.go create mode 100644 azurerm/internal/services/sql/validate/sql_database.go create mode 100644 azurerm/internal/services/synapse/parse/synapse_sql_pool.go create mode 100644 azurerm/internal/services/synapse/parse/synapse_sql_pool_test.go create mode 100644 azurerm/internal/services/synapse/synapse_sql_pool_resource.go create mode 100644 azurerm/internal/services/synapse/tests/synapse_sql_pool_resource_test.go create mode 100644 azurerm/internal/services/synapse/validate/synapse_sql_pool.go create mode 100644 azurerm/internal/services/synapse/validate/synapse_sql_pool_test.go create mode 100644 website/docs/r/synapse_sql_pool.html.markdown diff 
--git a/azurerm/internal/services/sql/parse/sql_database.go b/azurerm/internal/services/sql/parse/sql_database.go new file mode 100644 index 000000000000..fc7886569f37 --- /dev/null +++ b/azurerm/internal/services/sql/parse/sql_database.go @@ -0,0 +1,47 @@ +package parse + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type SqlDatabaseId struct { + ResourceGroup string + ServerName string + Name string +} + +func NewSqlDatabaseID(resourceGroup, serverName, name string) SqlDatabaseId { + return SqlDatabaseId{ + ResourceGroup: resourceGroup, + ServerName: serverName, + Name: name, + } +} + +func (id SqlDatabaseId) ID(subscriptionId string) string { + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Sql/servers/%s/databases/%s", subscriptionId, id.ResourceGroup, id.ServerName, id.Name) +} + +func SqlDatabaseID(input string) (*SqlDatabaseId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, fmt.Errorf("parsing Synapse Sql Pool ID %q: %+v", input, err) + } + + sqlDatabaseId := SqlDatabaseId{ + ResourceGroup: id.ResourceGroup, + } + if sqlDatabaseId.ServerName, err = id.PopSegment("servers"); err != nil { + return nil, err + } + if sqlDatabaseId.Name, err = id.PopSegment("databases"); err != nil { + return nil, err + } + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &sqlDatabaseId, nil +} diff --git a/azurerm/internal/services/sql/parse/sql_database_test.go b/azurerm/internal/services/sql/parse/sql_database_test.go new file mode 100644 index 000000000000..20bbb82e0661 --- /dev/null +++ b/azurerm/internal/services/sql/parse/sql_database_test.go @@ -0,0 +1,88 @@ +package parse + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = SqlDatabaseId{} + +func TestSqlDatabaseId(t *testing.T) { + 
testData := []struct { + Name string + Input string + Expected *SqlDatabaseId + }{ + { + Name: "Empty", + Input: "", + Expected: nil, + }, + { + Name: "No Resource Groups Segment", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000", + Expected: nil, + }, + { + Name: "No Resource Groups Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/", + Expected: nil, + }, + { + Name: "Resource Group ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/foo/", + Expected: nil, + }, + { + Name: "Missing servers Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Sql/servers/", + Expected: nil, + }, + { + Name: "Sql Servers ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Sql/servers/server1", + Expected: nil, + }, + { + Name: "Missing databases Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Sql/servers/server1/databases", + Expected: nil, + }, + { + Name: "Sql Database ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Sql/servers/server1/databases/db1", + Expected: &SqlDatabaseId{ + Name: "db1", + ServerName: "server1", + ResourceGroup: "resGroup1", + }, + }, + { + Name: "Wrong Casing", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Sql/servers/server1/Databases/db1", + Expected: nil, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Name) + + actual, err := SqlDatabaseID(v.Input) + if err != nil { + if v.Expected == nil { + continue + } + + t.Fatalf("Expected a value but got an error: %s", err) + } + + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + + if actual.ResourceGroup != v.Expected.ResourceGroup 
{ + t.Fatalf("Expected %q but got %q for Resource Group", v.Expected.ResourceGroup, actual.ResourceGroup) + } + } +} diff --git a/azurerm/internal/services/sql/resource_arm_sql_database.go b/azurerm/internal/services/sql/resource_arm_sql_database.go index 440cc930901f..bdd75e398d0b 100644 --- a/azurerm/internal/services/sql/resource_arm_sql_database.go +++ b/azurerm/internal/services/sql/resource_arm_sql_database.go @@ -16,7 +16,9 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/sql/helper" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/sql/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -28,9 +30,10 @@ func resourceArmSqlDatabase() *schema.Resource { Update: resourceArmSqlDatabaseCreateUpdate, Delete: resourceArmSqlDatabaseDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, + Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + _, err := parse.SqlDatabaseID(id) + return err + }), Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -523,16 +526,12 @@ func resourceArmSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := parse.SqlDatabaseID(d.Id()) if err != nil { return err } - resourceGroup := 
id.ResourceGroup - serverName := id.Path["servers"] - name := id.Path["databases"] - - resp, err := client.Get(ctx, resourceGroup, serverName, name, "") + resp, err := client.Get(ctx, id.ResourceGroup, id.ServerName, id.Name, "") if err != nil { if utils.ResponseWasNotFound(resp.Response) { log.Printf("[INFO] Error reading SQL Database %q - removing from state", d.Id()) @@ -540,11 +539,11 @@ func resourceArmSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error return nil } - return fmt.Errorf("Error making Read request on Sql Database %s: %+v", name, err) + return fmt.Errorf("Error making Read request on Sql Database %s: %+v", id.Name, err) } threatClient := meta.(*clients.Client).Sql.DatabaseThreatDetectionPoliciesClient - threat, err := threatClient.Get(ctx, resourceGroup, serverName, name) + threat, err := threatClient.Get(ctx, id.ResourceGroup, id.ServerName, id.Name) if err == nil { if err := d.Set("threat_detection_policy", flattenArmSqlServerThreatDetectionPolicy(d, threat)); err != nil { return fmt.Errorf("Error setting `threat_detection_policy`: %+v", err) @@ -552,12 +551,12 @@ func resourceArmSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error } d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) + d.Set("resource_group_name", id.ResourceGroup) if location := resp.Location; location != nil { d.Set("location", azure.NormalizeLocation(*location)) } - d.Set("server_name", serverName) + d.Set("server_name", id.ServerName) if props := resp.DatabaseProperties; props != nil { // TODO: set `create_mode` & `source_database_id` once this issue is fixed: @@ -599,9 +598,9 @@ func resourceArmSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error } auditingClient := meta.(*clients.Client).Sql.DatabaseExtendedBlobAuditingPoliciesClient - auditingResp, err := auditingClient.Get(ctx, resourceGroup, serverName, name) + auditingResp, err := auditingClient.Get(ctx, id.ResourceGroup, id.ServerName, id.Name) if err != nil { - 
return fmt.Errorf("failure in reading SQL Database %q: %v Blob Auditing Policies", name, err) + return fmt.Errorf("failure in reading SQL Database %q: %v Blob Auditing Policies", id.Name, err) } flattenBlobAuditing := helper.FlattenAzureRmSqlDBBlobAuditingPolicies(&auditingResp, d) @@ -617,16 +616,12 @@ func resourceArmSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) erro ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := parse.SqlDatabaseID(d.Id()) if err != nil { return err } - resourceGroup := id.ResourceGroup - serverName := id.Path["servers"] - name := id.Path["databases"] - - resp, err := client.Delete(ctx, resourceGroup, serverName, name) + resp, err := client.Delete(ctx, id.ResourceGroup, id.ServerName, id.Name) if err != nil { if utils.ResponseWasNotFound(resp) { return nil diff --git a/azurerm/internal/services/sql/validate/sql_database.go b/azurerm/internal/services/sql/validate/sql_database.go new file mode 100644 index 000000000000..2eff46b476e7 --- /dev/null +++ b/azurerm/internal/services/sql/validate/sql_database.go @@ -0,0 +1,22 @@ +package parse + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/sql/parse" +) + +func SqlDatabaseID(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return + } + + if _, err := parse.SqlDatabaseID(v); err != nil { + errors = append(errors, fmt.Errorf("can not parse %q as a Sql Database resource id: %v", k, err)) + return + } + + return warnings, errors +} diff --git a/azurerm/internal/services/synapse/client/client.go b/azurerm/internal/services/synapse/client/client.go index a8f1fe629a58..88cd24521b7c 100644 --- a/azurerm/internal/services/synapse/client/client.go +++ b/azurerm/internal/services/synapse/client/client.go @@ 
-6,10 +6,12 @@ import ( ) type Client struct { - FirewallRulesClient *synapse.IPFirewallRulesClient - SparkPoolClient *synapse.BigDataPoolsClient - WorkspaceClient *synapse.WorkspacesClient - WorkspaceAadAdminsClient *synapse.WorkspaceAadAdminsClient + FirewallRulesClient *synapse.IPFirewallRulesClient + SparkPoolClient *synapse.BigDataPoolsClient + SqlPoolClient *synapse.SQLPoolsClient + SqlPoolTransparentDataEncryptionClient *synapse.SQLPoolTransparentDataEncryptionsClient + WorkspaceClient *synapse.WorkspacesClient + WorkspaceAadAdminsClient *synapse.WorkspaceAadAdminsClient } func NewClient(o *common.ClientOptions) *Client { @@ -20,6 +22,12 @@ func NewClient(o *common.ClientOptions) *Client { sparkPoolClient := synapse.NewBigDataPoolsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&sparkPoolClient.Client, o.ResourceManagerAuthorizer) + sqlPoolClient := synapse.NewSQLPoolsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&sqlPoolClient.Client, o.ResourceManagerAuthorizer) + + sqlPoolTransparentDataEncryptionClient := synapse.NewSQLPoolTransparentDataEncryptionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&sqlPoolTransparentDataEncryptionClient.Client, o.ResourceManagerAuthorizer) + workspaceClient := synapse.NewWorkspacesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&workspaceClient.Client, o.ResourceManagerAuthorizer) @@ -27,9 +35,11 @@ func NewClient(o *common.ClientOptions) *Client { o.ConfigureClient(&workspaceAadAdminsClient.Client, o.ResourceManagerAuthorizer) return &Client{ - FirewallRulesClient: &firewallRuleClient, - SparkPoolClient: &sparkPoolClient, - WorkspaceClient: &workspaceClient, - WorkspaceAadAdminsClient: &workspaceAadAdminsClient, + FirewallRulesClient: &firewallRuleClient, + SparkPoolClient: &sparkPoolClient, + SqlPoolClient: &sqlPoolClient, + SqlPoolTransparentDataEncryptionClient: 
&sqlPoolTransparentDataEncryptionClient, + WorkspaceClient: &workspaceClient, + WorkspaceAadAdminsClient: &workspaceAadAdminsClient, } } diff --git a/azurerm/internal/services/synapse/parse/synapse_sql_pool.go b/azurerm/internal/services/synapse/parse/synapse_sql_pool.go new file mode 100644 index 000000000000..2a0e25bce8c1 --- /dev/null +++ b/azurerm/internal/services/synapse/parse/synapse_sql_pool.go @@ -0,0 +1,37 @@ +package parse + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type SynapseSqlPoolId struct { + Workspace *SynapseWorkspaceId + Name string +} + +func SynapseSqlPoolID(input string) (*SynapseSqlPoolId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, fmt.Errorf("parsing Synapse Sql Pool ID %q: %+v", input, err) + } + + synapseSqlPool := SynapseSqlPoolId{ + Workspace: &SynapseWorkspaceId{ + SubscriptionID: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + }, + } + if synapseSqlPool.Workspace.Name, err = id.PopSegment("workspaces"); err != nil { + return nil, err + } + if synapseSqlPool.Name, err = id.PopSegment("sqlPools"); err != nil { + return nil, err + } + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &synapseSqlPool, nil +} diff --git a/azurerm/internal/services/synapse/parse/synapse_sql_pool_test.go b/azurerm/internal/services/synapse/parse/synapse_sql_pool_test.go new file mode 100644 index 000000000000..198dca25e5cb --- /dev/null +++ b/azurerm/internal/services/synapse/parse/synapse_sql_pool_test.go @@ -0,0 +1,79 @@ +package parse + +import ( + "testing" +) + +func TestSynapseSqlPoolID(t *testing.T) { + testData := []struct { + Name string + Input string + Expected *SynapseSqlPoolId + }{ + { + Name: "Empty", + Input: "", + Expected: nil, + }, + { + Name: "No Resource Groups Segment", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000", + Expected: nil, + }, + { + Name: "No 
Resource Groups Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/", + Expected: nil, + }, + { + Name: "Resource Group ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/foo/", + Expected: nil, + }, + { + Name: "Missing SqlPool Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools", + Expected: nil, + }, + { + Name: "synapse SqlPool ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools/sqlPool1", + Expected: &SynapseSqlPoolId{ + Workspace: &SynapseWorkspaceId{ + ResourceGroup: "resourceGroup1", + Name: "workspace1", + }, + Name: "sqlPool1", + }, + }, + { + Name: "Wrong Casing", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/SqlPools/sqlPool1", + Expected: nil, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q..", v.Name) + + actual, err := SynapseSqlPoolID(v.Input) + if err != nil { + if v.Expected == nil { + continue + } + t.Fatalf("Expected a value but got an error: %s", err) + } + + if actual.Workspace.ResourceGroup != v.Expected.Workspace.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.Workspace.ResourceGroup, actual.Workspace.ResourceGroup) + } + + if actual.Workspace.Name != v.Expected.Workspace.Name { + t.Fatalf("Expected %q but got %q for Workspace Name", v.Expected.Workspace.Name, actual.Workspace.Name) + } + + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/synapse/registration.go b/azurerm/internal/services/synapse/registration.go index 4fba3ec9682a..b7ea7ff569ad 100644 --- a/azurerm/internal/services/synapse/registration.go 
+++ b/azurerm/internal/services/synapse/registration.go @@ -28,6 +28,7 @@ func (r Registration) SupportedResources() map[string]*schema.Resource { return map[string]*schema.Resource{ "azurerm_synapse_firewall_rule": resourceArmSynapseFirewallRule(), "azurerm_synapse_spark_pool": resourceArmSynapseSparkPool(), + "azurerm_synapse_sql_pool": resourceArmSynapseSqlPool(), "azurerm_synapse_workspace": resourceArmSynapseWorkspace(), } } diff --git a/azurerm/internal/services/synapse/synapse_sql_pool_resource.go b/azurerm/internal/services/synapse/synapse_sql_pool_resource.go new file mode 100644 index 000000000000..dc320f3f129e --- /dev/null +++ b/azurerm/internal/services/synapse/synapse_sql_pool_resource.go @@ -0,0 +1,415 @@ +package synapse + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse" + "github.com/Azure/go-autorest/autorest/date" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + postgresValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/postgres/validate" + sqlParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/sql/parse" + sqlValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/sql/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/synapse/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/synapse/validate" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +const ( + DefaultCreateMode = "Default" + RecoveryCreateMode = "Recovery" + PointInTimeRestoreCreateMode = "PointInTimeRestore" +) + +func resourceArmSynapseSqlPool() *schema.Resource { + return &schema.Resource{ + Create: resourceArmSynapseSqlPoolCreate, + Read: resourceArmSynapseSqlPoolRead, + Update: resourceArmSynapseSqlPoolUpdate, + Delete: resourceArmSynapseSqlPoolDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if _, err := parse.SynapseSqlPoolID(d.Id()); err != nil { + return []*schema.ResourceData{d}, err + } + + d.Set("create_mode", DefaultCreateMode) + if v, ok := d.GetOk("create_mode"); ok && v.(string) != "" { + d.Set("create_mode", v) + } + + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.SqlPoolName, + }, + + "synapse_workspace_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.SynapseWorkspaceID, + }, + + "sku_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "DW100c", + "DW200c", + "DW300c", + "DW400c", + "DW500c", + "DW1000c", + "DW1500c", + "DW2000c", + "DW2500c", + "DW3000c", + "DW5000c", + "DW6000c", + "DW7500c", + "DW10000c", + "DW15000c", + "DW30000c", + }, false), + }, + + "create_mode": { + Type: 
schema.TypeString, + Optional: true, + Default: DefaultCreateMode, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + DefaultCreateMode, + RecoveryCreateMode, + PointInTimeRestoreCreateMode, + }, false), + }, + + "collation": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: postgresValidate.PostgresDatabaseCollation, + }, + + "recovery_database_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"restore"}, + ValidateFunc: validation.Any( + validate.SqlPoolID, + sqlValidate.SqlDatabaseID, + ), + }, + + "restore": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"recovery_database_id"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "point_in_time": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.IsRFC3339Time, + }, + + "source_database_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.Any( + validate.SqlPoolID, + sqlValidate.SqlDatabaseID, + ), + }, + }, + }, + }, + + "data_encrypted": { + Type: schema.TypeBool, + Optional: true, + }, + + "tags": tags.Schema(), + }, + } +} + +func resourceArmSynapseSqlPoolCreate(d *schema.ResourceData, meta interface{}) error { + sqlClient := meta.(*clients.Client).Synapse.SqlPoolClient + sqlPTDEClient := meta.(*clients.Client).Synapse.SqlPoolTransparentDataEncryptionClient + workspaceClient := meta.(*clients.Client).Synapse.WorkspaceClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + workspaceId, err := parse.SynapseWorkspaceID(d.Get("synapse_workspace_id").(string)) + if err != nil { + return err + } + + existing, err := sqlClient.Get(ctx, workspaceId.ResourceGroup, workspaceId.Name, name) + if err != nil { + 
if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing Synapse Sql Pool %q (workspace %q / Resource Group %q): %+v", name, workspaceId.Name, workspaceId.ResourceGroup, err) + } + } + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_synapse_sql_pool", *existing.ID) + } + + workspace, err := workspaceClient.Get(ctx, workspaceId.ResourceGroup, workspaceId.Name) + if err != nil { + return fmt.Errorf("retrieving Synapse workspace %q (Resource Group %q): %+v", workspaceId.Name, workspaceId.ResourceGroup, err) + } + + mode := d.Get("create_mode").(string) + sqlPoolInfo := synapse.SQLPool{ + Location: workspace.Location, + SQLPoolResourceProperties: &synapse.SQLPoolResourceProperties{ + CreateMode: utils.String(mode), + }, + Sku: &synapse.Sku{ + Name: utils.String(d.Get("sku_name").(string)), + }, + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + switch mode { + case DefaultCreateMode: + sqlPoolInfo.SQLPoolResourceProperties.Collation = utils.String(d.Get("collation").(string)) + case RecoveryCreateMode: + recoveryDatabaseId := constructSourceDatabaseId(d.Get("recovery_database_id").(string), subscriptionId) + if recoveryDatabaseId == "" { + return fmt.Errorf("`recovery_database_id` must be set when `create_mode` is %q", RecoveryCreateMode) + } + sqlPoolInfo.SQLPoolResourceProperties.RecoverableDatabaseID = utils.String(recoveryDatabaseId) + case PointInTimeRestoreCreateMode: + restore := d.Get("restore").([]interface{}) + if len(restore) == 0 || restore[0] == nil { + return fmt.Errorf("`restore` block must be set when `create_mode` is %q", PointInTimeRestoreCreateMode) + } + v := restore[0].(map[string]interface{}) + sourceDatabaseId := constructSourceDatabaseId(v["source_database_id"].(string), subscriptionId) + restorePointInTime, err := time.Parse(time.RFC3339, v["point_in_time"].(string)) + if err != nil { + return err + } + 
sqlPoolInfo.SQLPoolResourceProperties.RestorePointInTime = &date.Time{Time: restorePointInTime} + sqlPoolInfo.SQLPoolResourceProperties.SourceDatabaseID = utils.String(sourceDatabaseId) + } + + future, err := sqlClient.Create(ctx, workspaceId.ResourceGroup, workspaceId.Name, name, sqlPoolInfo) + if err != nil { + return fmt.Errorf("creating Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", name, workspaceId.ResourceGroup, workspaceId.Name, err) + } + if err = future.WaitForCompletionRef(ctx, sqlClient.Client); err != nil { + return fmt.Errorf("waiting on creating future for Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", name, workspaceId.ResourceGroup, workspaceId.Name, err) + } + + if d.Get("data_encrypted").(bool) { + parameter := synapse.TransparentDataEncryption{ + TransparentDataEncryptionProperties: &synapse.TransparentDataEncryptionProperties{ + Status: synapse.TransparentDataEncryptionStatusEnabled, + }, + } + if _, err := sqlPTDEClient.CreateOrUpdate(ctx, workspaceId.ResourceGroup, workspaceId.Name, name, parameter); err != nil { + return fmt.Errorf("setting `data_encrypted`: %+v", err) + } + } + + resp, err := sqlClient.Get(ctx, workspaceId.ResourceGroup, workspaceId.Name, name) + if err != nil { + return fmt.Errorf("retrieving Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", name, workspaceId.ResourceGroup, workspaceId.Name, err) + } + + if resp.ID == nil || *resp.ID == "" { + return fmt.Errorf("empty or nil ID returned for Synapse SqlPool %q (Resource Group %q / workspaceName %q) ID", name, workspaceId.ResourceGroup, workspaceId.Name) + } + + d.SetId(*resp.ID) + return resourceArmSynapseSqlPoolRead(d, meta) +} + +func resourceArmSynapseSqlPoolUpdate(d *schema.ResourceData, meta interface{}) error { + sqlClient := meta.(*clients.Client).Synapse.SqlPoolClient + sqlPTDEClient := meta.(*clients.Client).Synapse.SqlPoolTransparentDataEncryptionClient + ctx, cancel := 
timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.SynapseSqlPoolID(d.Id()) + if err != nil { + return err + } + + if d.HasChange("data_encrypted") { + status := synapse.TransparentDataEncryptionStatusDisabled + if d.Get("data_encrypted").(bool) { + status = synapse.TransparentDataEncryptionStatusEnabled + } + + parameter := synapse.TransparentDataEncryption{ + TransparentDataEncryptionProperties: &synapse.TransparentDataEncryptionProperties{ + Status: status, + }, + } + if _, err := sqlPTDEClient.CreateOrUpdate(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name, parameter); err != nil { + return fmt.Errorf("updating `data_encrypted`: %+v", err) + } + } + + if d.HasChanges("sku_name", "tags") { + sqlPoolInfo := synapse.SQLPoolPatchInfo{ + Sku: &synapse.Sku{ + Name: utils.String(d.Get("sku_name").(string)), + }, + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + if _, err := sqlClient.Update(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name, sqlPoolInfo); err != nil { + return fmt.Errorf("updating Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", id.Name, id.Workspace.ResourceGroup, id.Workspace.Name, err) + } + + // wait for sku scale completion + if d.HasChange("sku_name") { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + "Scaling", + }, + Target: []string{ + "Online", + }, + Refresh: synapseSqlPoolScaleStateRefreshFunc(ctx, sqlClient, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name), + MinTimeout: 5 * time.Second, + ContinuousTargetOccurence: 3, + Timeout: d.Timeout(schema.TimeoutUpdate), + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("waiting for scaling of Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", id.Name, id.Workspace.ResourceGroup, id.Workspace.Name, err) + } + } + } + + return resourceArmSynapseSqlPoolRead(d, meta) +} + +func resourceArmSynapseSqlPoolRead(d *schema.ResourceData, meta 
interface{}) error { + sqlClient := meta.(*clients.Client).Synapse.SqlPoolClient + sqlPTDEClient := meta.(*clients.Client).Synapse.SqlPoolTransparentDataEncryptionClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.SynapseSqlPoolID(d.Id()) + if err != nil { + return err + } + + resp, err := sqlClient.Get(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] Synapse SQL Pool %q (Workspace %q / Resource Group %q) does not exist - removing from state", id.Name, id.Workspace.Name, id.Workspace.ResourceGroup) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving Synapse SqlPool %q (workspace %q / Resource Group %q): %+v", id.Name, id.Workspace.Name, id.Workspace.ResourceGroup, err) + } + + transparentDataEncryption, err := sqlPTDEClient.Get(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name) + if err != nil { + return fmt.Errorf("retrieving Transparent Data Encryption settings of Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", id.Name, id.Workspace.ResourceGroup, id.Workspace.Name, err) + } + + d.Set("name", id.Name) + d.Set("synapse_workspace_id", id.Workspace.String()) + if resp.Sku != nil { + d.Set("sku_name", resp.Sku.Name) + } + if props := resp.SQLPoolResourceProperties; props != nil { + d.Set("collation", props.Collation) + } + if props := transparentDataEncryption.TransparentDataEncryptionProperties; props != nil { + d.Set("data_encrypted", props.Status == synapse.TransparentDataEncryptionStatusEnabled) + } + + // whole "restore" block is not returned. 
to avoid conflict, so set it from the old state + d.Set("restore", d.Get("restore").([]interface{})) + + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceArmSynapseSqlPoolDelete(d *schema.ResourceData, meta interface{}) error { + sqlClient := meta.(*clients.Client).Synapse.SqlPoolClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.SynapseSqlPoolID(d.Id()) + if err != nil { + return err + } + + future, err := sqlClient.Delete(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name) + if err != nil { + return fmt.Errorf("deleting Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", id.Name, id.Workspace.ResourceGroup, id.Workspace.Name, err) + } + + if err = future.WaitForCompletionRef(ctx, sqlClient.Client); err != nil { + return fmt.Errorf("waiting on deleting future for Synapse SqlPool %q (Resource Group %q / workspaceName %q): %+v", id.Name, id.Workspace.ResourceGroup, id.Workspace.Name, err) + } + return nil +} + +func synapseSqlPoolScaleStateRefreshFunc(ctx context.Context, client *synapse.SQLPoolsClient, resourceGroup, workspaceName, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := client.Get(ctx, resourceGroup, workspaceName, name) + if err != nil { + return resp, "failed", err + } + if resp.SQLPoolResourceProperties == nil || resp.SQLPoolResourceProperties.Status == nil { + return resp, "failed", nil + } + return resp, *resp.SQLPoolResourceProperties.Status, nil + } +} + +// sqlPool backend service is a proxy to sql database +// backend service restore and backup only accept id format of sql database +// so if the id is sqlPool, we need to construct the corresponding sql database id +func constructSourceDatabaseId(id string, subscriptionId string) string { + sqlPoolId, err := parse.SynapseSqlPoolID(id) + if err != nil { + return id + } + return sqlParse.NewSqlDatabaseID(sqlPoolId.Workspace.ResourceGroup, 
sqlPoolId.Workspace.Name, sqlPoolId.Name).ID(subscriptionId) +} diff --git a/azurerm/internal/services/synapse/tests/synapse_sql_pool_resource_test.go b/azurerm/internal/services/synapse/tests/synapse_sql_pool_resource_test.go new file mode 100644 index 000000000000..d2150af34821 --- /dev/null +++ b/azurerm/internal/services/synapse/tests/synapse_sql_pool_resource_test.go @@ -0,0 +1,231 @@ +package tests + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/synapse/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMSynapseSqlPool_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_synapse_sql_pool", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMSynapseSqlPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSynapseSqlPool_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSynapseSqlPoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMSynapseSqlPool_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_synapse_sql_pool", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMSynapseSqlPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSynapseSqlPool_basic(data), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMSynapseSqlPoolExists(data.ResourceName), + ), + }, + data.RequiresImportErrorStep(testAccAzureRMSynapseSqlPool_requiresImport), + }, + }) +} + +func TestAccAzureRMSynapseSqlPool_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_synapse_sql_pool", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMSynapseSqlPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSynapseSqlPool_complete(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSynapseSqlPoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMSynapseSqlPool_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_synapse_sql_pool", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMSynapseSqlPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSynapseSqlPool_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSynapseSqlPoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMSynapseSqlPool_complete(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSynapseSqlPoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMSynapseSqlPool_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSynapseSqlPoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func testCheckAzureRMSynapseSqlPoolExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := acceptance.AzureProvider.Meta().(*clients.Client).Synapse.SqlPoolClient + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + 
return fmt.Errorf("synapse SqlPool not found: %s", resourceName) + } + id, err := parse.SynapseSqlPoolID(rs.Primary.ID) + if err != nil { + return err + } + if resp, err := client.Get(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name); err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("bad: Synapse SqlPool %q does not exist", id.Name) + } + return fmt.Errorf("bad: Get on Synapse.SqlPoolClient: %+v", err) + } + return nil + } +} + +func testCheckAzureRMSynapseSqlPoolDestroy(s *terraform.State) error { + client := acceptance.AzureProvider.Meta().(*clients.Client).Synapse.SqlPoolClient + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_synapse_sql_pool" { + continue + } + id, err := parse.SynapseSqlPoolID(rs.Primary.ID) + if err != nil { + return err + } + if resp, err := client.Get(ctx, id.Workspace.ResourceGroup, id.Workspace.Name, id.Name); err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("bad: Get on Synapse.SqlPoolClient: %+v", err) + } + } + return nil + } + return nil +} + +func testAccAzureRMSynapseSqlPool_basic(data acceptance.TestData) string { + template := testAccAzureRMSynapseSqlPool_template(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_synapse_sql_pool" "test" { + name = "acctestSP%s" + synapse_workspace_id = azurerm_synapse_workspace.test.id + sku_name = "DW100c" + create_mode = "Default" +} +`, template, data.RandomString) +} + +func testAccAzureRMSynapseSqlPool_requiresImport(data acceptance.TestData) string { + config := testAccAzureRMSynapseSqlPool_basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_synapse_sql_pool" "import" { + name = azurerm_synapse_sql_pool.test.name + synapse_workspace_id = azurerm_synapse_sql_pool.test.synapse_workspace_id + sku_name = azurerm_synapse_sql_pool.test.sku_name + create_mode = 
azurerm_synapse_sql_pool.test.create_mode +} +`, config) +} + +func testAccAzureRMSynapseSqlPool_complete(data acceptance.TestData) string { + template := testAccAzureRMSynapseSqlPool_template(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_synapse_sql_pool" "test" { + name = "acctestSP%s" + synapse_workspace_id = azurerm_synapse_workspace.test.id + sku_name = "DW500c" + create_mode = "Default" + collation = "SQL_Latin1_General_CP1_CI_AS" + data_encrypted = true + + tags = { + ENV = "Test" + } +} +`, template, data.RandomString) +} + +func testAccAzureRMSynapseSqlPool_template(data acceptance.TestData) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-synapse-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "BlobStorage" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "test" { + name = "acctest-%d" + storage_account_id = azurerm_storage_account.test.id +} + +resource "azurerm_synapse_workspace" "test" { + name = "acctestsw%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.test.id + sql_administrator_login = "sqladminuser" + sql_administrator_login_password = "H@Sh1CoR3!" 
+} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/synapse/validate/synapse_sql_pool.go b/azurerm/internal/services/synapse/validate/synapse_sql_pool.go new file mode 100644 index 000000000000..3cc0cb4cb087 --- /dev/null +++ b/azurerm/internal/services/synapse/validate/synapse_sql_pool.go @@ -0,0 +1,42 @@ +package validate + +import ( + "fmt" + "regexp" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/synapse/parse" +) + +func SqlPoolName(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + // The name attribute rules are : + // 1. can contain only letters, numbers and underscore. + // 2. The value must be between 1 and 15 characters long + + if !regexp.MustCompile(`^[a-zA-Z_\d]{1,15}$`).MatchString(v) { + errors = append(errors, fmt.Errorf("%s can contain only letters, numbers or underscore, The value must be between 1 and 15 characters long", k)) + return + } + + return warnings, errors +} + +func SqlPoolID(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return + } + + if _, err := parse.SynapseSqlPoolID(v); err != nil { + errors = append(errors, fmt.Errorf("can not parse %q as a Synapse Sql Pool resource id: %v", k, err)) + return + } + + return warnings, errors +} diff --git a/azurerm/internal/services/synapse/validate/synapse_sql_pool_test.go b/azurerm/internal/services/synapse/validate/synapse_sql_pool_test.go new file mode 100644 index 000000000000..4a4f5a1e665f --- /dev/null +++ b/azurerm/internal/services/synapse/validate/synapse_sql_pool_test.go @@ -0,0 +1,48 @@ +package validate + +import ( + "testing" +) + +func TestSqlPoolName(t *testing.T) { + 
testData := []struct { + input string + expected bool + }{ + { + // empty + input: "", + expected: false, + }, + { + // basic example + input: "aBc_123", + expected: true, + }, + { + // can't contain hyphen + input: "ab-c", + expected: false, + }, + { + // 15 chars + input: "abcdefghijklmno", + expected: true, + }, + { + // 16 chars + input: "abcdefghijklmnop", + expected: false, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q..", v.input) + + _, errors := SqlPoolName(v.input, "name") + actual := len(errors) == 0 + if v.expected != actual { + t.Fatalf("Expected %t but got %t", v.expected, actual) + } + } +} diff --git a/website/azurerm.erb b/website/azurerm.erb index fa154c13f7cf..36a0056923c5 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -2761,6 +2761,10 @@
  • azurerm_synapse_workspace
  • + +
  • + azurerm_synapse_sql_pool +
  • diff --git a/website/docs/r/synapse_sql_pool.html.markdown b/website/docs/r/synapse_sql_pool.html.markdown new file mode 100644 index 000000000000..036b146c64ba --- /dev/null +++ b/website/docs/r/synapse_sql_pool.html.markdown @@ -0,0 +1,101 @@ +--- +subcategory: "Synapse" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_synapse_sql_pool" +description: |- + Manages a Synapse Sql Pool. +--- + +# azurerm_synapse_sql_pool + +Manages a Synapse Sql Pool. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_storage_account" "example" { + name = "examplestorageacc" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + account_tier = "Standard" + account_replication_type = "LRS" + account_kind = "BlobStorage" +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "example" { + name = "example" + storage_account_id = azurerm_storage_account.example.id +} + +resource "azurerm_synapse_workspace" "example" { + name = "example" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id + sql_administrator_login = "sqladminuser" + sql_administrator_login_password = "H@Sh1CoR3!" +} + +resource "azurerm_synapse_sql_pool" "example" { + name = "examplesqlpool" + synapse_workspace_id = azurerm_synapse_workspace.example.id + sku_name = "DW100c" + create_mode = "Default" +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name which should be used for this Synapse Sql Pool. Changing this forces a new synapse SqlPool to be created. + +* `synapse_workspace_id` - (Required) The ID of Synapse Workspace within which this Sql Pool should be created. Changing this forces a new Synapse Sql Pool to be created. 
+ +* `sku_name` - (Required) Specifies the SKU Name for this Synapse Sql Pool. Possible values are `DW100c`, `DW200c`, `DW300c`, `DW400c`, `DW500c`, `DW1000c`, `DW1500c`, `DW2000c`, `DW2500c`, `DW3000c`, `DW5000c`, `DW6000c`, `DW7500c`, `DW10000c`, `DW15000c` or `DW30000c`. + +* `create_mode` - (Optional) Specifies how to create the Sql Pool. Valid values are: `Default`, `Recovery` or `PointInTimeRestore`. Must be `Default` to create a new database. Defaults to `Default`. + +* `collation` - (Optional) The name of the collation to use with this pool, only applicable when `create_mode` is set to `Default`. Azure default is `SQL_Latin1_General_CP1_CI_AS`. Changing this forces a new resource to be created. + +* `recovery_database_id` - (Optional) The ID of the Synapse Sql Pool or Sql Database which is to be recovered, only applicable when `create_mode` is set to `Recovery`. Changing this forces a new Synapse Sql Pool to be created. + +* `restore` - (Optional) A `restore` block as defined below. Only applicable when `create_mode` is set to `PointInTimeRestore`. + +* `tags` - (Optional) A mapping of tags which should be assigned to the Synapse Sql Pool. + +--- + +A `restore` block supports the following: + +* `source_database_id` - (Optional) The ID of the Synapse Sql Pool or Sql Database which is to be restored. Changing this forces a new Synapse Sql Pool to be created. + +* `point_in_time` - (Optional) Specifies the snapshot time to restore. Changing this forces a new Synapse Sql Pool to be created. + +## Attributes Reference + +In addition to the Arguments listed above - the following Attributes are exported: + +* `id` - The ID of the Synapse Sql Pool. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the Synapse Sql Pool. +* `read` - (Defaults to 5 minutes) Used when retrieving the Synapse Sql Pool. 
+* `update` - (Defaults to 30 minutes) Used when updating the Synapse Sql Pool. +* `delete` - (Defaults to 30 minutes) Used when deleting the Synapse Sql Pool. + +## Import + +Synapse Sql Pool can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_synapse_sql_pool.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools/sqlPool1 +```