diff --git a/aws/provider.go b/aws/provider.go index 606c2a16885..3dd19d0980e 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -494,6 +494,7 @@ func Provider() terraform.ResourceProvider { "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), "aws_flow_log": resourceAwsFlowLog(), + "aws_fsx_file_system": resourceAwsFsxFileSystem(), "aws_fms_admin_account": resourceAwsFmsAdminAccount(), "aws_gamelift_alias": resourceAwsGameliftAlias(), "aws_gamelift_build": resourceAwsGameliftBuild(), diff --git a/aws/resource_aws_fsx_file_system.go b/aws/resource_aws_fsx_file_system.go new file mode 100644 index 00000000000..de469c45dba --- /dev/null +++ b/aws/resource_aws_fsx_file_system.go @@ -0,0 +1,649 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceAwsFsxFileSystem() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFsxFileSystemCreate, + Read: resourceAwsFsxFileSystemRead, + Update: resourceAwsFsxFileSystemUpdate, + Delete: resourceAwsFsxFileSystemDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + fsx.FileSystemTypeLustre, + fsx.FileSystemTypeWindows, + }, false), + }, + "capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(300), + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"lustre_configuration"}, + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 120, + ValidateFunc: validation.IntAtLeast(60), + }, + "lustre_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{"windows_configuration"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "import_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "export_path": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "chunk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "windows_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{"lustre_configuration"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_directory_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_managed_active_directory": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + }, + "password": { + Type: schema.TypeString, + Required: true, + }, + 
"dns_ips": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + }, + "administrators_group": { + Type: schema.TypeString, + Optional: true, + }, + "ou_distinguished_name": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "backup_retention": { + Type: schema.TypeInt, + Optional: true, + Default: 7, + ValidateFunc: validation.IntBetween(0, 35), + }, + "copy_tags_to_backups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "daily_backup_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "throughput_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsFsxFileSystemCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + request := &fsx.CreateFileSystemInput{ + FileSystemType: aws.String(d.Get("type").(string)), + StorageCapacity: aws.Int64(int64(d.Get("capacity").(int))), + SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), + } + + if _, ok := d.GetOk("kms_key_id"); ok { + request.KmsKeyId = aws.String(d.Get("kms_key_id").(string)) + } + + if _, ok := d.GetOk("security_group_ids"); ok { + request.SecurityGroupIds = expandStringList(d.Get("security_group_ids").(*schema.Set).List()) + } + + if _, ok := d.GetOk("lustre_configuration"); ok { + request.LustreConfiguration = expandFsxLustreConfigurationCreate(d.Get("lustre_configuration").([]interface{})) + } + + if _, ok := d.GetOk("windows_configuration"); ok { + request.WindowsConfiguration = expandFsxWindowsConfigurationCreate(d.Get("windows_configuration").([]interface{})) + } + + if value, ok := d.GetOk("tags"); ok { + request.Tags = tagsFromMapFSX(value.(map[string]interface{})) + } + + log.Printf("[DEBUG] FSx Filesystem create opts: %s", request) + result, err := conn.CreateFileSystem(request) + if err != nil { + return fmt.Errorf("Error creating FSx filesystem: %s", err) + } + + d.SetId(*result.FileSystem.FileSystemId) + + log.Println("[DEBUG] Waiting for filesystem to become available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileSystemLifecycleCreating}, + Target: []string{fsx.FileSystemLifecycleAvailable}, + Refresh: fsxStateRefreshFunc(conn, d.Id()), + Timeout: time.Duration(d.Get("timeout").(int)) * time.Minute, + Delay: 30 * time.Second, + MinTimeout: 15 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for filesystem (%s) to become available: %s", + *result.FileSystem.FileSystemId, err) + } + + return resourceAwsFsxFileSystemRead(d, meta) +} + +func resourceAwsFsxFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + if d.HasChange("tags") { + if err := setTagsFSX(conn, d); err != nil { + return fmt.Errorf("Error updating tags for FSx filesystem: %s", err) + } + } + + requestUpdate := false + params := &fsx.UpdateFileSystemInput{ + FileSystemId: aws.String(d.Id()), + } + + if d.HasChange("lustre_configuration") { + params.LustreConfiguration = 
expandFsxLustreConfigurationUpdate(d.Get("lustre_configuration").([]interface{})) + requestUpdate = true + } + + if d.HasChange("windows_configuration") { + params.WindowsConfiguration = expandFsxWindowsConfigurationUpdate(d.Get("windows_configuration").([]interface{})) + requestUpdate = true + } + + if requestUpdate { + _, err := conn.UpdateFileSystem(params) + if err != nil { + return fmt.Errorf("error updating FSX File System (%s): %s", d.Id(), err) + } + } + + return resourceAwsFsxFileSystemRead(d, meta) +} + +func resourceAwsFsxFileSystemRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, d.Id()) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("Error reading FSx File System (%s): %s", d.Id(), err) + } + + if filesystem == nil { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("type", filesystem.FileSystemType) + d.Set("capacity", filesystem.StorageCapacity) + d.Set("arn", filesystem.ResourceARN) + d.Set("dns_name", filesystem.DNSName) + d.Set("kms_key_id", filesystem.KmsKeyId) + + d.Set("tags", tagsToMapFSX(filesystem.Tags)) + + err = d.Set("lustre_configuration", flattenLustreOptsConfig(filesystem.LustreConfiguration)) + if err != nil { + return err + } + + err = d.Set("windows_configuration", flattenWindowsOptsConfig(filesystem.WindowsConfiguration)) + if err != nil { + return err + } + + err = d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)) + if err != nil { + return err + } + + err = d.Set("security_group_ids", expandStringList(d.Get("security_group_ids").(*schema.Set).List())) + if err != nil { + return err + } + + return nil +} + +func resourceAwsFsxFileSystemDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + request := &fsx.DeleteFileSystemInput{ + FileSystemId: aws.String(d.Id()), + } + + _, err := conn.DeleteFileSystem(request) + if err != nil { + return fmt.Errorf("Error deleting FSx filesystem: %s", err) + } + + log.Println("[DEBUG] Waiting for filesystem to delete") + + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileSystemLifecycleAvailable, fsx.FileSystemLifecycleDeleting}, + Target: []string{}, + Refresh: fsxStateRefreshFunc(conn, d.Id()), + Timeout: time.Duration(d.Get("timeout").(int)) * time.Minute, + Delay: 30 * time.Second, + MinTimeout: 15 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to delete: %s", d.Id(), err) + } + + return nil +} + +func describeFsxFileSystem(conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: []*string{aws.String(id)}, + } + var filesystem *fsx.FileSystem + + err := conn.DescribeFileSystemsPages(input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + for _, fs := range page.FileSystems { + if aws.StringValue(fs.FileSystemId) == id { + filesystem = fs + break + } + } + + return !lastPage + }) + + return filesystem, err +} + +func fsxStateRefreshFunc(conn *fsx.FSx, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeFileSystems(&fsx.DescribeFileSystemsInput{ + FileSystemIds: []*string{aws.String(id)}, + }) + + if resp == 
nil { + return nil, "", nil + } + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + filesystem := resp.FileSystems[0] + return filesystem, *filesystem.Lifecycle, nil + } +} + +func expandFsxLustreConfigurationCreate(l []interface{}) *fsx.CreateFileSystemLustreConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.CreateFileSystemLustreConfiguration{} + + if data["import_path"].(string) != "" { + req.ImportPath = aws.String(data["import_path"].(string)) + } + + if data["export_path"].(string) != "" { + req.ExportPath = aws.String(data["export_path"].(string)) + } + + if data["chunk_size"] != nil { + req.ImportedFileChunkSize = aws.Int64(int64(data["chunk_size"].(int))) + } + + if data["weekly_maintenance_start_time"].(string) != "" { + req.WeeklyMaintenanceStartTime = aws.String(data["weekly_maintenance_start_time"].(string)) + } + + return req +} + +func expandFsxLustreConfigurationUpdate(l []interface{}) *fsx.UpdateFileSystemLustreConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.UpdateFileSystemLustreConfiguration{} + + if data["weekly_maintenance_start_time"].(string) != "" { + req.WeeklyMaintenanceStartTime = aws.String(data["weekly_maintenance_start_time"].(string)) + } + + return req +} + +func expandFsxWindowsConfigurationCreate(l []interface{}) *fsx.CreateFileSystemWindowsConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.CreateFileSystemWindowsConfiguration{ + ThroughputCapacity: aws.Int64(int64(data["throughput_capacity"].(int))), + } + + if data["active_directory_id"].(string) != "" { + req.ActiveDirectoryId = aws.String(data["active_directory_id"].(string)) + } + + if data["self_managed_active_directory"] != nil { + req.SelfManagedActiveDirectoryConfiguration = expandSelfManagedAdOptsCreate(data["self_managed_active_directory"].([]interface{})) + } + + if data["backup_retention"] != nil { + req.AutomaticBackupRetentionDays = aws.Int64(int64(data["backup_retention"].(int))) + } + + if data["copy_tags_to_backups"] != nil { + req.CopyTagsToBackups = aws.Bool(data["copy_tags_to_backups"].(bool)) + } + + if data["daily_backup_start_time"].(string) != "" { + req.DailyAutomaticBackupStartTime = aws.String(data["daily_backup_start_time"].(string)) + } + + if data["weekly_maintenance_start_time"].(string) != "" { + req.WeeklyMaintenanceStartTime = aws.String(data["weekly_maintenance_start_time"].(string)) + } + + return req +} + +func expandFsxWindowsConfigurationUpdate(l []interface{}) *fsx.UpdateFileSystemWindowsConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.UpdateFileSystemWindowsConfiguration{} + + if data["backup_retention"] != nil { + req.AutomaticBackupRetentionDays = aws.Int64(int64(data["backup_retention"].(int))) + } + + if data["daily_backup_start_time"].(string) != "" { + req.DailyAutomaticBackupStartTime = aws.String(data["daily_backup_start_time"].(string)) + } + + if data["weekly_maintenance_start_time"].(string) != "" { + req.WeeklyMaintenanceStartTime = aws.String(data["weekly_maintenance_start_time"].(string)) + } + + if data["self_managed_active_directory"] != nil { + req.SelfManagedActiveDirectoryConfiguration = expandSelfManagedAdOptsUpdate(data["self_managed_active_directory"].([]interface{})) + } + + 
return req +} + +func expandSelfManagedAdOptsCreate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.SelfManagedActiveDirectoryConfiguration{} + + if d, ok := data["dns_ips"]; ok { + req.DnsIps = expandStringList(d.([]interface{})) + } + + if data["domain_name"].(string) != "" { + req.DomainName = aws.String(data["domain_name"].(string)) + } + + if data["administrators_group"].(string) != "" { + req.FileSystemAdministratorsGroup = aws.String(data["administrators_group"].(string)) + } + + if data["ou_distinguished_name"].(string) != "" { + req.OrganizationalUnitDistinguishedName = aws.String(data["ou_distinguished_name"].(string)) + } + + if data["password"].(string) != "" { + req.Password = aws.String(data["password"].(string)) + } + + if data["username"].(string) != "" { + req.UserName = aws.String(data["username"].(string)) + } + + return req +} + +func expandSelfManagedAdOptsUpdate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfigurationUpdates { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.SelfManagedActiveDirectoryConfigurationUpdates{} + + if d, ok := data["dns_ips"]; ok { + req.DnsIps = expandStringList(d.([]interface{})) + } + + if data["password"].(string) != "" { + req.Password = aws.String(data["password"].(string)) + } + + if data["username"].(string) != "" { + req.UserName = aws.String(data["username"].(string)) + } + + return req +} + +func flattenLustreOptsConfig(lopts *fsx.LustreFileSystemConfiguration) []map[string]interface{} { + if lopts == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if lopts.DataRepositoryConfiguration != nil && *lopts.DataRepositoryConfiguration.ImportPath != "" { + m["import_path"] = aws.StringValue(lopts.DataRepositoryConfiguration.ImportPath) + } + if lopts.DataRepositoryConfiguration != nil && *lopts.DataRepositoryConfiguration.ExportPath != "" { + m["export_path"] = aws.StringValue(lopts.DataRepositoryConfiguration.ExportPath) + } + if lopts.DataRepositoryConfiguration != nil && *lopts.DataRepositoryConfiguration.ImportedFileChunkSize != 0 { + m["chunk_size"] = aws.Int64Value(lopts.DataRepositoryConfiguration.ImportedFileChunkSize) + } + if lopts.WeeklyMaintenanceStartTime != nil { + m["weekly_maintenance_start_time"] = aws.StringValue(lopts.WeeklyMaintenanceStartTime) + } + + return []map[string]interface{}{m} +} + +func flattenWindowsOptsConfig(wopts *fsx.WindowsFileSystemConfiguration) []map[string]interface{} { + if wopts == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if wopts.ActiveDirectoryId != nil { + m["active_directory_id"] = aws.StringValue(wopts.ActiveDirectoryId) + } + if wopts.AutomaticBackupRetentionDays != nil { + m["backup_retention"] = aws.Int64Value(wopts.AutomaticBackupRetentionDays) + } + if wopts.CopyTagsToBackups != nil { + m["copy_tags_to_backups"] = aws.BoolValue(wopts.CopyTagsToBackups) + } + if wopts.DailyAutomaticBackupStartTime != nil { + m["daily_backup_start_time"] = aws.StringValue(wopts.DailyAutomaticBackupStartTime) + } + if wopts.ThroughputCapacity != nil { + m["throughput_capacity"] = aws.Int64Value(wopts.ThroughputCapacity) + } + if wopts.WeeklyMaintenanceStartTime != nil { + m["weekly_maintenance_start_time"] = aws.StringValue(wopts.WeeklyMaintenanceStartTime) + } + if wopts.SelfManagedActiveDirectoryConfiguration != nil { + 
m["self_managed_active_directory"] = flattenSelfManagedAdOptsConfig(wopts.SelfManagedActiveDirectoryConfiguration) + } + + return []map[string]interface{}{m} +} + +func flattenSelfManagedAdOptsConfig(adopts *fsx.SelfManagedActiveDirectoryAttributes) []map[string]interface{} { + if adopts == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if adopts.UserName != nil { + m["username"] = aws.StringValue(adopts.UserName) + } + if adopts.DnsIps != nil { + m["dns_ips"] = aws.StringValueSlice(adopts.DnsIps) + } + if adopts.DomainName != nil { + m["domain_name"] = aws.StringValue(adopts.DomainName) + } + if adopts.FileSystemAdministratorsGroup != nil { + m["administrators_group"] = aws.StringValue(adopts.FileSystemAdministratorsGroup) + } + if adopts.OrganizationalUnitDistinguishedName != nil { + m["ou_distinguished_name"] = aws.StringValue(adopts.OrganizationalUnitDistinguishedName) + } + + return []map[string]interface{}{m} +} diff --git a/aws/resource_aws_fsx_file_system_test.go b/aws/resource_aws_fsx_file_system_test.go new file mode 100644 index 00000000000..5c6cd0b2955 --- /dev/null +++ b/aws/resource_aws_fsx_file_system_test.go @@ -0,0 +1,367 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSFsxFileSystem_lustreBasic(t *testing.T) { + var v fsx.FileSystem + resourceName := "aws_fsx_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxFileSystemLustreBasic(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "type", "LUSTRE"), + resource.TestCheckResourceAttr(resourceName, "capacity", "3600"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"timeout", "security_group_ids"}, + }, + }, + }) +} + +func TestAccAWSFsxFileSystem_lustreConfig(t *testing.T) { + var v fsx.FileSystem + resourceName := "aws_fsx_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxFileSystemLustreConfigOpts(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.import_path", "s3://nasanex"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.chunk_size", "2048"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"timeout", "security_group_ids"}, + }, + }, + }) +} + +func TestAccAWSFsxFileSystem_lustreUpdate(t *testing.T) { + var v fsx.FileSystem + resourceName := "aws_fsx_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxFileSystemLustreConfigOpts(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "lustre_configuration.0.weekly_maintenance_start_time"), + ), + }, + { + Config: testAccAwsFsxFileSystemLustreUpdateOpts(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.weekly_maintenance_start_time", "5:05:50"), + ), + }, + }, + }) +} + +func TestAccAWSFsxFileSystem_windowsConfig(t *testing.T) { + var v fsx.FileSystem + resourceName := "aws_fsx_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxFileSystemWindowsConfigOpts(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.0.backup_retention", "3"), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.0.copy_tags_to_backups", "true"), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.0.throughput_capacity", "1024"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"timeout", "security_group_ids"}, + }, + }, + }) +} + +func TestAccAWSFsxFileSystem_windowsUpdate(t *testing.T) { + var v fsx.FileSystem + resourceName := "aws_fsx_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxFileSystemWindowsConfigOpts(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.0.backup_retention", "3"), + ), + }, + { + Config: testAccAwsFsxFileSystemWindowsUpdateOpts(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileSystemExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "windows_configuration.0.backup_retention", "30"), + ), + }, + }, + }) +} + +func testAccCheckFileSystemExists(n string, v *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + request := &fsx.DescribeFileSystemsInput{ + FileSystemIds: []*string{aws.String(rs.Primary.ID)}, + } + + response, err := conn.DescribeFileSystems(request) + if err == nil { + if response.FileSystems != nil && len(response.FileSystems) > 0 { + *v = *response.FileSystems[0] + return nil + } + } + 
return fmt.Errorf("Error finding FSx filesystem %s", rs.Primary.ID) + } +} + +func testAccCheckFsxFileSystemDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_file_system" { + continue + } + + filesystem, err := describeFsxFileSystem(conn, rs.Primary.ID) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + continue + } + + if err != nil { + return err + } + + if filesystem != nil { + return fmt.Errorf("FSx File System (%s) still exists", rs.Primary.ID) + } + } + return nil +} + +const testAccAwsFsxFileSystemBaseConfig = ` +data "aws_availability_zones" "available" { + state = "available" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test1" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "10.0.1.0/24" + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} + +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "10.0.2.0/24" + availability_zone = "${data.aws_availability_zones.available.names[1]}" +} + +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + from_port = 988 + to_port = 988 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/16"] + } + + ingress { + from_port = 135 + to_port = 135 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/16"] + } + + ingress { + from_port = 445 + to_port = 445 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/16"] + } + + ingress { + from_port = 55555 + to_port = 55555 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/16"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} +` + +const testAccAwsFsxFileSystemBaseWindowsConfig = ` +resource "aws_kms_key" "test" { + description = "FSx KMS Testing key" + deletion_window_in_days = 7 +} + +resource "aws_directory_service_directory" "test" { + name = "corp.notexample.com" + password = "SuperSecretPassw0rd" + type = "MicrosoftAD" + + vpc_settings { + vpc_id = "${aws_vpc.test.id}" + subnet_ids = ["${aws_subnet.test1.id}", "${aws_subnet.test2.id}"] + } +} +` + +func testAccAwsFsxFileSystemLustreBasic() string { + return testAccAwsFsxFileSystemBaseConfig + fmt.Sprintf(` +resource "aws_fsx_file_system" "test" { + type = "LUSTRE" + capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] + security_group_ids = ["${aws_security_group.test1.id}"] +} +`) +} + +func testAccAwsFsxFileSystemLustreConfigOpts() string { + return testAccAwsFsxFileSystemBaseConfig + fmt.Sprintf(` +resource "aws_fsx_file_system" "test" { + type = "LUSTRE" + capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] + + lustre_configuration { + import_path = "s3://nasanex" + chunk_size = 2048 + } +} +`) +} + +func testAccAwsFsxFileSystemLustreUpdateOpts() string { + return testAccAwsFsxFileSystemBaseConfig + fmt.Sprintf(` +resource "aws_fsx_file_system" "test" { + type = "LUSTRE" + capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] + + lustre_configuration { + import_path = "s3://nasanex" + chunk_size = 2048 + weekly_maintenance_start_time = "5:05:50" + } +} +`) +} + +func testAccAwsFsxFileSystemWindowsConfigOpts() string { + return testAccAwsFsxFileSystemBaseConfig + testAccAwsFsxFileSystemBaseWindowsConfig + fmt.Sprintf(` +resource "aws_fsx_file_system" "test" { + type = "WINDOWS" + capacity = 300 + kms_key_id = "${aws_kms_key.test.arn}" + subnet_ids = ["${aws_subnet.test1.id}"] + + windows_configuration { + 
active_directory_id = "${aws_directory_service_directory.test.id}" + backup_retention = 3 + copy_tags_to_backups = true + throughput_capacity = 1024 + } +} +`) +} + +func testAccAwsFsxFileSystemWindowsUpdateOpts() string { + return testAccAwsFsxFileSystemBaseConfig + testAccAwsFsxFileSystemBaseWindowsConfig + fmt.Sprintf(` +resource "aws_fsx_file_system" "test" { + type = "WINDOWS" + capacity = 300 + kms_key_id = "${aws_kms_key.test.arn}" + subnet_ids = ["${aws_subnet.test1.id}"] + + windows_configuration { + active_directory_id = "${aws_directory_service_directory.test.id}" + backup_retention = 30 + copy_tags_to_backups = true + throughput_capacity = 1024 + } +} +`) +} diff --git a/aws/tagsFSX.go b/aws/tagsFSX.go new file mode 100644 index 00000000000..f2eed55a87b --- /dev/null +++ b/aws/tagsFSX.go @@ -0,0 +1,117 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags". It also expects to take the resource +// ARN as the primary ID based on the requirements of the FSx API (as +// opposed to the resource ID like other tagging helpers). +func setTagsFSX(conn *fsx.FSx, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsFSX(tagsFromMapFSX(o), tagsFromMapFSX(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, 0, len(remove)) + for _, t := range remove { + k = append(k, t.Key) + } + _, err := conn.UntagResource(&fsx.UntagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&fsx.TagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsFSX(oldTags, newTags []*fsx.Tag) ([]*fsx.Tag, []*fsx.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*fsx.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapFSX(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapFSX(m map[string]interface{}) []*fsx.Tag { + var result []*fsx.Tag + for k, v := range m { + t := &fsx.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredFSX(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
+func tagsToMapFSX(ts []*fsx.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredFSX(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredFSX(t *fsx.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + r, _ := regexp.MatchString(v, *t.Key) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/aws/tagsFSX_test.go b/aws/tagsFSX_test.go new file mode 100644 index 00000000000..5bf1c309b6e --- /dev/null +++ b/aws/tagsFSX_test.go @@ -0,0 +1,77 @@ +package aws + +import ( + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" +) + +func TestDiffFSXTags(t *testing.T) { + cases := []struct { + Old, New map[string]interface{} + Create, Remove map[string]string + }{ + // Basic add/remove + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "bar": "baz", + }, + Create: map[string]string{ + "bar": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + + // Modify + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "foo": "baz", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + } + + for i, tc := range cases { + c, r := diffTagsFSX(tagsFromMapFSX(tc.Old), tagsFromMapFSX(tc.New)) + cm := tagsToMapFSX(c) + rm := tagsToMapFSX(r) + if !reflect.DeepEqual(cm, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, cm) + } + if !reflect.DeepEqual(rm, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, rm) + } + } +} + +func TestIgnoringTagsFSX(t *testing.T) { + var ignoredTags []*fsx.Tag + ignoredTags = append(ignoredTags, &fsx.Tag{ + Key: aws.String("aws:cloudformation:logical-id"), + Value: aws.String("foo"), + }) + ignoredTags = append(ignoredTags, &fsx.Tag{ + Key: aws.String("aws:foo:bar"), + Value: aws.String("baz"), + }) + for _, tag := range ignoredTags { + if !tagIgnoredFSX(tag) { + t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) + } + } +} diff --git a/website/aws.erb b/website/aws.erb index 9e43d5d6410..1c8b8ce7fce 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -1471,6 +1471,14 @@ +
+                  FSx Resources
                   Gamelift
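
Example usage (Lustre), a minimal sketch mirroring testAccAwsFsxFileSystemLustreConfigOpts above; the subnet and security group references and the S3 bucket name are placeholders for resources a real configuration would define separately:

resource "aws_fsx_file_system" "lustre" {
  type               = "LUSTRE"
  capacity           = 3600
  subnet_ids         = ["${aws_subnet.example.id}"]         # placeholder: subnet defined elsewhere
  security_group_ids = ["${aws_security_group.example.id}"] # placeholder: SG allowing TCP 988

  lustre_configuration {
    import_path = "s3://example-bucket" # illustrative bucket name only
    chunk_size  = 2048
  }
}

lustre_configuration and windows_configuration are mutually exclusive (ConflictsWith in the schema), and for Lustre only weekly_maintenance_start_time is applied on update. The timeout argument (minutes, default 120, minimum 60) bounds how long create and delete wait for the filesystem to reach its target lifecycle state.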
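Example usage (Windows), again mirroring the acceptance tests; the KMS key, subnet, and Microsoft AD directory references are placeholders:

resource "aws_fsx_file_system" "windows" {
  type       = "WINDOWS"
  capacity   = 300
  kms_key_id = "${aws_kms_key.example.arn}" # placeholder KMS key
  subnet_ids = ["${aws_subnet.example.id}"] # placeholder subnet

  windows_configuration {
    active_directory_id  = "${aws_directory_service_directory.example.id}" # placeholder Microsoft AD directory
    throughput_capacity  = 1024
    backup_retention     = 3
    copy_tags_to_backups = true
  }
}

Only throughput_capacity is required inside windows_configuration; backup_retention defaults to 7 days (0-35 allowed), and backup retention, the daily backup window, the weekly maintenance window, and the self-managed AD credentials can be changed in place via UpdateFileSystem. Because the importer uses ImportStatePassthrough, an existing filesystem can be adopted with terraform import aws_fsx_file_system.windows <filesystem-id>; the acceptance tests ignore timeout and security_group_ids on import verification because neither is read back from the API.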