r/aws_fsx_lustre_file_system: Lower minimum storage capacity to 1200 GiB #11847

Merged 2 commits on Feb 6, 2020
2 changes: 1 addition & 1 deletion aws/resource_aws_fsx_lustre_file_system.go
@@ -84,7 +84,7 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
- ValidateFunc: validation.IntAtLeast(3600),
+ ValidateFunc: validation.IntAtLeast(1200),
},
"subnet_ids": {
Type: schema.TypeSet,
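The new floor is enforced by `validation.IntAtLeast(1200)` from the Terraform Plugin SDK's `helper/validation` package. As a quick illustration only (not part of this PR, and assuming the SDK v1 import path the provider used at the time), the validator returns an error for any integer below the minimum and passes everything at or above it:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

func main() {
	// IntAtLeast builds a schema.SchemaValidateFunc that errors when the
	// supplied value is an int below the given minimum (here, 1200 GiB).
	validate := validation.IntAtLeast(1200)

	for _, capacity := range []int{1200, 3600, 600} {
		warns, errs := validate(capacity, "storage_capacity")
		fmt.Printf("storage_capacity = %d -> warnings: %v, errors: %v\n", capacity, warns, errs)
	}
}
```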
24 changes: 12 additions & 12 deletions aws/resource_aws_fsx_lustre_file_system_test.go
@@ -88,7 +88,7 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "0"),
resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "2"),
testAccCheckResourceAttrAccountID(resourceName, "owner_id"),
- resource.TestCheckResourceAttr(resourceName, "storage_capacity", "3600"),
+ resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"),
resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"),
resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"),
resource.TestCheckResourceAttr(resourceName, "tags.%", "0"),
@@ -291,11 +291,11 @@ func TestAccAWSFsxLustreFileSystem_StorageCapacity(t *testing.T) {
ImportStateVerifyIgnore: []string{"security_group_ids"},
},
{
- Config: testAccAwsFsxLustreFileSystemConfigStorageCapacity(3600),
+ Config: testAccAwsFsxLustreFileSystemConfigStorageCapacity(1200),
Check: resource.ComposeTestCheckFunc(
testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2),
testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2),
- resource.TestCheckResourceAttr(resourceName, "storage_capacity", "3600"),
+ resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"),
),
},
},
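The test steps above drive `testAccAwsFsxLustreFileSystemConfigStorageCapacity`, whose body falls outside this diff. A minimal sketch of what such a helper plausibly looks like, following the pattern of the other config helpers in this file (the actual function in the repository may differ):

```go
// Hypothetical reconstruction, not taken from this diff: a config helper that
// interpolates the requested storage capacity into the shared base config,
// mirroring the other testAccAwsFsxLustreFileSystemConfig* helpers.
func testAccAwsFsxLustreFileSystemConfigStorageCapacity(storageCapacity int) string {
	return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(`
resource "aws_fsx_lustre_file_system" "test" {
  storage_capacity = %[1]d
  subnet_ids       = ["${aws_subnet.test1.id}"]
}
`, storageCapacity)
}
```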
@@ -498,7 +498,7 @@ resource "aws_s3_bucket" "test" {
resource "aws_fsx_lustre_file_system" "test" {
export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s"
import_path = "s3://${aws_s3_bucket.test.bucket}"
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
}
`, rName, exportPrefix)
@@ -513,7 +513,7 @@ resource "aws_s3_bucket" "test" {

resource "aws_fsx_lustre_file_system" "test" {
import_path = "s3://${aws_s3_bucket.test.bucket}%[2]s"
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
}
`, rName, importPrefix)
@@ -529,7 +529,7 @@ resource "aws_s3_bucket" "test" {
resource "aws_fsx_lustre_file_system" "test" {
import_path = "s3://${aws_s3_bucket.test.bucket}"
imported_file_chunk_size = %[2]d
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
}
`, rName, importedFileChunkSize)
@@ -558,7 +558,7 @@ resource "aws_security_group" "test1" {

resource "aws_fsx_lustre_file_system" "test" {
security_group_ids = ["${aws_security_group.test1.id}"]
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
}
`)
@@ -606,7 +606,7 @@ resource "aws_security_group" "test2" {

resource "aws_fsx_lustre_file_system" "test" {
security_group_ids = ["${aws_security_group.test1.id}", "${aws_security_group.test2.id}"]
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
}
`)
@@ -624,7 +624,7 @@ resource "aws_fsx_lustre_file_system" "test" {
func testAccAwsFsxLustreFileSystemConfigSubnetIds1() string {
return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(`
resource "aws_fsx_lustre_file_system" "test" {
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
}
`)
@@ -633,7 +633,7 @@ resource "aws_fsx_lustre_file_system" "test" {
func testAccAwsFsxLustreFileSystemConfigTags1(tagKey1, tagValue1 string) string {
return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(`
resource "aws_fsx_lustre_file_system" "test" {
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]

tags = {
@@ -646,7 +646,7 @@ resource "aws_fsx_lustre_file_system" "test" {
func testAccAwsFsxLustreFileSystemConfigTags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string {
return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(`
resource "aws_fsx_lustre_file_system" "test" {
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]

tags = {
@@ -660,7 +660,7 @@ resource "aws_fsx_lustre_file_system" "test" {
func testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string {
return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(`
resource "aws_fsx_lustre_file_system" "test" {
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.test1.id}"]
weekly_maintenance_start_time = %[1]q
}
4 changes: 2 additions & 2 deletions website/docs/r/fsx_lustre_file_system.html.markdown
@@ -15,7 +15,7 @@ Manages a FSx Lustre File System. See the [FSx Lustre Guide](https://docs.aws.am
```hcl
resource "aws_fsx_lustre_file_system" "example" {
import_path = "s3://${aws_s3_bucket.example.bucket}"
- storage_capacity = 3600
+ storage_capacity = 1200
subnet_ids = ["${aws_subnet.example.id}"]
}
```
@@ -24,7 +24,7 @@ resource "aws_fsx_lustre_file_system" "example" {

The following arguments are supported:

- * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Minimum of `3600`. Storage capacity is provisioned in increments of 3,600 GiB.
+ * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Minimum of `1200`. Storage capacity is provisioned in increments of 3,600 GiB.
* `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone.
* `export_path` - (Optional) S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with `import_path` argument and the path must use the same Amazon S3 bucket as specified in `import_path`. Set equal to `import_path` to overwrite files on export. Defaults to `s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}`.
* `import_path` - (Optional) S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, `s3://example-bucket/optional-prefix/`.