From f559520b826de02b1bc807193a2924891088e615 Mon Sep 17 00:00:00 2001 From: Trevor Bonas <45324987+trevorbonas@users.noreply.github.com> Date: Thu, 13 Jun 2024 13:32:00 -0700 Subject: [PATCH 01/21] Add TimestreamInfluxDB DB Instance resource --- .../service/timestreaminfluxdb/db_instance.go | 927 +++++++++++++++++ .../timestreaminfluxdb/db_instance_test.go | 972 ++++++++++++++++++ .../timestreaminfluxdb/exports_test.go | 11 + .../timestreaminfluxdb/service_package_gen.go | 10 +- internal/service/timestreaminfluxdb/sweep.go | 66 ++ internal/sweep/register_gen_test.go | 2 + ...mestreaminfluxdb_db_instance.html.markdown | 281 +++++ 7 files changed, 2268 insertions(+), 1 deletion(-) create mode 100644 internal/service/timestreaminfluxdb/db_instance.go create mode 100644 internal/service/timestreaminfluxdb/db_instance_test.go create mode 100644 internal/service/timestreaminfluxdb/exports_test.go create mode 100644 internal/service/timestreaminfluxdb/sweep.go create mode 100644 website/docs/r/timestreaminfluxdb_db_instance.html.markdown diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go new file mode 100644 index 00000000000..c39f6aea3e1 --- /dev/null +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -0,0 +1,927 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package timestreaminfluxdb + +import ( + "context" + "encoding/json" + "errors" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + 
"github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. +// @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="Db Instance") +// @Tags(identifierAttribute="arn") +func newResourceDbInstance(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceDbInstance{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultUpdateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +const ( + // If not provided, CreateDbInstance will use the below default values + // for bucket and organization. These values need to be set in Terraform + // because GetDbInstance won't return them. + DefaultBucketValue = "bucket" + DefaultOrganizationValue = "organization" + DefaultUsernameValue = "admin" + ResNameDbInstance = "Db Instance" +) + +type resourceDbInstance struct { + framework.ResourceWithConfigure + framework.WithTimeouts +} + +func (r *resourceDbInstance) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_timestreaminfluxdb_db_instance" +} + +func (r *resourceDbInstance) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "allocated_storage": schema.Int64Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.RequiresReplace(), + }, + Validators: []validator.Int64{ + int64validator.AtLeast(20), + int64validator.AtMost(16384), + }, + Description: `The amount of storage to allocate for your DB storage type in GiB (gibibytes).`, + }, + "arn": framework.ARNAttributeComputedOnly(), + "availability_zone": schema.StringAttribute{ + Computed: true, + Description: `The Availability Zone in which the DB instance resides.`, + }, + "bucket": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultBucketValue), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(2), + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches( + // Taken from the model for TimestreamInfluxDB in AWS SDK Go V2 + // https://github.com/aws/aws-sdk-go-v2/blob/8209abb7fa1aeb513228b4d8c1a459aeb6209d4d/codegen/sdk-codegen/aws-models/timestream-influxdb.json#L768 + regexache.MustCompile("^[^_][^\"]*$"), + "", + ), + }, + Description: `The name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. + A bucket combines the concept of a database and a retention period (the duration of time + that each data point persists). 
A bucket belongs to an organization.`, + }, + "db_instance_type": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.OneOf( + string(awstypes.DbInstanceTypeDbInfluxMedium), + string(awstypes.DbInstanceTypeDbInfluxLarge), + string(awstypes.DbInstanceTypeDbInfluxXlarge), + string(awstypes.DbInstanceTypeDbInflux2xlarge), + string(awstypes.DbInstanceTypeDbInflux4xlarge), + string(awstypes.DbInstanceTypeDbInflux8xlarge), + string(awstypes.DbInstanceTypeDbInflux12xlarge), + string(awstypes.DbInstanceTypeDbInflux16xlarge), + ), + }, + Description: `The Timestream for InfluxDB DB instance type to run InfluxDB on.`, + }, + "db_parameter_group_identifier": schema.StringAttribute{ + Optional: true, + // Once a parameter group is associated with a DB instance, it cannot be removed. + // Therefore, if db_parameter_group_identifier is removed, a replace of the DB instance + // is necessary. + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplaceIf( + statementReplaceIf, "Replace db_parameter_group_identifier diff", "Replace db_parameter_group_identifier diff", + ), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(3), + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches( + // Taken from the model for TimestreamInfluxDB in AWS SDK Go V2 + // https://github.com/aws/aws-sdk-go-v2/blob/8209abb7fa1aeb513228b4d8c1a459aeb6209d4d/codegen/sdk-codegen/aws-models/timestream-influxdb.json#L1390 + regexache.MustCompile("^[a-zA-Z0-9]+$"), + "", + ), + }, + Description: `The id of the DB parameter group assigned to your DB instance.`, + }, + "db_storage_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(string(awstypes.DbStorageTypeInfluxIoIncludedT1)), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.OneOf( + string(awstypes.DbStorageTypeInfluxIoIncludedT1), + string(awstypes.DbStorageTypeInfluxIoIncludedT2), + string(awstypes.DbStorageTypeInfluxIoIncludedT3), + ), + }, + Description: `The Timestream for InfluxDB DB storage type to read and write InfluxDB data. + You can choose between 3 different types of provisioned Influx IOPS included storage according + to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, + Influx IO Included 16000 IOPS.`, + }, + "deployment_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(string(awstypes.DeploymentTypeSingleAz)), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.OneOf( + string(awstypes.DeploymentTypeSingleAz), + string(awstypes.DeploymentTypeWithMultiazStandby), + ), + }, + Description: `Specifies whether the DB instance will be deployed as a standalone instance or + with a Multi-AZ standby for high availability.`, + }, + "endpoint": schema.StringAttribute{ + Computed: true, + Description: `The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.`, + }, + "id": framework.IDAttribute(), + "influx_auth_parameters_secret_arn": schema.StringAttribute{ + Computed: true, + Description: `The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the + initial InfluxDB authorization parameters. 
The secret value is a JSON formatted + key-value pair holding InfluxDB authorization values: organization, bucket, + username, and password.`, + }, + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(3), + stringvalidator.LengthAtMost(40), + stringvalidator.RegexMatches( + // Taken from the model for TimestreamInfluxDB in AWS SDK Go V2 + // https://github.com/aws/aws-sdk-go-v2/blob/8209abb7fa1aeb513228b4d8c1a459aeb6209d4d/codegen/sdk-codegen/aws-models/timestream-influxdb.json#L1215 + regexache.MustCompile("^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$"), + "", + ), + }, + Description: `The name that uniquely identifies the DB instance when interacting with the + Amazon Timestream for InfluxDB API and CLI commands. This name will also be a + prefix included in the endpoint. DB instance names must be unique per customer + and per region.`, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "organization": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultOrganizationValue), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(64), + }, + Description: `The name of the initial organization for the initial admin user in InfluxDB. An + InfluxDB organization is a workspace for a group of users.`, + }, + "password": schema.StringAttribute{ + Required: true, + Sensitive: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(8), + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches(regexache.MustCompile("^[a-zA-Z0-9]+$"), ""), + }, + Description: `The password of the initial admin user created in InfluxDB. This password will + allow you to access the InfluxDB UI to perform various administrative tasks and + also use the InfluxDB CLI to create an operator token. These attributes will be + stored in a Secret created in AWS SecretManager in your account.`, + }, + "publicly_accessible": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + Description: `Configures the DB instance with a public IP to facilitate access.`, + }, + "secondary_availability_zone": schema.StringAttribute{ + Computed: true, + Description: `The Availability Zone in which the standby instance is located when deploying + with a MultiAZ standby instance.`, + }, + "status": schema.StringAttribute{ + Computed: true, + Description: `The status of the DB instance.`, + }, + "username": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultUsernameValue), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches( + regexache.MustCompile("^[a-zA-Z]([a-zA-Z0-9]*(-[a-zA-Z0-9]+)*)?$"), + `Must start with a letter and can't end with a hyphen or contain two + consecutive hyphens`, + ), + }, + Description: `The username of the initial admin user created in InfluxDB. + Must start with a letter and can't end with a hyphen or contain two + consecutive hyphens. 
For example, my-user1. This username will allow + you to access the InfluxDB UI to perform various administrative tasks + and also use the InfluxDB CLI to create an operator token. These + attributes will be stored in a Secret created in Amazon Secrets + Manager in your account`, + }, + "vpc_security_group_ids": schema.SetAttribute{ + Required: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Validators: []validator.Set{ + setvalidator.SizeAtLeast(1), + setvalidator.SizeAtMost(5), + setvalidator.ValueStringsAre( + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches(regexache.MustCompile("^sg-[a-z0-9]+$"), ""), + ), + }, + Description: `A list of VPC security group IDs to associate with the DB instance.`, + }, + "vpc_subnet_ids": schema.SetAttribute{ + Required: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Validators: []validator.Set{ + setvalidator.SizeAtLeast(1), + setvalidator.SizeAtMost(3), + setvalidator.ValueStringsAre( + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches(regexache.MustCompile("^subnet-[a-z0-9]+$"), ""), + ), + }, + Description: `A list of VPC subnet IDs to associate with the DB instance. Provide at least + two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby.`, + }, + }, + Blocks: map[string]schema.Block{ + "log_delivery_configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + Description: `Configuration for sending InfluxDB engine logs to a specified S3 bucket.`, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "s3_configuration": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "bucket_name": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(3), + stringvalidator.LengthAtMost(63), + stringvalidator.RegexMatches(regexache.MustCompile("^[0-9a-z]+[0-9a-z\\.\\-]*[0-9a-z]+$"), ""), + }, + Description: `The name of the S3 bucket to deliver logs to.`, + }, + "enabled": schema.BoolAttribute{ + Required: true, + Description: `Indicates whether log delivery to the S3 bucket is enabled.`, + }, + }, + Description: `Configuration for S3 bucket log delivery.`, + }, + }, + }, + }, + "timeouts": timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func statementReplaceIf(ctx context.Context, req planmodifier.StringRequest, resp *stringplanmodifier.RequiresReplaceIfFuncResponse) { + if req.State.Raw.IsNull() || req.Plan.Raw.IsNull() { + return + } + var plan, state resourceDbInstanceData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + dbParameterGroupIdentifierRemoved := (!state.DBParameterGroupIdentifier.IsNull() && plan.DBParameterGroupIdentifier.IsNull()) + + resp.RequiresReplace = dbParameterGroupIdentifierRemoved +} + +func (r *resourceDbInstance) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var plan resourceDbInstanceData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + + in := ×treaminfluxdb.CreateDbInstanceInput{ + AllocatedStorage: aws.Int32(int32(plan.AllocatedStorage.ValueInt64())), + DbInstanceType: awstypes.DbInstanceType(plan.DBInstanceType.ValueString()), + Name: aws.String(plan.Name.ValueString()), + Password: aws.String(plan.Password.ValueString()), + VpcSecurityGroupIds: flex.ExpandFrameworkStringValueSet(ctx, plan.VPCSecurityGroupIDs), + VpcSubnetIds: flex.ExpandFrameworkStringValueSet(ctx, plan.VPCSubnetIDs), + Tags: getTagsIn(ctx), + } + if !plan.Bucket.IsNull() { + in.Bucket = aws.String(plan.Bucket.ValueString()) + } + if !plan.DBParameterGroupIdentifier.IsNull() { + in.DbParameterGroupIdentifier = aws.String(plan.DBParameterGroupIdentifier.ValueString()) + } + if !plan.DBStorageType.IsNull() { + in.DbStorageType = awstypes.DbStorageType(plan.DBStorageType.ValueString()) + } + if !plan.DeploymentType.IsNull() { + in.DeploymentType = awstypes.DeploymentType(plan.DeploymentType.ValueString()) + } + if !plan.LogDeliveryConfiguration.IsNull() { + var tfList []logDeliveryConfigurationData + resp.Diagnostics.Append(plan.LogDeliveryConfiguration.ElementsAs(ctx, &tfList, false)...) + if resp.Diagnostics.HasError() { + return + } + in.LogDeliveryConfiguration = expandLogDeliveryConfiguration(tfList) + } + if !plan.Organization.IsNull() { + in.Organization = aws.String(plan.Organization.ValueString()) + } + if !plan.PubliclyAccessible.IsNull() { + in.PubliclyAccessible = aws.Bool(plan.PubliclyAccessible.ValueBool()) + } + if !plan.Username.IsNull() { + in.Username = aws.String(plan.Username.ValueString()) + } + + out, err := conn.CreateDbInstance(ctx, in) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDbInstance, plan.Name.String(), err), + err.Error(), + ) + return + } + if out == nil || out.Id == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDbInstance, plan.Name.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + // Computed attributes + plan.ARN = flex.StringToFramework(ctx, out.Arn) + plan.ID = flex.StringToFramework(ctx, out.Id) + plan.AvailabilityZone = flex.StringToFramework(ctx, out.AvailabilityZone) + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + _, err = waitDbInstanceCreated(ctx, conn, plan.ID.ValueString(), createTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForCreation, ResNameDbInstance, plan.Name.String(), err), + err.Error(), + ) + return + } + + readOut, err := findDbInstanceByID(ctx, conn, plan.ID.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, plan.ID.String(), err), + err.Error(), + ) + return + } + + // Computed attributes only set after resource is finished creating + plan.Endpoint = flex.StringToFramework(ctx, readOut.Endpoint) + plan.InfluxAuthParametersSecretARN = flex.StringToFramework(ctx, readOut.InfluxAuthParametersSecretArn) + plan.Status = flex.StringToFramework(ctx, (*string)(&readOut.Status)) + plan.SecondaryAvailabilityZone = flex.StringToFramework(ctx, readOut.SecondaryAvailabilityZone) + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+} + +func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var state resourceDbInstanceData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findDbInstanceByID(ctx, conn, state.ID.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + + state.ARN = flex.StringToFramework(ctx, out.Arn) + state.AllocatedStorage = flex.Int32ToFramework(ctx, out.AllocatedStorage) + state.AvailabilityZone = flex.StringToFramework(ctx, out.AvailabilityZone) + state.DBInstanceType = flex.StringToFramework(ctx, (*string)(&out.DbInstanceType)) + state.DBParameterGroupIdentifier = flex.StringToFramework(ctx, out.DbParameterGroupIdentifier) + state.DBStorageType = flex.StringToFramework(ctx, (*string)(&out.DbStorageType)) + state.DeploymentType = flex.StringToFramework(ctx, (*string)(&out.DeploymentType)) + state.Endpoint = flex.StringToFramework(ctx, out.Endpoint) + state.ID = flex.StringToFramework(ctx, out.Id) + state.InfluxAuthParametersSecretARN = flex.StringToFramework(ctx, out.InfluxAuthParametersSecretArn) + logDeliveryConfiguration, d := flattenLogDeliveryConfiguration(ctx, out.LogDeliveryConfiguration) + resp.Diagnostics.Append(d...) + state.LogDeliveryConfiguration = logDeliveryConfiguration + state.Name = flex.StringToFramework(ctx, out.Name) + state.PubliclyAccessible = flex.BoolToFramework(ctx, out.PubliclyAccessible) + state.SecondaryAvailabilityZone = flex.StringToFramework(ctx, out.SecondaryAvailabilityZone) + state.Status = flex.StringToFramework(ctx, (*string)(&out.Status)) + state.VPCSecurityGroupIDs = flex.FlattenFrameworkStringValueSet[string](ctx, out.VpcSecurityGroupIds) + state.VPCSubnetIDs = flex.FlattenFrameworkStringValueSet[string](ctx, out.VpcSubnetIds) + + // timestreaminfluxdb.GetDbInstance will not return InfluxDB managed attributes, like username, + // bucket, organization, or password. All of these attributes are stored in a secret indicated by + // out.InfluxAuthParametersSecretArn. To support importing, these attributes must be read from the + // secret. 
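+	// Note: the secret value read below is expected to be a JSON document holding the
+	// initial authorization values described for influx_auth_parameters_secret_arn above,
+	// i.e. the "username", "password", "organization", and "bucket" keys consumed by the
+	// lookups that follow.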
+ secretsConn := r.Meta().SecretsManagerClient(ctx) + secretsOut, err := secretsConn.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{ + SecretId: out.InfluxAuthParametersSecretArn, + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + + secrets := make(map[string]string) + if err := json.Unmarshal([]byte(aws.ToString(secretsOut.SecretString)), &secrets); err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + if username, ok := secrets["username"]; ok { + state.Username = flex.StringValueToFramework[string](ctx, username) + } else { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + if password, ok := secrets["password"]; ok { + state.Password = flex.StringValueToFramework[string](ctx, password) + } else { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + if organization, ok := secrets["organization"]; ok { + state.Organization = flex.StringValueToFramework[string](ctx, organization) + } else { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + if bucket, ok := secrets["bucket"]; ok { + state.Bucket = flex.StringValueToFramework[string](ctx, bucket) + } else { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + + tags, err := listTags(ctx, conn, state.ARN.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + setTagsOut(ctx, Tags(tags)) + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceDbInstance) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var plan, state resourceDbInstanceData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + // Only fields without RequireReplace() will cause an update. + // Any other field changes will cause the resource to be destroyed and recreated. + // for aws_timestreaminfluxdb_db_instance this is tags, log_delivery_configuration, and + // db_parameter_group_identifier. 
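+	// Tag-only changes intentionally do not reach the UpdateDbInstance call below; with the
+	// @Tags annotation on this resource, tag updates are presumably applied by the provider's
+	// transparent tagging handling rather than in this method.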
+ if !plan.DBParameterGroupIdentifier.Equal(state.DBParameterGroupIdentifier) || + !plan.LogDeliveryConfiguration.Equal(state.LogDeliveryConfiguration) { + + in := ×treaminfluxdb.UpdateDbInstanceInput{ + Identifier: aws.String(plan.ID.ValueString()), + } + + if !plan.DBParameterGroupIdentifier.IsNull() && !plan.DBParameterGroupIdentifier.Equal(state.DBParameterGroupIdentifier) { + in.DbParameterGroupIdentifier = aws.String(plan.DBParameterGroupIdentifier.ValueString()) + } + + if !plan.LogDeliveryConfiguration.IsNull() && !plan.LogDeliveryConfiguration.Equal(state.LogDeliveryConfiguration) { + var tfList []logDeliveryConfigurationData + resp.Diagnostics.Append(plan.LogDeliveryConfiguration.ElementsAs(ctx, &tfList, false)...) + if resp.Diagnostics.HasError() { + return + } + in.LogDeliveryConfiguration = expandLogDeliveryConfiguration(tfList) + } + + out, err := conn.UpdateDbInstance(ctx, in) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDbInstance, plan.ID.String(), err), + err.Error(), + ) + return + } + if out == nil || out.Id == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDbInstance, plan.ID.String(), nil), + errors.New("empty output").Error(), + ) + return + } + } + + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + _, err := waitDbInstanceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForUpdate, ResNameDbInstance, plan.ID.String(), err), + err.Error(), + ) + return + } + + // Update status to current status + readOut, err := findDbInstanceByID(ctx, conn, plan.ID.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, plan.ID.String(), err), + err.Error(), + ) + return + } + // Setting computed attributes + plan.ARN = flex.StringToFramework(ctx, readOut.Arn) + plan.AvailabilityZone = flex.StringToFramework(ctx, readOut.AvailabilityZone) + plan.DBStorageType = flex.StringToFramework(ctx, (*string)(&readOut.DbStorageType)) + plan.DeploymentType = flex.StringToFramework(ctx, (*string)(&readOut.DeploymentType)) + plan.Endpoint = flex.StringToFramework(ctx, readOut.Endpoint) + plan.ID = flex.StringToFramework(ctx, readOut.Id) + plan.InfluxAuthParametersSecretARN = flex.StringToFramework(ctx, readOut.InfluxAuthParametersSecretArn) + plan.SecondaryAvailabilityZone = flex.StringToFramework(ctx, readOut.SecondaryAvailabilityZone) + plan.Status = flex.StringToFramework(ctx, (*string)(&readOut.Status)) + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceDbInstance) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var state resourceDbInstanceData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + in := ×treaminfluxdb.DeleteDbInstanceInput{ + Identifier: aws.String(state.ID.ValueString()), + } + + _, err := conn.DeleteDbInstance(ctx, in) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionDeleting, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitDbInstanceDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForDeletion, ResNameDbInstance, state.ID.String(), err), + err.Error(), + ) + return + } +} + +func (r *resourceDbInstance) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} +func (r *resourceDbInstance) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func waitDbInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.CreateDbInstanceOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{string(awstypes.StatusCreating), string(awstypes.StatusUpdating), string(awstypes.StatusModifying)}, + Target: []string{string(awstypes.StatusAvailable)}, + Refresh: statusDbInstance(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*timestreaminfluxdb.CreateDbInstanceOutput); ok { + return out, err + } + + return nil, err +} + +func waitDbInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.UpdateDbInstanceOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{string(awstypes.StatusModifying), string(awstypes.StatusUpdating)}, + Target: []string{string(awstypes.StatusAvailable)}, + Refresh: statusDbInstance(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*timestreaminfluxdb.UpdateDbInstanceOutput); ok { + return out, err + } + + return nil, err +} + +func waitDbInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.DeleteDbInstanceOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)}, + Target: []string{}, + Refresh: statusDbInstance(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + PollInterval: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*timestreaminfluxdb.DeleteDbInstanceOutput); ok { + return out, err + } + + return nil, err +} + +func statusDbInstance(ctx context.Context, conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findDbInstanceByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + 
return nil, "", err + } + return out, string(out.Status), nil + } +} + +func findDbInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id string) (*timestreaminfluxdb.GetDbInstanceOutput, error) { + in := ×treaminfluxdb.GetDbInstanceInput{ + Identifier: aws.String(id), + } + + out, err := conn.GetDbInstance(ctx, in) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.Id == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +func flattenLogDeliveryConfiguration(ctx context.Context, apiObject *awstypes.LogDeliveryConfiguration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: logDeliveryConfigrationAttrTypes} + + if apiObject == nil { + return types.ListNull(elemType), diags + } + s3Configuration, d := flattenS3Configuration(ctx, apiObject.S3Configuration) + obj := map[string]attr.Value{ + "s3_configuration": s3Configuration, + } + objVal, d := types.ObjectValue(logDeliveryConfigrationAttrTypes, obj) + diags.Append(d...) + + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) + + return listVal, diags +} + +func flattenS3Configuration(ctx context.Context, apiObject *awstypes.S3Configuration) (types.Object, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: s3ConfigurationAttrTypes} + + if apiObject == nil { + return types.ObjectNull(elemType.AttrTypes), diags + } + + obj := map[string]attr.Value{ + "bucket_name": flex.StringValueToFramework(ctx, *apiObject.BucketName), + "enabled": flex.BoolToFramework(ctx, *&apiObject.Enabled), + } + objVal, d := types.ObjectValue(s3ConfigurationAttrTypes, obj) + diags.Append(d...) 
+ + return objVal, diags +} + +func expandLogDeliveryConfiguration(tfList []logDeliveryConfigurationData) *awstypes.LogDeliveryConfiguration { + if len(tfList) == 0 { + return nil + } + + tfObj := tfList[0] + apiObject := &awstypes.LogDeliveryConfiguration{ + S3Configuration: expandS3Configuration(tfObj.S3Configuration), + } + return apiObject +} + +func expandS3Configuration(tfObj s3ConfigurationData) *awstypes.S3Configuration { + apiObject := &awstypes.S3Configuration{ + BucketName: aws.String(tfObj.BucketName.ValueString()), + Enabled: aws.Bool(tfObj.Enabled.ValueBool()), + } + return apiObject +} + +type resourceDbInstanceData struct { + AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` + ARN types.String `tfsdk:"arn"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Bucket types.String `tfsdk:"bucket"` + DBInstanceType types.String `tfsdk:"db_instance_type"` + DBParameterGroupIdentifier types.String `tfsdk:"db_parameter_group_identifier"` + DBStorageType types.String `tfsdk:"db_storage_type"` + DeploymentType types.String `tfsdk:"deployment_type"` + Endpoint types.String `tfsdk:"endpoint"` + ID types.String `tfsdk:"id"` + InfluxAuthParametersSecretARN types.String `tfsdk:"influx_auth_parameters_secret_arn"` + LogDeliveryConfiguration types.List `tfsdk:"log_delivery_configuration"` + Name types.String `tfsdk:"name"` + Organization types.String `tfsdk:"organization"` + Password types.String `tfsdk:"password"` + PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` + SecondaryAvailabilityZone types.String `tfsdk:"secondary_availability_zone"` + Status types.String `tfsdk:"status"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Username types.String `tfsdk:"username"` + VPCSecurityGroupIDs types.Set `tfsdk:"vpc_security_group_ids"` + VPCSubnetIDs types.Set `tfsdk:"vpc_subnet_ids"` +} + +type logDeliveryConfigurationData struct { + S3Configuration s3ConfigurationData `tfsdk:"s3_configuration"` +} + +type s3ConfigurationData struct { + BucketName types.String `tfsdk:"bucket_name"` + Enabled types.Bool `tfsdk:"enabled"` +} + +var logDeliveryConfigrationAttrTypes = map[string]attr.Type{ + "s3_configuration": types.ObjectType{AttrTypes: s3ConfigurationAttrTypes}, +} + +var s3ConfigurationAttrTypes = map[string]attr.Type{ + "bucket_name": types.StringType, + "enabled": types.BoolType, +} diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go new file mode 100644 index 00000000000..541c5e514c1 --- /dev/null +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -0,0 +1,972 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package timestreaminfluxdb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/names" + + tftimestreaminfluxdb "github.com/hashicorp/terraform-provider-aws/internal/service/timestreaminfluxdb" +) + +func TestAccTimestreamInfluxDBDbInstance_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + // Verification of read-only attributes and default values. + // DB instance will not be publicly accessible and will not have an endpoint. 
+ acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "timestream-influxdb", regexache.MustCompile(`db-instance/+.`)), + resource.TestCheckResourceAttrSet(resourceName, "availability_zone"), + resource.TestCheckResourceAttr(resourceName, "bucket", tftimestreaminfluxdb.DefaultBucketValue), + resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.DeploymentTypeSingleAz)), + resource.TestCheckResourceAttrSet(resourceName, "influx_auth_parameters_secret_arn"), + resource.TestCheckResourceAttr(resourceName, "organization", tftimestreaminfluxdb.DefaultOrganizationValue), + resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "false"), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StatusAvailable)), + resource.TestCheckResourceAttr(resourceName, "username", tftimestreaminfluxdb.DefaultUsernameValue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreaminfluxdb.ResourceDbInstance, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_logDeliveryConfiguration(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + // The same random name will be used for both the DB instance and the log S3 bucket name. 
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_logDeliveryConfigurationEnabled(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", "2"), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", "true"), + ), + }, + { + Config: testAccDbInstanceConfig_logDeliveryConfigurationNotEnabled(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", "2"), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_publiclyAccessible(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_publiclyAccessible(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttrSet(resourceName, "endpoint"), + resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_deploymentTypeMultiAzStandby(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: 
testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_deploymentTypeMultiAzStandby(rName, acctest.Region()), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + // DB instance will not be publicly accessible and will not have an endpoint. + // DB instance will have a secondary availability zone. + resource.TestCheckResourceAttrSet(resourceName, "secondary_availability_zone"), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.DeploymentTypeWithMultiazStandby)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_username(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + testUsername := "testusername" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_username(rName, testUsername), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "username", testUsername), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_bucket(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + testBucketName := "testbucket" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_bucket(rName, testBucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "bucket", testBucketName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_organization(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + testOrganizationName := "testorganization" + + resource.ParallelTest(t, resource.TestCase{ + 
PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_organization(rName, testOrganizationName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "organization", testOrganizationName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_tags(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1), + ), + }, + { + Config: testAccDbInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), + ), + }, + { + Config: testAccDbInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDbInstance_dbInstanceType(t *testing.T) { + ctx := acctest.Context(t) + if 
testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbinstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDbInstanceConfig_dbInstanceTypeLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.large"), + ), + }, + { + Config: testAccDbInstanceConfig_dbInstanceTypeXLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.xlarge"), + ), + }, + { + Config: testAccDbInstanceConfig_dbInstanceType2XLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.2xlarge"), + ), + }, + { + Config: testAccDbInstanceConfig_dbInstanceType4XLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.4xlarge"), + ), + }, + { + Config: testAccDbInstanceConfig_dbInstanceType8XLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.8xlarge"), + ), + }, + { + Config: testAccDbInstanceConfig_dbInstanceType12XLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.12xlarge"), + ), + }, + { + Config: testAccDbInstanceConfig_dbInstanceType16XLarge(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.16xlarge"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func testAccCheckDbInstanceDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_timestreaminfluxdb_db_instance" { + continue + } + + input := ×treaminfluxdb.GetDbInstanceInput{ + Identifier: aws.String(rs.Primary.ID), + } + _, err := conn.GetDbInstance(ctx, input) + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil + } + if err != nil { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDbInstance, rs.Primary.ID, err) + } + + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDbInstance, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } 
+} + +func testAccCheckDbInstanceExists(ctx context.Context, name string, dbinstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDbInstance, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDbInstance, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) + resp, err := conn.GetDbInstance(ctx, ×treaminfluxdb.GetDbInstanceInput{ + Identifier: aws.String(rs.Primary.ID), + }) + + if err != nil { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDbInstance, rs.Primary.ID, err) + } + + *dbinstance = *resp + + return nil + } +} + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) + + input := ×treaminfluxdb.ListDbInstancesInput{} + _, err := conn.ListDbInstances(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccCheckDbInstanceNotRecreated(before, after *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + if before, after := aws.ToString(before.Id), aws.ToString(after.Id); before != after { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingNotRecreated, tftimestreaminfluxdb.ResNameDbInstance, before, errors.New("recreated")) + } + + return nil + } +} + +func testAccDbInstanceConfig_base() string { + return fmt.Sprintf(` +resource "aws_vpc" "test_vpc" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test_subnet" { + vpc_id = aws_vpc.test_vpc.id + cidr_block = "10.0.1.0/24" +} + +resource "aws_security_group" "test_security_group" { + vpc_id = aws_vpc.test_vpc.id +} +`) +} + +// Minimal configuration. +func testAccDbInstanceConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q +} +`, rName)) +} + +// Configuration with log_delivery_configuration set and enabled. 
+func testAccDbInstanceConfig_logDeliveryConfigurationEnabled(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_s3_bucket" "test_s3_bucket" { + bucket = %[1]q + force_destroy = true +} + +data "aws_iam_policy_document" "allow_timestreaminfluxdb" { + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.test_s3_bucket.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { + bucket = aws_s3_bucket.test_s3_bucket.id + policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + publicly_accessible = false + name = %[1]q + + log_delivery_configuration { + s3_configuration { + bucket_name = %[1]q + enabled = true + } + } +} +`, rName)) +} + +// Configuration with log_delivery_configuration set but not enabled. +func testAccDbInstanceConfig_logDeliveryConfigurationNotEnabled(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_s3_bucket" "test_s3_bucket" { + bucket = %[1]q + force_destroy = true +} + +data "aws_iam_policy_document" "allow_timestreaminfluxdb" { + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.test_s3_bucket.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { + bucket = aws_s3_bucket.test_s3_bucket.id + policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + publicly_accessible = false + name = %[1]q + + log_delivery_configuration { + s3_configuration { + bucket_name = %[1]q + enabled = false + } + } +} +`, rName)) +} + +// Configuration that is publicly accessible. An endpoint will be created +// for the DB instance but no inbound rules will be defined, preventing access. 
+func testAccDbInstanceConfig_publiclyAccessible(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_internet_gateway" "test_internet_gateway" { + vpc_id = aws_vpc.test_vpc.id +} + +resource "aws_route" "test_route" { + route_table_id = aws_vpc.test_vpc.main_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test_internet_gateway.id +} + +resource "aws_route_table_association" "test_route_table_association" { + subnet_id = aws_subnet.test_subnet.id + route_table_id = aws_vpc.test_vpc.main_route_table_id +} + +resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_vpc" { + security_group_id = aws_security_group.test_security_group.id + referenced_security_group_id = aws_security_group.test_security_group.id + ip_protocol = -1 +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + publicly_accessible = true +} +`, rName)) +} + +func testAccDbInstanceConfig_deploymentTypeMultiAzStandby(rName string, regionName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test_vpc" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test_subnet_1" { + vpc_id = aws_vpc.test_vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "%[2]sa" +} + +resource "aws_subnet" "test_subnet_2" { + vpc_id = aws_vpc.test_vpc.id + cidr_block = "10.0.2.0/24" + availability_zone = "%[2]sb" +} + +resource "aws_security_group" "test_security_group" { + vpc_id = aws_vpc.test_vpc.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet_1.id, aws_subnet.test_subnet_2.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + deployment_type = "WITH_MULTIAZ_STANDBY" +} +`, rName, regionName) +} + +func testAccDbInstanceConfig_username(rName, username string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + username = %[2]q +} +`, rName, username)) +} + +func testAccDbInstanceConfig_bucket(rName, bucketName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + bucket = %[2]q +} +`, rName, bucketName)) +} + +func testAccDbInstanceConfig_organization(rName, organizationName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + 
db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + organization = %[2]q +} +`, rName, organizationName)) +} + +func testAccDbInstanceConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccDbInstanceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} + +func testAccDbInstanceConfig_dbStorageTypeT2(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + db_storage_type = "InfluxIOIncludedT2" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbStorageTypeT3(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + db_storage_type = "InfluxIOIncludedT3" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceTypeLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.large" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceTypeXLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.xlarge" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceType2XLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), 
fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + allocated_storage = 40 + db_instance_type = "db.influx.2xlarge" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceType4XLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.4xlarge" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceType8XLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.8xlarge" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceType12XLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.12xlarge" +} +`, rName)) +} + +func testAccDbInstanceConfig_dbInstanceType16XLarge(rName string) string { + return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_instance" "test" { + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.16xlarge" +} +`, rName)) +} diff --git a/internal/service/timestreaminfluxdb/exports_test.go b/internal/service/timestreaminfluxdb/exports_test.go new file mode 100644 index 00000000000..157b1503203 --- /dev/null +++ b/internal/service/timestreaminfluxdb/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package timestreaminfluxdb + +// Exports for use in tests only. 
+var (
+  ResourceDbInstance = newResourceDbInstance
+
+  FindDbInstanceByID = findDbInstanceByID
+)
diff --git a/internal/service/timestreaminfluxdb/service_package_gen.go b/internal/service/timestreaminfluxdb/service_package_gen.go
index 20175d1c1e6..e28483d81a2 100644
--- a/internal/service/timestreaminfluxdb/service_package_gen.go
+++ b/internal/service/timestreaminfluxdb/service_package_gen.go
@@ -20,7 +20,15 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv
 }
 
 func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource {
-  return []*types.ServicePackageFrameworkResource{}
+  return []*types.ServicePackageFrameworkResource{
+    {
+      Factory: newResourceDbInstance,
+      Name:    "Db Instance",
+      Tags: &types.ServicePackageResourceTags{
+        IdentifierAttribute: names.AttrARN,
+      },
+    },
+  }
 }
 
 func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource {
diff --git a/internal/service/timestreaminfluxdb/sweep.go b/internal/service/timestreaminfluxdb/sweep.go
new file mode 100644
index 00000000000..fe570c0aa81
--- /dev/null
+++ b/internal/service/timestreaminfluxdb/sweep.go
@@ -0,0 +1,66 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package timestreaminfluxdb
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go-v2/aws"
+  "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb"
+  "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+  "github.com/hashicorp/terraform-provider-aws/internal/sweep"
+  "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2"
+  "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework"
+  "github.com/hashicorp/terraform-provider-aws/names"
+)
+
+func RegisterSweepers() {
+  resource.AddTestSweepers("aws_timestreaminfluxdb_db_instance", &resource.Sweeper{
+    Name: "aws_timestreaminfluxdb_db_instance",
+    F:    sweepDbInstances,
+  })
+}
+
+func sweepDbInstances(region string) error {
+  ctx := sweep.Context(region)
+  client, err := sweep.SharedRegionalSweepClient(ctx, region)
+  if err != nil {
+    return fmt.Errorf("error getting client: %s", err)
+  }
+  input := &timestreaminfluxdb.ListDbInstancesInput{}
+  conn := client.TimestreamInfluxDBClient(ctx)
+  sweepResources := make([]sweep.Sweepable, 0)
+
+  pages := timestreaminfluxdb.NewListDbInstancesPaginator(conn, input)
+  for pages.HasMorePages() {
+    page, err := pages.NextPage(ctx)
+
+    if awsv2.SkipSweepError(err) {
+      log.Printf("[WARN] Skipping TimestreamInfluxDB DB instance sweep for %s: %s", region, err)
+      return nil
+    }
+
+    if err != nil {
+      return fmt.Errorf("error listing TimestreamInfluxDB DB instances (%s): %w", region, err)
+    }
+
+    for _, v := range page.Items {
+      id := aws.ToString(v.Id)
+      log.Printf("[INFO] Deleting TimestreamInfluxDB DB instance: %s", id)
+
+      sweepResources = append(sweepResources, framework.NewSweepResource(newResourceDbInstance, client,
+        framework.NewAttribute(names.AttrID, id),
+      ))
+    }
+  }
+
+  err = sweep.SweepOrchestrator(ctx, sweepResources)
+
+  if err != nil {
+    return fmt.Errorf("error sweeping TimestreamInfluxDB DB instances (%s): %w", region, err)
+  }
+
+  return nil
+}
diff --git a/internal/sweep/register_gen_test.go b/internal/sweep/register_gen_test.go
index 6266d056398..090747e5c1e 100644
--- a/internal/sweep/register_gen_test.go
+++ b/internal/sweep/register_gen_test.go
@@ -146,6 +146,7 @@ import (
"github.com/hashicorp/terraform-provider-aws/internal/service/storagegateway" "github.com/hashicorp/terraform-provider-aws/internal/service/swf" "github.com/hashicorp/terraform-provider-aws/internal/service/synthetics" + "github.com/hashicorp/terraform-provider-aws/internal/service/timestreaminfluxdb" "github.com/hashicorp/terraform-provider-aws/internal/service/timestreamwrite" "github.com/hashicorp/terraform-provider-aws/internal/service/transcribe" "github.com/hashicorp/terraform-provider-aws/internal/service/transfer" @@ -302,6 +303,7 @@ func registerSweepers() { storagegateway.RegisterSweepers() swf.RegisterSweepers() synthetics.RegisterSweepers() + timestreaminfluxdb.RegisterSweepers() timestreamwrite.RegisterSweepers() transcribe.RegisterSweepers() transfer.RegisterSweepers() diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown new file mode 100644 index 00000000000..44960ca90e7 --- /dev/null +++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown @@ -0,0 +1,281 @@ +--- +subcategory: "Timestream for InfluxDB" +layout: "aws" +page_title: "AWS: aws_timestreaminfluxdb_db_instance" +description: |- + Terraform resource for managing an Amazon Timestream for InfluxDB Db Instance. +--- +` +# Resource: aws_timestreaminfluxdb_db_instance + +Terraform resource for managing an Amazon Timestream for InfluxDB Db Instance. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_timestreaminfluxdb_db_instance" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" +} +``` + +### Usage with Prerequisite Resources + +All Timestream for InfluxDB instances require a VPC, subnet, and security group. The following example shows how these prerequisite resources can be created and used with `aws_timestreaminfluxdb_db_instance`. + +```terraform +resource "aws_vpc" "example_vpc" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "example_subnet" { + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.1.0/24" +} + +resource "aws_security_group" "example_security_group" { + name = "example_security_group" + vpc_id = aws_vpc.example_vpc.id +} + +resource "aws_timestreaminfluxdb_db_instance" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.example_subnet.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" +} +``` + +### Usage with Public Internet Access Enabled + +The following configuration shows how to define the necessary resources and arguments to allow public internet access on your Timestream for InfluxDB instance's endpoint on port `8086`. After applying this configuration, the instance's InfluxDB UI can be accessed by visiting your instance's endpoint at port `8086`. 
+
+```terraform
+resource "aws_vpc" "example_vpc" {
+  cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "example_subnet" {
+  vpc_id = aws_vpc.example_vpc.id
+  cidr_block = "10.0.1.0/24"
+}
+
+resource "aws_security_group" "example_security_group" {
+  name = "example_security_group"
+  vpc_id = aws_vpc.example_vpc.id
+}
+
+resource "aws_internet_gateway" "example_internet_gateway" {
+  vpc_id = aws_vpc.example_vpc.id
+
+  tags = {
+    Name = "example_internet_gateway"
+  }
+}
+
+resource "aws_route" "example_route" {
+  route_table_id = aws_vpc.example_vpc.main_route_table_id
+  destination_cidr_block = "0.0.0.0/0"
+  gateway_id = aws_internet_gateway.example_internet_gateway.id
+}
+
+resource "aws_route_table_association" "example_route_table_association" {
+  subnet_id = aws_subnet.example_subnet.id
+  route_table_id = aws_vpc.example_vpc.main_route_table_id
+}
+
+resource "aws_vpc_security_group_ingress_rule" "example_vpc_security_group_ingress_rule_vpc" {
+  security_group_id = aws_security_group.example_security_group.id
+  referenced_security_group_id = aws_security_group.example_security_group.id
+  ip_protocol = -1
+}
+
+resource "aws_vpc_security_group_ingress_rule" "example_vpc_security_group_ingress_rule_influxdb" {
+  security_group_id = aws_security_group.example_security_group.id
+  cidr_ipv4 = "0.0.0.0/0"
+  ip_protocol = "tcp"
+  from_port = 8086
+  to_port = 8086
+}
+
+resource "aws_timestreaminfluxdb_db_instance" "example" {
+  allocated_storage = 20
+  bucket = "example-bucket-name"
+  db_instance_type = "db.influx.medium"
+  username = "admin"
+  password = "example-password"
+  vpc_subnet_ids = [aws_subnet.example_subnet.id]
+  vpc_security_group_ids = [aws_security_group.example_security_group.id]
+  name = "example-db-instance"
+  publicly_accessible = true # False by default
+}
+```
+
+### Usage with S3 Log Delivery Enabled
+
+You can use an S3 bucket to store logs generated by your Timestream for InfluxDB instance. The following example shows what resources and arguments are required to configure an S3 bucket for logging, including the IAM policy that needs to be set in order to allow Timestream for InfluxDB to place logs in your S3 bucket. The configuration of the required VPC, security group, and subnet has been left out of the example for brevity.
+
+```terraform
+resource "aws_s3_bucket" "example_s3_bucket" {
+  bucket = "example-s3-bucket"
+}
+
+data "aws_iam_policy_document" "allow_timestreaminfluxdb_policy_document" {
+  statement {
+    actions = ["s3:PutObject"]
+    principals {
+      type = "Service"
+      identifiers = ["timestream-influxdb.amazonaws.com"]
+    }
+    resources = [
+      "${aws_s3_bucket.example_s3_bucket.arn}/*"
+    ]
+  }
+}
+
+resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb_policy" {
+  bucket = aws_s3_bucket.example_s3_bucket.id
+  policy = data.aws_iam_policy_document.allow_timestreaminfluxdb_policy_document.json
+}
+
+resource "aws_timestreaminfluxdb_db_instance" "example" {
+  allocated_storage = 20
+  bucket = "example-bucket-name"
+  db_instance_type = "db.influx.medium"
+  username = "admin"
+  password = "example-password"
+  vpc_subnet_ids = [aws_subnet.example_subnet.id]
+  vpc_security_group_ids = [aws_security_group.example_security_group.id]
+  name = "example-db-instance"
+
+  log_delivery_configuration {
+    s3_configuration {
+      bucket_name = aws_s3_bucket.example_s3_bucket.bucket
+      enabled = true
+    }
+  }
+}
+```
+
+### Usage with MultiAZ Deployment
+
+To use Multi-AZ availability, at least two subnets must be created in different availability zones and used with your Timestream for InfluxDB instance.
+ +```terraform +resource "aws_subnet" "example_subnet_1" { + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-west-2a" +} + +resource "aws_subnet" "example_subnet_2" { + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.2.0/24" + availability_zone = "us-west-2b" +} + +resource "aws_timestreaminfluxdb_db_instance" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + deployment_type = "WITH_MULTIAZ_STANDBY" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.example_subnet_1.id, aws_subnet.example_subnet_2.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" +} +``` + +## Argument Reference + +The following arguments are required: + +* `allocated_storage` - (Required) Amount of storage in GiB (gibibytes). The minimum value is 20, the maximum value is 16384. +* `db_instance_type` - (Required) Timestream for InfluxDB DB instance type to run InfluxDB on. Valid options are: `"db.influx.medium"`, `"db.influx.large"`, `"db.influx.xlarge"`, `"db.influx.2xlarge"`, `"db.influx.4xlarge"`, `"db.influx.8xlarge"`, `"db.influx.12xlarge"`, and `"db.influx.16xlarge"`. +* `name` - (Required) Name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per region. The argument must start with a letter, cannot contain consecutive hyphens (`-`) and cannot end with a hyphen. +* `password` - (Required) Password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `username`, and `organization`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `vpc_security_group_ids` - (Required) List of VPC security group IDs to associate with the DB instance. +* `vpc_subnet_ids` - (Required) List of VPC subnet IDs to associate with the DB instance. Provide at least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby. + +The following arguments are optional: + +* `bucket` - (Default `"bucket"`) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `db_parameter_group_identifier` - (Optional) ID of the DB parameter group assigned to your DB instance. If added to an existing Timestream for InfluxDB instance or given a new value, will cause an in-place update to the instance. However, if an instance already has a value for `db_parameter_group_identifier`, removing `db_parameter_group_identifier` will cause the instance to be destroyed and recreated. +* `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. 
You can choose between 3 different types of provisioned Influx IOPS included storage according to your workload's requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, and Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`.
+* `deployment_type` - (Default `"SINGLE_AZ"`) Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. Valid options are: `"SINGLE_AZ"` and `"WITH_MULTIAZ_STANDBY"`.
+* `log_delivery_configuration` - (Optional) Configuration for sending InfluxDB engine logs to a specified S3 bucket.
+* `organization` - (Default `"organization"`) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute.
+* `publicly_accessible` - (Default `false`) Configures the DB instance with a public IP to facilitate access. Other resources, such as a VPC, a subnet, an internet gateway, and a route table with routes, are also required to enable public access, in addition to this argument. See "[Usage with Public Internet Access Enabled](#usage-with-public-internet-access-enabled)" for an example configuration with all required resources for public internet access.
+* `username` - (Default `"admin"`) Username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `organization`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute.
+* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+### Nested Fields
+
+#### `log_delivery_configuration`
+
+* `s3_configuration` - (Required) Configuration for S3 bucket log delivery.
+
+#### `s3_configuration`
+
+* `bucket_name` - (Required) Name of the S3 bucket to deliver logs to.
+* `enabled` - (Required) Indicates whether log delivery to the S3 bucket is enabled.
+
+**Note**: Only three arguments support in-place updates: `db_parameter_group_identifier`, `log_delivery_configuration`, and `tags`. Changing any other argument after a DB instance has been deployed will cause destruction and re-creation of the DB instance. Additionally, when `db_parameter_group_identifier` is added to a DB instance or modified, the DB instance will be updated in place, but if `db_parameter_group_identifier` is removed from a DB instance, the DB instance will be destroyed and re-created.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - ARN of the Timestream for InfluxDB instance.
+* `availability_zone` - Availability Zone in which the DB instance resides.
+* `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.
+* `id` - ID of the Timestream for InfluxDB instance.
+* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. This secret will be read by the `aws_timestreaminfluxdb_db_instance` resource in order to support importing: deleting the secret or secret values can cause errors. +* `secondary_availability_zone` - Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. +* `status` - The status of the Timestream for InfluxDB instance. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Timestream for InfluxDB Db Instance using its identifier. For example: + +```terraform +import { + to = aws_timestreaminfluxdb_db_instance.example + id = "12345abcde" +} +``` + +Using `terraform import`, import Timestream for InfluxDB Db Instance using its identifier. For example: + +```console +% terraform import aws_timestreaminfluxdb_db_instance.example 12345abcde +``` From 3bc8c0f1c1d45e80b98828d880a9d23cf375251d Mon Sep 17 00:00:00 2001 From: Trevor Bonas Date: Thu, 13 Jun 2024 13:42:06 -0700 Subject: [PATCH 02/21] make fmt --- internal/service/timestreaminfluxdb/db_instance.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index c39f6aea3e1..3f0d60f729e 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -769,11 +769,11 @@ func waitDbInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, func waitDbInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.DeleteDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)}, - Target: []string{}, - Refresh: statusDbInstance(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, + Pending: []string{string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)}, + Target: []string{}, + Refresh: statusDbInstance(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, PollInterval: 30 * time.Second, } From 69ce2db3b33e05992a415d33aab8d294e757efb9 Mon Sep 17 00:00:00 2001 From: Trevor Bonas Date: Thu, 13 Jun 2024 14:55:03 -0700 Subject: [PATCH 03/21] Fix test Terraform formatting --- .../timestreaminfluxdb/db_instance_test.go | 406 +++++++++--------- 1 file changed, 203 insertions(+), 203 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 541c5e514c1..61b312b1ebe 100644 --- 
a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -558,7 +558,7 @@ func testAccCheckDbInstanceNotRecreated(before, after *timestreaminfluxdb.GetDbI func testAccDbInstanceConfig_base() string { return fmt.Sprintf(` resource "aws_vpc" "test_vpc" { - cidr_block = "10.0.0.0/16" + cidr_block = "10.0.0.0/16" } resource "aws_subnet" "test_subnet" { @@ -567,7 +567,7 @@ resource "aws_subnet" "test_subnet" { } resource "aws_security_group" "test_security_group" { - vpc_id = aws_vpc.test_vpc.id + vpc_id = aws_vpc.test_vpc.id } `) } @@ -576,12 +576,12 @@ resource "aws_security_group" "test_security_group" { func testAccDbInstanceConfig_basic(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q } `, rName)) } @@ -590,43 +590,43 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_logDeliveryConfigurationEnabled(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_s3_bucket" "test_s3_bucket" { - bucket = %[1]q - force_destroy = true + bucket = %[1]q + force_destroy = true } data "aws_iam_policy_document" "allow_timestreaminfluxdb" { - statement { - actions = ["s3:PutObject"] - principals { - type = "Service" - identifiers = ["timestream-influxdb.amazonaws.com"] - } - resources = [ - "${aws_s3_bucket.test_s3_bucket.arn}/*" - ] - } + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.test_s3_bucket.arn}/*" + ] + } } resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { - bucket = aws_s3_bucket.test_s3_bucket.id - policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json + bucket = aws_s3_bucket.test_s3_bucket.id + policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json } resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - publicly_accessible = false - name = %[1]q - - log_delivery_configuration { - s3_configuration { - bucket_name = %[1]q - enabled = true - } - } + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + publicly_accessible = false + name = %[1]q + + log_delivery_configuration { + s3_configuration { + bucket_name = %[1]q + enabled = true + } + } } `, rName)) } @@ -635,43 +635,43 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_logDeliveryConfigurationNotEnabled(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_s3_bucket" "test_s3_bucket" { - bucket = %[1]q - force_destroy = true + 
bucket = %[1]q + force_destroy = true } data "aws_iam_policy_document" "allow_timestreaminfluxdb" { - statement { - actions = ["s3:PutObject"] - principals { - type = "Service" - identifiers = ["timestream-influxdb.amazonaws.com"] - } - resources = [ - "${aws_s3_bucket.test_s3_bucket.arn}/*" - ] - } + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.test_s3_bucket.arn}/*" + ] + } } resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { - bucket = aws_s3_bucket.test_s3_bucket.id - policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json + bucket = aws_s3_bucket.test_s3_bucket.id + policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json } resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - publicly_accessible = false - name = %[1]q - - log_delivery_configuration { - s3_configuration { - bucket_name = %[1]q - enabled = false - } - } + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + publicly_accessible = false + name = %[1]q + + log_delivery_configuration { + s3_configuration { + bucket_name = %[1]q + enabled = false + } + } } `, rName)) } @@ -685,9 +685,9 @@ resource "aws_internet_gateway" "test_internet_gateway" { } resource "aws_route" "test_route" { - route_table_id = aws_vpc.test_vpc.main_route_table_id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test_internet_gateway.id + route_table_id = aws_vpc.test_vpc.main_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test_internet_gateway.id } resource "aws_route_table_association" "test_route_table_association" { @@ -696,21 +696,21 @@ resource "aws_route_table_association" "test_route_table_association" { } resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_vpc" { - security_group_id = aws_security_group.test_security_group.id - referenced_security_group_id = aws_security_group.test_security_group.id - ip_protocol = -1 + security_group_id = aws_security_group.test_security_group.id + referenced_security_group_id = aws_security_group.test_security_group.id + ip_protocol = -1 } resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - publicly_accessible = true + publicly_accessible = true } `, rName)) } @@ -718,35 +718,35 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_deploymentTypeMultiAzStandby(rName string, regionName string) string { return fmt.Sprintf(` resource "aws_vpc" "test_vpc" { - cidr_block = "10.0.0.0/16" + cidr_block = "10.0.0.0/16" } resource 
"aws_subnet" "test_subnet_1" { - vpc_id = aws_vpc.test_vpc.id - cidr_block = "10.0.1.0/24" + vpc_id = aws_vpc.test_vpc.id + cidr_block = "10.0.1.0/24" availability_zone = "%[2]sa" } resource "aws_subnet" "test_subnet_2" { - vpc_id = aws_vpc.test_vpc.id - cidr_block = "10.0.2.0/24" + vpc_id = aws_vpc.test_vpc.id + cidr_block = "10.0.2.0/24" availability_zone = "%[2]sb" } resource "aws_security_group" "test_security_group" { - vpc_id = aws_vpc.test_vpc.id + vpc_id = aws_vpc.test_vpc.id } resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet_1.id, aws_subnet.test_subnet_2.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet_1.id, aws_subnet.test_subnet_2.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - deployment_type = "WITH_MULTIAZ_STANDBY" + deployment_type = "WITH_MULTIAZ_STANDBY" } `, rName, regionName) } @@ -754,15 +754,15 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_username(rName, username string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - username = %[2]q + username = %[2]q } `, rName, username)) } @@ -770,15 +770,15 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_bucket(rName, bucketName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - bucket = %[2]q + bucket = %[2]q } `, rName, bucketName)) } @@ -786,15 +786,15 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_organization(rName, organizationName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = 
%[1]q + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - organization = %[2]q + organization = %[2]q } `, rName, organizationName)) } @@ -802,17 +802,17 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_tags1(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - tags = { - %[2]q = %[3]q - } + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + tags = { + %[2]q = %[3]q + } } `, rName, tagKey1, tagValue1)) } @@ -820,18 +820,18 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } + allocated_storage = 20 + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } @@ -839,14 +839,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbStorageTypeT2(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - db_storage_type = "InfluxIOIncludedT2" + db_storage_type = "InfluxIOIncludedT2" } `, rName)) } @@ -854,14 +854,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbStorageTypeT3(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = 
"db.influx.medium" - name = %[1]q + allocated_storage = 20 + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + db_instance_type = "db.influx.medium" + name = %[1]q - db_storage_type = "InfluxIOIncludedT3" + db_storage_type = "InfluxIOIncludedT3" } `, rName)) } @@ -869,14 +869,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceTypeLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q - db_instance_type = "db.influx.large" + db_instance_type = "db.influx.large" } `, rName)) } @@ -884,14 +884,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceTypeXLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q - db_instance_type = "db.influx.xlarge" + db_instance_type = "db.influx.xlarge" } `, rName)) } @@ -899,14 +899,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceType2XLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q - allocated_storage = 40 - db_instance_type = "db.influx.2xlarge" + allocated_storage = 40 + db_instance_type = "db.influx.2xlarge" } `, rName)) } @@ -914,14 +914,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceType4XLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = 
[aws_security_group.test_security_group.id] + name = %[1]q - db_instance_type = "db.influx.4xlarge" + db_instance_type = "db.influx.4xlarge" } `, rName)) } @@ -929,14 +929,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceType8XLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q - db_instance_type = "db.influx.8xlarge" + db_instance_type = "db.influx.8xlarge" } `, rName)) } @@ -944,14 +944,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceType12XLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q - db_instance_type = "db.influx.12xlarge" + db_instance_type = "db.influx.12xlarge" } `, rName)) } @@ -959,14 +959,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDbInstanceConfig_dbInstanceType16XLarge(rName string) string { return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.16xlarge" + allocated_storage = 20 + db_storage_type = "InfluxIOIncludedT1" + password = "testpassword" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.test_security_group.id] + name = %[1]q + + db_instance_type = "db.influx.16xlarge" } `, rName)) } From 7e221916b9524f76cd705ba5611c9ab5c56405bd Mon Sep 17 00:00:00 2001 From: Trevor Bonas Date: Thu, 13 Jun 2024 18:15:42 -0700 Subject: [PATCH 04/21] Fix formatting --- .../service/timestreaminfluxdb/db_instance.go | 94 ++++----- .../timestreaminfluxdb/db_instance_test.go | 199 +++++++++--------- .../timestreaminfluxdb/exports_test.go | 4 +- .../timestreaminfluxdb/service_package_gen.go | 2 +- internal/service/timestreaminfluxdb/sweep.go | 6 +- ...mestreaminfluxdb_db_instance.html.markdown | 195 ++++++++--------- 6 files changed, 250 insertions(+), 250 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 3f0d60f729e..0bfa15ca9c7 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -46,8 +46,8 @@ import ( // Function annotations 
are used for resource registration to the Provider. DO NOT EDIT. // @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="Db Instance") // @Tags(identifierAttribute="arn") -func newResourceDbInstance(_ context.Context) (resource.ResourceWithConfigure, error) { - r := &resourceDbInstance{} +func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceDBInstance{} r.SetDefaultCreateTimeout(30 * time.Minute) r.SetDefaultUpdateTimeout(30 * time.Minute) @@ -63,19 +63,19 @@ const ( DefaultBucketValue = "bucket" DefaultOrganizationValue = "organization" DefaultUsernameValue = "admin" - ResNameDbInstance = "Db Instance" + ResNameDBInstance = "DB Instance" ) -type resourceDbInstance struct { +type resourceDBInstance struct { framework.ResourceWithConfigure framework.WithTimeouts } -func (r *resourceDbInstance) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { +func (r *resourceDBInstance) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = "aws_timestreaminfluxdb_db_instance" } -func (r *resourceDbInstance) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { +func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "allocated_storage": schema.Int64Attribute{ @@ -371,7 +371,7 @@ func statementReplaceIf(ctx context.Context, req planmodifier.StringRequest, res if req.State.Raw.IsNull() || req.Plan.Raw.IsNull() { return } - var plan, state resourceDbInstanceData + var plan, state resourceDBInstanceData resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { @@ -383,10 +383,10 @@ func statementReplaceIf(ctx context.Context, req planmodifier.StringRequest, res resp.RequiresReplace = dbParameterGroupIdentifierRemoved } -func (r *resourceDbInstance) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { conn := r.Meta().TimestreamInfluxDBClient(ctx) - var plan resourceDbInstanceData + var plan resourceDBInstanceData resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
if resp.Diagnostics.HasError() { return @@ -434,14 +434,14 @@ func (r *resourceDbInstance) Create(ctx context.Context, req resource.CreateRequ out, err := conn.CreateDbInstance(ctx, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDbInstance, plan.Name.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBInstance, plan.Name.String(), err), err.Error(), ) return } if out == nil || out.Id == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDbInstance, plan.Name.String(), nil), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBInstance, plan.Name.String(), nil), errors.New("empty output").Error(), ) return @@ -453,23 +453,23 @@ func (r *resourceDbInstance) Create(ctx context.Context, req resource.CreateRequ plan.AvailabilityZone = flex.StringToFramework(ctx, out.AvailabilityZone) createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - _, err = waitDbInstanceCreated(ctx, conn, plan.ID.ValueString(), createTimeout) + _, err = waitDBInstanceCreated(ctx, conn, plan.ID.ValueString(), createTimeout) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForCreation, ResNameDbInstance, plan.Name.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForCreation, ResNameDBInstance, plan.Name.String(), err), err.Error(), ) return } - readOut, err := findDbInstanceByID(ctx, conn, plan.ID.ValueString()) + readOut, err := findDBInstanceByID(ctx, conn, plan.ID.ValueString()) if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return } if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, plan.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, plan.ID.String(), err), err.Error(), ) return @@ -484,23 +484,23 @@ func (r *resourceDbInstance) Create(ctx context.Context, req resource.CreateRequ resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) } -func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { conn := r.Meta().TimestreamInfluxDBClient(ctx) - var state resourceDbInstanceData + var state resourceDBInstanceData resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
if resp.Diagnostics.HasError() { return } - out, err := findDbInstanceByID(ctx, conn, state.ID.ValueString()) + out, err := findDBInstanceByID(ctx, conn, state.ID.ValueString()) if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return } if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -536,7 +536,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, }) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -545,7 +545,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, secrets := make(map[string]string) if err := json.Unmarshal([]byte(aws.ToString(secretsOut.SecretString)), &secrets); err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -554,7 +554,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, state.Username = flex.StringValueToFramework[string](ctx, username) } else { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -563,7 +563,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, state.Password = flex.StringValueToFramework[string](ctx, password) } else { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -572,7 +572,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, state.Organization = flex.StringValueToFramework[string](ctx, organization) } else { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -581,7 +581,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, state.Bucket = flex.StringValueToFramework[string](ctx, bucket) } else { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -590,7 +590,7 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, 
tags, err := listTags(ctx, conn, state.ARN.ValueString()) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return @@ -600,10 +600,10 @@ func (r *resourceDbInstance) Read(ctx context.Context, req resource.ReadRequest, resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } -func (r *resourceDbInstance) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { conn := r.Meta().TimestreamInfluxDBClient(ctx) - var plan, state resourceDbInstanceData + var plan, state resourceDBInstanceData resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { @@ -637,14 +637,14 @@ func (r *resourceDbInstance) Update(ctx context.Context, req resource.UpdateRequ out, err := conn.UpdateDbInstance(ctx, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDbInstance, plan.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBInstance, plan.ID.String(), err), err.Error(), ) return } if out == nil || out.Id == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDbInstance, plan.ID.String(), nil), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBInstance, plan.ID.String(), nil), errors.New("empty output").Error(), ) return @@ -652,24 +652,24 @@ func (r *resourceDbInstance) Update(ctx context.Context, req resource.UpdateRequ } updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) - _, err := waitDbInstanceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + _, err := waitDBInstanceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForUpdate, ResNameDbInstance, plan.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForUpdate, ResNameDBInstance, plan.ID.String(), err), err.Error(), ) return } // Update status to current status - readOut, err := findDbInstanceByID(ctx, conn, plan.ID.ValueString()) + readOut, err := findDBInstanceByID(ctx, conn, plan.ID.ValueString()) if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return } if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDbInstance, plan.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, plan.ID.String(), err), err.Error(), ) return @@ -688,10 +688,10 @@ func (r *resourceDbInstance) Update(ctx context.Context, req resource.UpdateRequ resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
} -func (r *resourceDbInstance) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +func (r *resourceDBInstance) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().TimestreamInfluxDBClient(ctx) - var state resourceDbInstanceData + var state resourceDBInstanceData resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { return @@ -707,31 +707,31 @@ func (r *resourceDbInstance) Delete(ctx context.Context, req resource.DeleteRequ return } resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionDeleting, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionDeleting, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return } deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) - _, err = waitDbInstanceDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + _, err = waitDBInstanceDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForDeletion, ResNameDbInstance, state.ID.String(), err), + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForDeletion, ResNameDBInstance, state.ID.String(), err), err.Error(), ) return } } -func (r *resourceDbInstance) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +func (r *resourceDBInstance) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } -func (r *resourceDbInstance) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { +func (r *resourceDBInstance) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { r.SetTagsAll(ctx, request, response) } -func waitDbInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.CreateDbInstanceOutput, error) { +func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.CreateDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.StatusCreating), string(awstypes.StatusUpdating), string(awstypes.StatusModifying)}, Target: []string{string(awstypes.StatusAvailable)}, @@ -749,7 +749,7 @@ func waitDbInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, return nil, err } -func waitDbInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.UpdateDbInstanceOutput, error) { +func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.UpdateDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.StatusModifying), string(awstypes.StatusUpdating)}, Target: []string{string(awstypes.StatusAvailable)}, @@ -767,7 +767,7 @@ func waitDbInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, return nil, err } -func waitDbInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.DeleteDbInstanceOutput, 
error) { +func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.DeleteDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)}, Target: []string{}, @@ -787,7 +787,7 @@ func waitDbInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, func statusDbInstance(ctx context.Context, conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findDbInstanceByID(ctx, conn, id) + out, err := findDBInstanceByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } @@ -799,7 +799,7 @@ func statusDbInstance(ctx context.Context, conn *timestreaminfluxdb.Client, id s } } -func findDbInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id string) (*timestreaminfluxdb.GetDbInstanceOutput, error) { +func findDBInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id string) (*timestreaminfluxdb.GetDbInstanceOutput, error) { in := &timestreaminfluxdb.GetDbInstanceInput{ Identifier: aws.String(id), } @@ -881,7 +881,7 @@ func expandS3Configuration(tfObj s3ConfigurationData) *awstypes.S3Configuration return apiObject } -type resourceDbInstanceData struct { +type resourceDBInstanceData struct { AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` ARN types.String `tfsdk:"arn"` AvailabilityZone types.String `tfsdk:"availability_zone"` diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 61b312b1ebe..4aae33d6f17 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -21,12 +21,11 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/names" - tftimestreaminfluxdb "github.com/hashicorp/terraform-provider-aws/internal/service/timestreaminfluxdb" + "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccTimestreamInfluxDBDbInstance_basic(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -46,9 +45,9 @@ func TestAccTimestreamInfluxDBDbInstance_basic(t *testing.T) { CheckDestroy: testAccCheckDbInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_basic(rName), + Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), // Verification of read-only attributes and default values. // DB instance will not be publicly accessible and will not have an endpoint.
acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "timestream-influxdb", regexache.MustCompile(`db-instance/+.`)), @@ -73,7 +72,7 @@ func TestAccTimestreamInfluxDBDbInstance_basic(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDbInstance_disappears(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -90,13 +89,13 @@ func TestAccTimestreamInfluxDBDbInstance_disappears(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_basic(rName), + Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreaminfluxdb.ResourceDbInstance, resourceName), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreaminfluxdb.ResourceDBInstance, resourceName), ), ExpectNonEmptyPlan: true, }, @@ -104,7 +103,7 @@ func TestAccTimestreamInfluxDBDbInstance_disappears(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDbInstance_logDeliveryConfiguration(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -125,16 +124,16 @@ func TestAccTimestreamInfluxDBDbInstance_logDeliveryConfiguration(t *testing.T) CheckDestroy: testAccCheckDbInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_logDeliveryConfigurationEnabled(rName), + Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", "2"), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", "true"), ), }, { - Config: testAccDbInstanceConfig_logDeliveryConfigurationNotEnabled(rName), + Config: testAccDBInstanceConfig_logDeliveryConfigurationNotEnabled(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", "2"), @@ -172,7 +171,7 @@ func TestAccTimestreamInfluxDBDbInstance_publiclyAccessible(t *testing.T) { CheckDestroy: testAccCheckDbInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_publiclyAccessible(rName), + Config: testAccDBInstanceConfig_publiclyAccessible(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttrSet(resourceName, "endpoint"), @@ -206,10 +205,10 @@ func TestAccTimestreamInfluxDBDbInstance_deploymentTypeMultiAzStandby(t *testing }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_deploymentTypeMultiAzStandby(rName, acctest.Region()), + Config: testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName, acctest.Region()), Check: resource.ComposeTestCheckFunc( testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), // DB instance will not be publicly accessible and will not have an endpoint. @@ -228,7 +227,7 @@ func TestAccTimestreamInfluxDBDbInstance_deploymentTypeMultiAzStandby(t *testing }) } -func TestAccTimestreamInfluxDBDbInstance_username(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_username(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -246,12 +245,12 @@ func TestAccTimestreamInfluxDBDbInstance_username(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_username(rName, testUsername), + Config: testAccDBInstanceConfig_username(rName, testUsername), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "username", testUsername), ), }, @@ -265,7 +264,7 @@ func TestAccTimestreamInfluxDBDbInstance_username(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDbInstance_bucket(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_bucket(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -283,12 +282,12 @@ func TestAccTimestreamInfluxDBDbInstance_bucket(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_bucket(rName, testBucketName), + Config: testAccDBInstanceConfig_bucket(rName, testBucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "bucket", testBucketName), ), }, @@ -302,7 +301,7 @@ func TestAccTimestreamInfluxDBDbInstance_bucket(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDbInstance_organization(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_organization(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -320,12 +319,12 @@ func TestAccTimestreamInfluxDBDbInstance_organization(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_organization(rName, testOrganizationName), + Config: testAccDBInstanceConfig_organization(rName, testOrganizationName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, 
resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "organization", testOrganizationName), ), }, @@ -339,7 +338,7 @@ func TestAccTimestreamInfluxDBDbInstance_organization(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDbInstance_tags(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -356,12 +355,12 @@ func TestAccTimestreamInfluxDBDbInstance_tags(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDbInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), @@ -369,9 +368,9 @@ func TestAccTimestreamInfluxDBDbInstance_tags(t *testing.T) { ), }, { - Config: testAccDbInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Config: testAccDBInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -381,9 +380,9 @@ func TestAccTimestreamInfluxDBDbInstance_tags(t *testing.T) { ), }, { - Config: testAccDbInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), @@ -400,7 +399,7 @@ func TestAccTimestreamInfluxDBDbInstance_tags(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDbInstance_dbInstanceType(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_dbInstanceType(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -417,54 +416,54 @@ func TestAccTimestreamInfluxDBDbInstance_dbInstanceType(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { 
- Config: testAccDbInstanceConfig_dbInstanceTypeLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceTypeLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.large"), ), }, { - Config: testAccDbInstanceConfig_dbInstanceTypeXLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceTypeXLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.xlarge"), ), }, { - Config: testAccDbInstanceConfig_dbInstanceType2XLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceType2XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.2xlarge"), ), }, { - Config: testAccDbInstanceConfig_dbInstanceType4XLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceType4XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.4xlarge"), ), }, { - Config: testAccDbInstanceConfig_dbInstanceType8XLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceType8XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.8xlarge"), ), }, { - Config: testAccDbInstanceConfig_dbInstanceType12XLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceType12XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.12xlarge"), ), }, { - Config: testAccDbInstanceConfig_dbInstanceType16XLarge(rName), + Config: testAccDBInstanceConfig_dbInstanceType16XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.16xlarge"), ), }, @@ -478,7 +477,7 @@ func TestAccTimestreamInfluxDBDbInstance_dbInstanceType(t *testing.T) { }) } -func testAccCheckDbInstanceDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) @@ -495,25 +494,25 @@ func testAccCheckDbInstanceDestroy(ctx context.Context) resource.TestCheckFunc { return nil } if err != nil { - return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDbInstance, rs.Primary.ID, err) + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDBInstance, rs.Primary.ID, err) } - return 
create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDbInstance, rs.Primary.ID, errors.New("not destroyed")) + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDBInstance, rs.Primary.ID, errors.New("not destroyed")) } return nil } } -func testAccCheckDbInstanceExists(ctx context.Context, name string, dbinstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { +func testAccCheckDBInstanceExists(ctx context.Context, name string, dbinstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { - return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDbInstance, name, errors.New("not found")) + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBInstance, name, errors.New("not found")) } if rs.Primary.ID == "" { - return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDbInstance, name, errors.New("not set")) + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBInstance, name, errors.New("not set")) } conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) @@ -522,7 +521,7 @@ func testAccCheckDbInstanceExists(ctx context.Context, name string, dbinstance * }) if err != nil { - return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDbInstance, rs.Primary.ID, err) + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBInstance, rs.Primary.ID, err) } *dbinstance = *resp @@ -545,17 +544,17 @@ func testAccPreCheck(ctx context.Context, t *testing.T) { } } -func testAccCheckDbInstanceNotRecreated(before, after *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { +func testAccCheckDBInstanceNotRecreated(before, after *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { return func(s *terraform.State) error { if before, after := aws.ToString(before.Id), aws.ToString(after.Id); before != after { - return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingNotRecreated, tftimestreaminfluxdb.ResNameDbInstance, before, errors.New("recreated")) + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingNotRecreated, tftimestreaminfluxdb.ResNameDBInstance, before, errors.New("recreated")) } return nil } } -func testAccDbInstanceConfig_base() string { +func testAccDBInstanceConfig_base() string { return fmt.Sprintf(` resource "aws_vpc" "test_vpc" { cidr_block = "10.0.0.0/16" @@ -573,8 +572,8 @@ resource "aws_security_group" "test_security_group" { } // Minimal configuration. -func testAccDbInstanceConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -587,8 +586,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } // Configuration with log_delivery_configuration set and enabled. 
-func testAccDbInstanceConfig_logDeliveryConfigurationEnabled(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_s3_bucket" "test_s3_bucket" { bucket = %[1]q force_destroy = true @@ -632,8 +631,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } // Configuration with log_delivery_configuration set but not enabled. -func testAccDbInstanceConfig_logDeliveryConfigurationNotEnabled(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_logDeliveryConfigurationNotEnabled(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_s3_bucket" "test_s3_bucket" { bucket = %[1]q force_destroy = true @@ -678,8 +677,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { // Configuration that is publicly accessible. An endpoint will be created // for the DB instance but no inbound rules will be defined, preventing access. -func testAccDbInstanceConfig_publiclyAccessible(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_publiclyAccessible(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_internet_gateway" "test_internet_gateway" { vpc_id = aws_vpc.test_vpc.id } @@ -715,7 +714,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_deploymentTypeMultiAzStandby(rName string, regionName string) string { +func testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName string, regionName string) string { return fmt.Sprintf(` resource "aws_vpc" "test_vpc" { cidr_block = "10.0.0.0/16" @@ -751,8 +750,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName, regionName) } -func testAccDbInstanceConfig_username(rName, username string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_username(rName, username string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -767,8 +766,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName, username)) } -func testAccDbInstanceConfig_bucket(rName, bucketName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_bucket(rName, bucketName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -783,8 +782,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName, bucketName)) } -func testAccDbInstanceConfig_organization(rName, organizationName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_organization(rName, organizationName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -799,8 +798,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, 
rName, organizationName)) } -func testAccDbInstanceConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -817,8 +816,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName, tagKey1, tagValue1)) } -func testAccDbInstanceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -836,8 +835,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } -func testAccDbInstanceConfig_dbStorageTypeT2(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbStorageTypeT2(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -851,8 +850,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbStorageTypeT3(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbStorageTypeT3(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -866,8 +865,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceTypeLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceTypeLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -881,8 +880,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceTypeXLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceTypeXLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -896,8 +895,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceType2XLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceType2XLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { db_storage_type = "InfluxIOIncludedT1" password = "testpassword" @@ -911,8 
+910,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceType4XLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceType4XLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -926,8 +925,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceType8XLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceType8XLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -941,8 +940,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceType12XLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceType12XLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -956,8 +955,8 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDbInstanceConfig_dbInstanceType16XLarge(rName string) string { - return acctest.ConfigCompose(testAccDbInstanceConfig_base(), fmt.Sprintf(` +func testAccDBInstanceConfig_dbInstanceType16XLarge(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" diff --git a/internal/service/timestreaminfluxdb/exports_test.go b/internal/service/timestreaminfluxdb/exports_test.go index 157b1503203..55949817f99 100644 --- a/internal/service/timestreaminfluxdb/exports_test.go +++ b/internal/service/timestreaminfluxdb/exports_test.go @@ -5,7 +5,7 @@ package timestreaminfluxdb // Exports for use in tests only. 
var ( - ResourceDbInstance = newResourceDbInstance + ResourceDBInstance = newResourceDBInstance - FindDbInstanceByID = findDbInstanceByID + FindDBInstanceByID = findDBInstanceByID ) diff --git a/internal/service/timestreaminfluxdb/service_package_gen.go b/internal/service/timestreaminfluxdb/service_package_gen.go index e28483d81a2..8d01f002f64 100644 --- a/internal/service/timestreaminfluxdb/service_package_gen.go +++ b/internal/service/timestreaminfluxdb/service_package_gen.go @@ -22,7 +22,7 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { return []*types.ServicePackageFrameworkResource{ { - Factory: newResourceDbInstance, + Factory: newResourceDBInstance, Name: "Db Instance", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, diff --git a/internal/service/timestreaminfluxdb/sweep.go b/internal/service/timestreaminfluxdb/sweep.go index fe570c0aa81..f71a959bef7 100644 --- a/internal/service/timestreaminfluxdb/sweep.go +++ b/internal/service/timestreaminfluxdb/sweep.go @@ -19,11 +19,11 @@ import ( func RegisterSweepers() { resource.AddTestSweepers("aws_timestreaminfluxdb_db_instance", &resource.Sweeper{ Name: "aws_timestreaminfluxdb_db_instance", - F: sweepDbInstances, + F: sweepDBInstances, }) } -func sweepDbInstances(region string) error { +func sweepDBInstances(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { @@ -50,7 +50,7 @@ func sweepDbInstances(region string) error { id := aws.ToString(v.Id) log.Printf("[INFO] Deleting TimestreamInfluxDB DB instance: %s", id) - sweepResources = append(sweepResources, framework.NewSweepResource(newResourceDbInstance, client, + sweepResources = append(sweepResources, framework.NewSweepResource(newResourceDBInstance, client, framework.NewAttribute(names.AttrID, id), )) } diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown index 44960ca90e7..00b9e6d8f36 100644 --- a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown @@ -13,6 +13,7 @@ TIP: A few guiding principles for writing documentation: 4. Document your feature as it exists now; do not mention the future or past if you can help it. 5. Use accessible and inclusive language. --->` + # Resource: aws_timestreaminfluxdb_db_instance Terraform resource for managing an Amazon Timestream for InfluxDB Db Instance. @@ -23,14 +24,14 @@ Terraform resource for managing an Amazon Timestream for InfluxDB Db Instance. ```terraform resource "aws_timestreaminfluxdb_db_instance" "example" { - allocated_storage = 20 - bucket = "example-bucket-name" - db_instance_type = "db.influx.medium" - username = "admin" - password = "example-password" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] - name = "example-db-instance" + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.test_subnet.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" } ``` @@ -40,28 +41,28 @@ All Timestream for InfluxDB instances require a VPC, subnet, and security group. 
```terraform resource "aws_vpc" "example_vpc" { - cidr_block = "10.0.0.0/16" + cidr_block = "10.0.0.0/16" } resource "aws_subnet" "example_subnet" { - vpc_id = aws_vpc.example_vpc.id - cidr_block = "10.0.1.0/24" + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.1.0/24" } resource "aws_security_group" "example_security_group" { - name = "example_security_group" - vpc_id = aws_vpc.example_vpc.id + name = "example_security_group" + vpc_id = aws_vpc.example_vpc.id } resource "aws_timestreaminfluxdb_db_instance" "example" { - allocated_storage = 20 - bucket = "example-bucket-name" - db_instance_type = "db.influx.medium" - username = "admin" - password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] - name = "example-db-instance" + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.example_subnet.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" } ``` @@ -71,62 +72,62 @@ The following configuration shows how to define the necessary resources and argu ```terraform resource "aws_vpc" "example_vpc" { - cidr_block = "10.0.0.0/16" + cidr_block = "10.0.0.0/16" } resource "aws_subnet" "example_subnet" { - vpc_id = aws_vpc.example_vpc.id - cidr_block = "10.0.1.0/24" + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.1.0/24" } resource "aws_security_group" "example_security_group" { - name = "example_security_group" - vpc_id = aws_vpc.example_vpc.id + name = "example_security_group" + vpc_id = aws_vpc.example_vpc.id } resource "aws_internet_gateway" "test_internet_gateway" { - vpc_id = aws_vpc.test_vpc.id + vpc_id = aws_vpc.test_vpc.id - tags = { - Name = "test_internet_gateway" - } + tags = { + Name = "test_internet_gateway" + } } resource "aws_route" "test_route" { - route_table_id = aws_vpc.test_vpc.main_route_table_id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test_internet_gateway.id + route_table_id = aws_vpc.test_vpc.main_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test_internet_gateway.id } resource "aws_route_table_association" "test_route_table_association" { - subnet_id = aws_subnet.test_subnet.id - route_table_id = aws_vpc.test_vpc.main_route_table_id + subnet_id = aws_subnet.test_subnet.id + route_table_id = aws_vpc.test_vpc.main_route_table_id } resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_vpc" { - security_group_id = aws_security_group.test_security_group.id - referenced_security_group_id = aws_security_group.test_security_group.id - ip_protocol = -1 + security_group_id = aws_security_group.test_security_group.id + referenced_security_group_id = aws_security_group.test_security_group.id + ip_protocol = -1 } resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_influxdb" { - security_group_id = aws_security_group.test_security_group.id - cidr_ipv4 = "0.0.0.0/0" - ip_protocol = "tcp" - from_port = 8086 - to_port = 8086 + security_group_id = aws_security_group.test_security_group.id + cidr_ipv4 = "0.0.0.0/0" + ip_protocol = "tcp" + from_port = 8086 + to_port = 8086 } resource "aws_timestreaminfluxdb_db_instance" "example" { - allocated_storage = 20 - bucket = "example-bucket-name" - db_instance_type = "db.influx.medium" - username = "admin" - password = 
"example-password" - vpc_subnet_ids = [aws_subnet.example_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] - name = "example-db-instance" - publicly_accessible = true # False by default + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.example_subnet.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" + publicly_accessible = true # False by default } ``` @@ -136,43 +137,43 @@ You can use an S3 bucket to store logs generated by your Timestream for InfluxDB ```terraform resource "aws_s3_bucket" "example_s3_bucket" { - bucket = "example-s3-bucket" + bucket = "example-s3-bucket" } data "aws_iam_policy_document" "allow_timestreaminfluxdb_policy_document" { - statement { - actions = ["s3:PutObject"] - principals { - type = "Service" - identifiers = ["timestream-influxdb.amazonaws.com"] - } - resources = [ - "${aws_s3_bucket.example_s3_bucket.arn}/*" - ] - } + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.example_s3_bucket.arn}/*" + ] + } } resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb_policy" { - bucket = aws_s3_bucket.example_s3_bucket.id - policy = data.aws_iam_policy_document.allow_timestreaminfluxdb_policy_document.json + bucket = aws_s3_bucket.example_s3_bucket.id + policy = data.aws_iam_policy_document.allow_timestreaminfluxdb_policy_document.json } resource "aws_timestreaminfluxdb_db_instance" "example" { - allocated_storage = 20 - bucket = "example-bucket-name" - db_instance_type = "db.influx.medium" - username = "admin" - password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] - name = "example-db-instance" - - log_delivery_configuration { - s3_configuration { - bucket_name = aws_s3_bucket.example_s3_bucket.name - enabled = true - } - } + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.example_subnet.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" + + log_delivery_configuration { + s3_configuration { + bucket_name = aws_s3_bucket.example_s3_bucket.name + enabled = true + } + } } ``` @@ -182,27 +183,27 @@ To use multi-region availability, at least two subnets must be created in differ ```terraform resource "aws_subnet" "example_subnet_1" { - vpc_id = aws_vpc.example_vpc.id - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-west-2a" } resource "aws_subnet" "example_subnet_2" { - vpc_id = aws_vpc.example_vpc.id - cidr_block = "10.0.2.0/24" - availability_zone = "us-west-2b" + vpc_id = aws_vpc.example_vpc.id + cidr_block = "10.0.2.0/24" + availability_zone = "us-west-2b" } resource "aws_timestreaminfluxdb_db_instance" "example" { - allocated_storage = 20 - bucket = "example-bucket-name" - db_instance_type = "db.influx.medium" - deployment_type = "WITH_MULTIAZ_STANDBY" - username = "admin" - password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet_1.id, aws_subnet.example_subnet_2.id] - vpc_security_group_ids = 
[aws_security_group.example_security_group.id] - name = "example-db-instance" + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + deployment_type = "WITH_MULTIAZ_STANDBY" + username = "admin" + password = "example-password" + vpc_subnet_ids = [aws_subnet.example_subnet_1.id, aws_subnet.example_subnet_2.id] + vpc_security_group_ids = [aws_security_group.example_security_group.id] + name = "example-db-instance" } ``` @@ -269,8 +270,8 @@ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashico ```terraform import { - to = aws_timestreaminfluxdb_db_instance.example - id = "12345abcde" + to = aws_timestreaminfluxdb_db_instance.example + id = "12345abcde" } ``` From 1f3d4d9e14f0d785f84109d47fdcbccaa2b117e5 Mon Sep 17 00:00:00 2001 From: Trevor Bonas Date: Fri, 14 Jun 2024 12:07:22 -0700 Subject: [PATCH 05/21] Fix linting issues --- .../service/timestreaminfluxdb/db_instance.go | 71 +++--- .../timestreaminfluxdb/db_instance_test.go | 213 ++++++++++-------- 2 files changed, 160 insertions(+), 124 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 0bfa15ca9c7..1b894b353cf 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -35,6 +35,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" @@ -60,7 +61,7 @@ const ( // If not provided, CreateDbInstance will use the below default values // for bucket and organization. These values need to be set in Terraform // because GetDbInstance won't return them. 
- DefaultBucketValue = "bucket" + DefaultBucketValue = names.AttrBucket DefaultOrganizationValue = "organization" DefaultUsernameValue = "admin" ResNameDBInstance = "DB Instance" @@ -78,7 +79,7 @@ func (r *resourceDBInstance) Metadata(_ context.Context, req resource.MetadataRe func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "allocated_storage": schema.Int64Attribute{ + names.AttrAllocatedStorage: schema.Int64Attribute{ Required: true, PlanModifiers: []planmodifier.Int64{ int64planmodifier.RequiresReplace(), @@ -89,12 +90,12 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ }, Description: `The amount of storage to allocate for your DB storage type in GiB (gibibytes).`, }, - "arn": framework.ARNAttributeComputedOnly(), - "availability_zone": schema.StringAttribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrAvailabilityZone: schema.StringAttribute{ Computed: true, Description: `The Availability Zone in which the DB instance resides.`, }, - "bucket": schema.StringAttribute{ + names.AttrBucket: schema.StringAttribute{ Optional: true, Computed: true, Default: stringdefault.StaticString(DefaultBucketValue), @@ -191,11 +192,11 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Description: `Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability.`, }, - "endpoint": schema.StringAttribute{ + names.AttrEndpoint: schema.StringAttribute{ Computed: true, Description: `The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.`, }, - "id": framework.IDAttribute(), + names.AttrID: framework.IDAttribute(), "influx_auth_parameters_secret_arn": schema.StringAttribute{ Computed: true, Description: `The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the @@ -203,7 +204,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.`, }, - "name": schema.StringAttribute{ + names.AttrName: schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), @@ -239,7 +240,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Description: `The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users.`, }, - "password": schema.StringAttribute{ + names.AttrPassword: schema.StringAttribute{ Required: true, Sensitive: true, PlanModifiers: []planmodifier.String{ @@ -255,7 +256,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ also use the InfluxDB CLI to create an operator token. 
These attributes will be stored in a Secret created in AWS SecretManager in your account.`, }, - "publicly_accessible": schema.BoolAttribute{ + names.AttrPubliclyAccessible: schema.BoolAttribute{ Optional: true, Computed: true, Default: booldefault.StaticBool(false), @@ -269,11 +270,11 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Description: `The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance.`, }, - "status": schema.StringAttribute{ + names.AttrStatus: schema.StringAttribute{ Computed: true, Description: `The status of the DB instance.`, }, - "username": schema.StringAttribute{ + names.AttrUsername: schema.StringAttribute{ Optional: true, Computed: true, Default: stringdefault.StaticString(DefaultUsernameValue), @@ -295,7 +296,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ attributes will be stored in a Secret created in Amazon Secrets Manager in your account`, }, - "vpc_security_group_ids": schema.SetAttribute{ + names.AttrVPCSecurityGroupIDs: schema.SetAttribute{ Required: true, ElementType: types.StringType, PlanModifiers: []planmodifier.Set{ @@ -339,7 +340,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Blocks: map[string]schema.Block{ "s3_configuration": schema.SingleNestedBlock{ Attributes: map[string]schema.Attribute{ - "bucket_name": schema.StringAttribute{ + names.AttrBucketName: schema.StringAttribute{ Required: true, Validators: []validator.String{ stringvalidator.LengthAtLeast(3), @@ -348,7 +349,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ }, Description: `The name of the S3 bucket to deliver logs to.`, }, - "enabled": schema.BoolAttribute{ + names.AttrEnabled: schema.BoolAttribute{ Required: true, Description: `Indicates whether log delivery to the S3 bucket is enabled.`, }, @@ -358,7 +359,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ }, }, }, - "timeouts": timeouts.Block(ctx, timeouts.Opts{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ Create: true, Update: true, Delete: true, @@ -550,7 +551,7 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, ) return } - if username, ok := secrets["username"]; ok { + if username, ok := secrets[names.AttrUsername]; ok { state.Username = flex.StringValueToFramework[string](ctx, username) } else { resp.Diagnostics.AddError( @@ -559,7 +560,7 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, ) return } - if password, ok := secrets["password"]; ok { + if password, ok := secrets[names.AttrPassword]; ok { state.Password = flex.StringValueToFramework[string](ctx, password) } else { resp.Diagnostics.AddError( @@ -577,7 +578,7 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, ) return } - if bucket, ok := secrets["bucket"]; ok { + if bucket, ok := secrets[names.AttrBucket]; ok { state.Bucket = flex.StringValueToFramework[string](ctx, bucket) } else { resp.Diagnostics.AddError( @@ -616,7 +617,6 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ // db_parameter_group_identifier. 
if !plan.DBParameterGroupIdentifier.Equal(state.DBParameterGroupIdentifier) || !plan.LogDeliveryConfiguration.Equal(state.LogDeliveryConfiguration) { - in := ×treaminfluxdb.UpdateDbInstanceInput{ Identifier: aws.String(plan.ID.ValueString()), } @@ -725,7 +725,7 @@ func (r *resourceDBInstance) Delete(ctx context.Context, req resource.DeleteRequ } func (r *resourceDBInstance) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrID), req, resp) } func (r *resourceDBInstance) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { r.SetTagsAll(ctx, request, response) @@ -733,9 +733,9 @@ func (r *resourceDBInstance) ModifyPlan(ctx context.Context, request resource.Mo func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.CreateDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.StatusCreating), string(awstypes.StatusUpdating), string(awstypes.StatusModifying)}, - Target: []string{string(awstypes.StatusAvailable)}, - Refresh: statusDbInstance(ctx, conn, id), + Pending: enum.Slice(string(awstypes.StatusCreating), string(awstypes.StatusUpdating), string(awstypes.StatusModifying)), + Target: enum.Slice(awstypes.StatusAvailable), + Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -751,9 +751,9 @@ func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.UpdateDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.StatusModifying), string(awstypes.StatusUpdating)}, - Target: []string{string(awstypes.StatusAvailable)}, - Refresh: statusDbInstance(ctx, conn, id), + Pending: enum.Slice(string(awstypes.StatusModifying), string(awstypes.StatusUpdating)), + Target: enum.Slice(string(awstypes.StatusAvailable)), + Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -769,9 +769,9 @@ func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.DeleteDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)}, - Target: []string{}, - Refresh: statusDbInstance(ctx, conn, id), + Pending: enum.Slice(string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)), + Target: enum.Slice[string](), + Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, PollInterval: 30 * time.Second, @@ -785,7 +785,7 @@ func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, return nil, err } -func statusDbInstance(ctx context.Context, conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { +func statusDBInstance(ctx context.Context, conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { return 
func() (interface{}, string, error) { out, err := findDBInstanceByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -831,6 +831,7 @@ func flattenLogDeliveryConfiguration(ctx context.Context, apiObject *awstypes.Lo return types.ListNull(elemType), diags } s3Configuration, d := flattenS3Configuration(ctx, apiObject.S3Configuration) + diags.Append(d...) obj := map[string]attr.Value{ "s3_configuration": s3Configuration, } @@ -852,8 +853,8 @@ func flattenS3Configuration(ctx context.Context, apiObject *awstypes.S3Configura } obj := map[string]attr.Value{ - "bucket_name": flex.StringValueToFramework(ctx, *apiObject.BucketName), - "enabled": flex.BoolToFramework(ctx, *&apiObject.Enabled), + names.AttrBucketName: flex.StringValueToFramework(ctx, *apiObject.BucketName), + names.AttrEnabled: flex.BoolToFramework(ctx, apiObject.Enabled), } objVal, d := types.ObjectValue(s3ConfigurationAttrTypes, obj) diags.Append(d...) @@ -922,6 +923,6 @@ var logDeliveryConfigrationAttrTypes = map[string]attr.Type{ } var s3ConfigurationAttrTypes = map[string]attr.Type{ - "bucket_name": types.StringType, - "enabled": types.BoolType, + names.AttrBucketName: types.StringType, + names.AttrEnabled: types.BoolType, } diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 4aae33d6f17..89bea112e06 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -12,7 +12,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" - "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -31,7 +30,7 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -42,31 +41,30 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), // Verification of read-only attributes and default values. // DB instance will not be publicly accessible and will not have an endpoint. 
- acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "timestream-influxdb", regexache.MustCompile(`db-instance/+.`)), - resource.TestCheckResourceAttrSet(resourceName, "availability_zone"), - resource.TestCheckResourceAttr(resourceName, "bucket", tftimestreaminfluxdb.DefaultBucketValue), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "timestream-influxdb", regexache.MustCompile(`db-instance/+.`)), + resource.TestCheckResourceAttrSet(resourceName, names.AttrAvailabilityZone), + resource.TestCheckResourceAttr(resourceName, names.AttrBucket, tftimestreaminfluxdb.DefaultBucketValue), resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.DeploymentTypeSingleAz)), resource.TestCheckResourceAttrSet(resourceName, "influx_auth_parameters_secret_arn"), resource.TestCheckResourceAttr(resourceName, "organization", tftimestreaminfluxdb.DefaultOrganizationValue), - resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "false"), + resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StatusAvailable)), - resource.TestCheckResourceAttr(resourceName, "username", tftimestreaminfluxdb.DefaultUsernameValue), + resource.TestCheckResourceAttr(resourceName, names.AttrUsername, tftimestreaminfluxdb.DefaultUsernameValue), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -78,7 +76,7 @@ func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -94,7 +92,7 @@ func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { { Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreaminfluxdb.ResourceDBInstance, resourceName), ), ExpectNonEmptyPlan: true, @@ -109,7 +107,7 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput // The same random name will be used for both the DB instance and the log S3 bucket name. 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -121,43 +119,43 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", "2"), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", acctest.Ct2), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", acctest.CtTrue), ), }, { Config: testAccDBInstanceConfig_logDeliveryConfigurationNotEnabled(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", "2"), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", acctest.Ct2), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", acctest.CtFalse), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccTimestreamInfluxDBDbInstance_publiclyAccessible(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -168,33 +166,32 @@ func TestAccTimestreamInfluxDBDbInstance_publiclyAccessible(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDbInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_publiclyAccessible(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), - resource.TestCheckResourceAttrSet(resourceName, "endpoint"), - resource.TestCheckResourceAttr(resourceName, 
"publicly_accessible", "true"), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEndpoint), + resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtTrue), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccTimestreamInfluxDBDbInstance_deploymentTypeMultiAzStandby(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -210,7 +207,7 @@ func TestAccTimestreamInfluxDBDbInstance_deploymentTypeMultiAzStandby(t *testing { Config: testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName, acctest.Region()), Check: resource.ComposeTestCheckFunc( - testAccCheckDbInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), // DB instance will not be publicly accessible and will not have an endpoint. // DB instance will have a secondary availability zone. resource.TestCheckResourceAttrSet(resourceName, "secondary_availability_zone"), @@ -218,10 +215,9 @@ func TestAccTimestreamInfluxDBDbInstance_deploymentTypeMultiAzStandby(t *testing ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -233,7 +229,7 @@ func TestAccTimestreamInfluxDBDBInstance_username(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" testUsername := "testusername" @@ -250,15 +246,14 @@ func TestAccTimestreamInfluxDBDBInstance_username(t *testing.T) { { Config: testAccDBInstanceConfig_username(rName, testUsername), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), - resource.TestCheckResourceAttr(resourceName, "username", testUsername), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, names.AttrUsername, testUsername), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -270,7 +265,7 @@ func TestAccTimestreamInfluxDBDBInstance_bucket(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" testBucketName := "testbucket" @@ -287,15 +282,14 @@ func TestAccTimestreamInfluxDBDBInstance_bucket(t *testing.T) { { Config: 
testAccDBInstanceConfig_bucket(rName, testBucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), - resource.TestCheckResourceAttr(resourceName, "bucket", testBucketName), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, names.AttrBucket, testBucketName), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -307,7 +301,7 @@ func TestAccTimestreamInfluxDBDBInstance_organization(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" testOrganizationName := "testorganization" @@ -324,15 +318,14 @@ func TestAccTimestreamInfluxDBDBInstance_organization(t *testing.T) { { Config: testAccDBInstanceConfig_organization(rName, testOrganizationName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "organization", testOrganizationName), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -344,7 +337,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance1, dbInstance2, dbInstance3 timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -360,7 +353,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { { Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), @@ -370,7 +363,8 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { { Config: testAccDBInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -382,7 +376,8 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { { Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: 
resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance3), + testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance3), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), @@ -390,10 +385,9 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -405,7 +399,7 @@ func TestAccTimestreamInfluxDBDBInstance_dbInstanceType(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbinstance timestreaminfluxdb.GetDbInstanceOutput + var dbInstance timestreaminfluxdb.GetDbInstanceOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -421,57 +415,98 @@ func TestAccTimestreamInfluxDBDBInstance_dbInstanceType(t *testing.T) { { Config: testAccDBInstanceConfig_dbInstanceTypeLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.large"), ), }, { Config: testAccDBInstanceConfig_dbInstanceTypeXLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.xlarge"), ), }, { Config: testAccDBInstanceConfig_dbInstanceType2XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.2xlarge"), ), }, { Config: testAccDBInstanceConfig_dbInstanceType4XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.4xlarge"), ), }, { Config: testAccDBInstanceConfig_dbInstanceType8XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.8xlarge"), ), }, { Config: testAccDBInstanceConfig_dbInstanceType12XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.12xlarge"), ), }, { Config: testAccDBInstanceConfig_dbInstanceType16XLarge(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbinstance), + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.16xlarge"), ), }, { - ResourceName: 
resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_dbStorageType(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbInstance timestreaminfluxdb.GetDbInstanceOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDBInstanceConfig_dbStorageTypeT2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT2)), + ), + }, + { + Config: testAccDBInstanceConfig_dbStorageTypeT3(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT3)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -490,7 +525,7 @@ func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { Identifier: aws.String(rs.Primary.ID), } _, err := conn.GetDbInstance(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil } if err != nil { @@ -504,7 +539,7 @@ func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckDBInstanceExists(ctx context.Context, name string, dbinstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { +func testAccCheckDBInstanceExists(ctx context.Context, name string, dbInstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -524,7 +559,7 @@ func testAccCheckDBInstanceExists(ctx context.Context, name string, dbinstance * return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBInstance, rs.Primary.ID, err) } - *dbinstance = *resp + *dbInstance = *resp return nil } @@ -555,7 +590,7 @@ func testAccCheckDBInstanceNotRecreated(before, after *timestreaminfluxdb.GetDbI } func testAccDBInstanceConfig_base() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "test_vpc" { cidr_block = "10.0.0.0/16" } @@ -568,7 +603,7 @@ resource "aws_subnet" "test_subnet" { resource "aws_security_group" "test_security_group" { vpc_id = aws_vpc.test_vpc.id } -`) +` } // Minimal configuration. 
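The acceptance tests above build their configurations by appending a per-test aws_timestreaminfluxdb_db_instance block to the shared VPC/subnet/security-group base. A rough, self-contained sketch of that composition follows; configCompose is a stand-in for the provider's acctest.ConfigCompose helper, and the subnet CIDR and some instance argument values are assumptions, since the hunks above elide them.

package main

import (
	"fmt"
	"strings"
)

// configCompose mimics the string-joining behaviour of the acctest helper used above.
func configCompose(configs ...string) string {
	return strings.Join(configs, "\n")
}

func baseConfig() string {
	return `
resource "aws_vpc" "test_vpc" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "test_subnet" {
  vpc_id     = aws_vpc.test_vpc.id
  cidr_block = "10.0.1.0/24" # placeholder; the diff above does not show the real subnet CIDR
}

resource "aws_security_group" "test_security_group" {
  vpc_id = aws_vpc.test_vpc.id
}
`
}

func dbInstanceConfigBasic(rName string) string {
	// Argument values mirror the test configs shown in the patches above.
	return configCompose(baseConfig(), fmt.Sprintf(`
resource "aws_timestreaminfluxdb_db_instance" "test" {
  name                   = %[1]q
  allocated_storage      = 20
  db_instance_type       = "db.influx.medium"
  password               = "testpassword"
  vpc_subnet_ids         = [aws_subnet.test_subnet.id]
  vpc_security_group_ids = [aws_security_group.test_security_group.id]
}
`, rName))
}

func main() {
	fmt.Println(dbInstanceConfigBasic("tf-acc-test-example"))
}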
From 9b542f8dd42d9393b8fbc1a00ee63945abe9d368 Mon Sep 17 00:00:00 2001 From: Trevor Bonas Date: Fri, 14 Jun 2024 16:00:10 -0700 Subject: [PATCH 06/21] Increase minimum db_storage_type storage amount --- internal/service/timestreaminfluxdb/db_instance_test.go | 8 ++++---- .../docs/r/timestreaminfluxdb_db_instance.html.markdown | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 89bea112e06..923858eeedc 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -873,14 +873,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDBInstanceConfig_dbStorageTypeT2(rName string) string { return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] vpc_security_group_ids = [aws_security_group.test_security_group.id] db_instance_type = "db.influx.medium" name = %[1]q - db_storage_type = "InfluxIOIncludedT2" + allocated_storage = 400 + db_storage_type = "InfluxIOIncludedT2" } `, rName)) } @@ -888,14 +888,14 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDBInstanceConfig_dbStorageTypeT3(rName string) string { return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] vpc_security_group_ids = [aws_security_group.test_security_group.id] db_instance_type = "db.influx.medium" name = %[1]q - db_storage_type = "InfluxIOIncludedT3" + allocated_storage = 400 + db_storage_type = "InfluxIOIncludedT3" } `, rName)) } diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown index 00b9e6d8f36..d5e21b6881a 100644 --- a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown @@ -222,7 +222,7 @@ The following arguments are optional: * `bucket` - (Default `"bucket"`) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. * `db_parameter_group_identifier` - (Optional) ID of the DB parameter group assigned to your DB instance. If added to an existing Timestream for InfluxDB instance or given a new value, will cause an in-place update to the instance. However, if an instance already has a value for `db_parameter_group_identifier`, removing `db_parameter_group_identifier` will cause the instance to be destroyed and recreated. -* `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT1"`. 
+* `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`. If you use `"InfluxIOIncludedT2"` or `"InfluxIOIncludedT3"`, the minimum value for `allocated_storage` is 400. * `deployment_type` - (Default `"SINGLE_AZ"`) Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. Valid options are: `"SINGLE_AZ"`, `"WITH_MULTIAZ_STANDBY"`. * `log_delivery_configuration` - (Optional) Configuration for sending InfluxDB engine logs to a specified S3 bucket. * `organization` - (Default `"organization"`) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. From 927766971802d110c2d013537e6f2649cbf4b00f Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Mon, 29 Jul 2024 13:04:44 -0500 Subject: [PATCH 07/21] aws_timestreaminfluxdb_db_instance: simplify schema and use autoflex in create --- .../service/timestreaminfluxdb/db_instance.go | 286 +++++++----------- 1 file changed, 103 insertions(+), 183 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 1b894b353cf..d655e4fd3bb 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -24,12 +24,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" @@ -39,12 +37,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// Function annotations are used for resource registration to the Provider. DO NOT EDIT.
// @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="Db Instance") // @Tags(identifierAttribute="arn") func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, error) { @@ -97,17 +95,13 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ }, names.AttrBucket: schema.StringAttribute{ Optional: true, - Computed: true, - Default: stringdefault.StaticString(DefaultBucketValue), + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, Validators: []validator.String{ - stringvalidator.LengthAtLeast(2), - stringvalidator.LengthAtMost(64), + stringvalidator.LengthBetween(2, 64), stringvalidator.RegexMatches( - // Taken from the model for TimestreamInfluxDB in AWS SDK Go V2 - // https://github.com/aws/aws-sdk-go-v2/blob/8209abb7fa1aeb513228b4d8c1a459aeb6209d4d/codegen/sdk-codegen/aws-models/timestream-influxdb.json#L768 regexache.MustCompile("^[^_][^\"]*$"), "", ), @@ -117,40 +111,23 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ that each data point persists). A bucket belongs to an organization.`, }, "db_instance_type": schema.StringAttribute{ - Required: true, + CustomType: fwtypes.StringEnumType[awstypes.DbInstanceType](), + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, - Validators: []validator.String{ - stringvalidator.OneOf( - string(awstypes.DbInstanceTypeDbInfluxMedium), - string(awstypes.DbInstanceTypeDbInfluxLarge), - string(awstypes.DbInstanceTypeDbInfluxXlarge), - string(awstypes.DbInstanceTypeDbInflux2xlarge), - string(awstypes.DbInstanceTypeDbInflux4xlarge), - string(awstypes.DbInstanceTypeDbInflux8xlarge), - string(awstypes.DbInstanceTypeDbInflux12xlarge), - string(awstypes.DbInstanceTypeDbInflux16xlarge), - ), - }, Description: `The Timestream for InfluxDB DB instance type to run InfluxDB on.`, }, "db_parameter_group_identifier": schema.StringAttribute{ Optional: true, - // Once a parameter group is associated with a DB instance, it cannot be removed. - // Therefore, if db_parameter_group_identifier is removed, a replace of the DB instance - // is necessary. 
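The comment being removed above explains why dropping db_parameter_group_identifier must recreate the instance, and the dbParameterGroupIdentifierReplaceIf modifier registered below implements that rule: replace only when the prior state has a value and the new plan does not. A tiny standalone sketch of that decision, using plain booleans instead of the framework's request/response types:

package main

import "fmt"

// requiresReplace mirrors the condition in dbParameterGroupIdentifierReplaceIf:
// replacement is needed only when a previously set identifier is being removed.
func requiresReplace(stateIsNull, planIsNull bool) bool {
	return !stateIsNull && planIsNull
}

func main() {
	fmt.Println(requiresReplace(false, true))  // value removed -> true, replace the instance
	fmt.Println(requiresReplace(false, false)) // value changed -> false, update in place
	fmt.Println(requiresReplace(true, false))  // value newly added -> false, update in place
}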
PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplaceIf( - statementReplaceIf, "Replace db_parameter_group_identifier diff", "Replace db_parameter_group_identifier diff", + dbParameterGroupIdentifierReplaceIf, "Replace db_parameter_group_identifier diff", "Replace db_parameter_group_identifier diff", ), }, Validators: []validator.String{ - stringvalidator.LengthAtLeast(3), - stringvalidator.LengthAtMost(64), + stringvalidator.LengthBetween(3, 64), stringvalidator.RegexMatches( - // Taken from the model for TimestreamInfluxDB in AWS SDK Go V2 - // https://github.com/aws/aws-sdk-go-v2/blob/8209abb7fa1aeb513228b4d8c1a459aeb6209d4d/codegen/sdk-codegen/aws-models/timestream-influxdb.json#L1390 regexache.MustCompile("^[a-zA-Z0-9]+$"), "", ), @@ -158,18 +135,12 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Description: `The id of the DB parameter group assigned to your DB instance.`, }, "db_storage_type": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(string(awstypes.DbStorageTypeInfluxIoIncludedT1)), + CustomType: fwtypes.StringEnumType[awstypes.DbStorageType](), + Optional: true, + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), - }, - Validators: []validator.String{ - stringvalidator.OneOf( - string(awstypes.DbStorageTypeInfluxIoIncludedT1), - string(awstypes.DbStorageTypeInfluxIoIncludedT2), - string(awstypes.DbStorageTypeInfluxIoIncludedT3), - ), + stringplanmodifier.UseStateForUnknown(), }, Description: `The Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according @@ -177,17 +148,12 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Influx IO Included 16000 IOPS.`, }, "deployment_type": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(string(awstypes.DeploymentTypeSingleAz)), + CustomType: fwtypes.StringEnumType[awstypes.DeploymentType](), + Optional: true, + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), - }, - Validators: []validator.String{ - stringvalidator.OneOf( - string(awstypes.DeploymentTypeSingleAz), - string(awstypes.DeploymentTypeWithMultiazStandby), - ), + stringplanmodifier.UseStateForUnknown(), }, Description: `Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability.`, @@ -210,11 +176,8 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ stringplanmodifier.RequiresReplace(), }, Validators: []validator.String{ - stringvalidator.LengthAtLeast(3), - stringvalidator.LengthAtMost(40), + stringvalidator.LengthBetween(3, 40), stringvalidator.RegexMatches( - // Taken from the model for TimestreamInfluxDB in AWS SDK Go V2 - // https://github.com/aws/aws-sdk-go-v2/blob/8209abb7fa1aeb513228b4d8c1a459aeb6209d4d/codegen/sdk-codegen/aws-models/timestream-influxdb.json#L1215 regexache.MustCompile("^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$"), "", ), @@ -227,15 +190,12 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ names.AttrTags: tftags.TagsAttribute(), names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), "organization": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(DefaultOrganizationValue), + Required: 
true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, Validators: []validator.String{ - stringvalidator.LengthAtLeast(1), - stringvalidator.LengthAtMost(64), + stringvalidator.LengthBetween(1, 64), }, Description: `The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users.`, @@ -247,8 +207,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ stringplanmodifier.RequiresReplace(), }, Validators: []validator.String{ - stringvalidator.LengthAtLeast(8), - stringvalidator.LengthAtMost(64), + stringvalidator.LengthBetween(8, 64), stringvalidator.RegexMatches(regexache.MustCompile("^[a-zA-Z0-9]+$"), ""), }, Description: `The password of the initial admin user created in InfluxDB. This password will @@ -259,7 +218,6 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ names.AttrPubliclyAccessible: schema.BoolAttribute{ Optional: true, Computed: true, - Default: booldefault.StaticBool(false), PlanModifiers: []planmodifier.Bool{ boolplanmodifier.RequiresReplace(), }, @@ -270,14 +228,12 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Description: `The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance.`, }, - names.AttrStatus: schema.StringAttribute{ - Computed: true, - Description: `The status of the DB instance.`, - }, + //names.AttrStatus: schema.StringAttribute{ + // Computed: true, + // Description: `The status of the DB instance.`, + //}, names.AttrUsername: schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(DefaultUsernameValue), + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, @@ -297,8 +253,8 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Manager in your account`, }, names.AttrVPCSecurityGroupIDs: schema.SetAttribute{ - Required: true, - ElementType: types.StringType, + CustomType: fwtypes.SetOfStringType, + Required: true, PlanModifiers: []planmodifier.Set{ setplanmodifier.RequiresReplace(), }, @@ -313,14 +269,13 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Description: `A list of VPC security group IDs to associate with the DB instance.`, }, "vpc_subnet_ids": schema.SetAttribute{ - Required: true, - ElementType: types.StringType, + CustomType: fwtypes.SetOfStringType, + Required: true, PlanModifiers: []planmodifier.Set{ setplanmodifier.RequiresReplace(), }, Validators: []validator.Set{ - setvalidator.SizeAtLeast(1), - setvalidator.SizeAtMost(3), + setvalidator.SizeBetween(1, 3), setvalidator.ValueStringsAre( stringvalidator.LengthAtMost(64), stringvalidator.RegexMatches(regexache.MustCompile("^subnet-[a-z0-9]+$"), ""), @@ -332,26 +287,32 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ }, Blocks: map[string]schema.Block{ "log_delivery_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[logDeliveryConfigurationData](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, Description: `Configuration for sending InfluxDB engine logs to a specified S3 bucket.`, NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ - "s3_configuration": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - names.AttrBucketName: schema.StringAttribute{ - Required: 
true, - Validators: []validator.String{ - stringvalidator.LengthAtLeast(3), - stringvalidator.LengthAtMost(63), - stringvalidator.RegexMatches(regexache.MustCompile("^[0-9a-z]+[0-9a-z\\.\\-]*[0-9a-z]+$"), ""), + "s3_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3ConfigurationData](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrBucketName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 63), + stringvalidator.RegexMatches(regexache.MustCompile("^[0-9a-z]+[0-9a-z\\.\\-]*[0-9a-z]+$"), ""), + }, + Description: `The name of the S3 bucket to deliver logs to.`, + }, + names.AttrEnabled: schema.BoolAttribute{ + Required: true, + Description: `Indicates whether log delivery to the S3 bucket is enabled.`, }, - Description: `The name of the S3 bucket to deliver logs to.`, - }, - names.AttrEnabled: schema.BoolAttribute{ - Required: true, - Description: `Indicates whether log delivery to the S3 bucket is enabled.`, }, }, Description: `Configuration for S3 bucket log delivery.`, @@ -368,7 +329,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ } } -func statementReplaceIf(ctx context.Context, req planmodifier.StringRequest, resp *stringplanmodifier.RequiresReplaceIfFuncResponse) { +func dbParameterGroupIdentifierReplaceIf(ctx context.Context, req planmodifier.StringRequest, resp *stringplanmodifier.RequiresReplaceIfFuncResponse) { if req.State.Raw.IsNull() || req.Plan.Raw.IsNull() { return } @@ -379,7 +340,7 @@ func statementReplaceIf(ctx context.Context, req planmodifier.StringRequest, res return } - dbParameterGroupIdentifierRemoved := (!state.DBParameterGroupIdentifier.IsNull() && plan.DBParameterGroupIdentifier.IsNull()) + dbParameterGroupIdentifierRemoved := !state.DBParameterGroupIdentifier.IsNull() && plan.DBParameterGroupIdentifier.IsNull() resp.RequiresReplace = dbParameterGroupIdentifierRemoved } @@ -393,46 +354,15 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ return } - in := ×treaminfluxdb.CreateDbInstanceInput{ - AllocatedStorage: aws.Int32(int32(plan.AllocatedStorage.ValueInt64())), - DbInstanceType: awstypes.DbInstanceType(plan.DBInstanceType.ValueString()), - Name: aws.String(plan.Name.ValueString()), - Password: aws.String(plan.Password.ValueString()), - VpcSecurityGroupIds: flex.ExpandFrameworkStringValueSet(ctx, plan.VPCSecurityGroupIDs), - VpcSubnetIds: flex.ExpandFrameworkStringValueSet(ctx, plan.VPCSubnetIDs), - Tags: getTagsIn(ctx), - } - if !plan.Bucket.IsNull() { - in.Bucket = aws.String(plan.Bucket.ValueString()) - } - if !plan.DBParameterGroupIdentifier.IsNull() { - in.DbParameterGroupIdentifier = aws.String(plan.DBParameterGroupIdentifier.ValueString()) - } - if !plan.DBStorageType.IsNull() { - in.DbStorageType = awstypes.DbStorageType(plan.DBStorageType.ValueString()) - } - if !plan.DeploymentType.IsNull() { - in.DeploymentType = awstypes.DeploymentType(plan.DeploymentType.ValueString()) - } - if !plan.LogDeliveryConfiguration.IsNull() { - var tfList []logDeliveryConfigurationData - resp.Diagnostics.Append(plan.LogDeliveryConfiguration.ElementsAs(ctx, &tfList, false)...) 
- if resp.Diagnostics.HasError() { - return - } - in.LogDeliveryConfiguration = expandLogDeliveryConfiguration(tfList) - } - if !plan.Organization.IsNull() { - in.Organization = aws.String(plan.Organization.ValueString()) - } - if !plan.PubliclyAccessible.IsNull() { - in.PubliclyAccessible = aws.Bool(plan.PubliclyAccessible.ValueBool()) - } - if !plan.Username.IsNull() { - in.Username = aws.String(plan.Username.ValueString()) + in := timestreaminfluxdb.CreateDbInstanceInput{} + + resp.Diagnostics.Append(flex.Expand(ctx, plan, &in)...) + + if resp.Diagnostics.HasError() { + return } - out, err := conn.CreateDbInstance(ctx, in) + out, err := conn.CreateDbInstance(ctx, &in) if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBInstance, plan.Name.String(), err), @@ -440,6 +370,7 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ ) return } + if out == nil || out.Id == nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBInstance, plan.Name.String(), nil), @@ -448,13 +379,12 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ return } - // Computed attributes - plan.ARN = flex.StringToFramework(ctx, out.Arn) - plan.ID = flex.StringToFramework(ctx, out.Id) - plan.AvailabilityZone = flex.StringToFramework(ctx, out.AvailabilityZone) + state := plan + state.ID = flex.StringToFramework(ctx, out.Id) createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - _, err = waitDBInstanceCreated(ctx, conn, plan.ID.ValueString(), createTimeout) + output, err := waitDBInstanceCreated(ctx, conn, state.ID.ValueString(), createTimeout) + if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForCreation, ResNameDBInstance, plan.Name.String(), err), @@ -463,26 +393,13 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ return } - readOut, err := findDBInstanceByID(ctx, conn, plan.ID.ValueString()) - if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) - return - } - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, plan.ID.String(), err), - err.Error(), - ) + resp.Diagnostics.Append(flex.Flatten(ctx, output, &state)...) + + if resp.Diagnostics.HasError() { return } - // Computed attributes only set after resource is finished creating - plan.Endpoint = flex.StringToFramework(ctx, readOut.Endpoint) - plan.InfluxAuthParametersSecretARN = flex.StringToFramework(ctx, readOut.InfluxAuthParametersSecretArn) - plan.Status = flex.StringToFramework(ctx, (*string)(&readOut.Status)) - plan.SecondaryAvailabilityZone = flex.StringToFramework(ctx, readOut.SecondaryAvailabilityZone) - - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
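With this patch, Create hands the whole plan to flex.Expand and copies the API response back with flex.Flatten rather than assigning each input field by hand. The provider's AutoFlex machinery is considerably more capable (framework types, nested objects, name overrides); the sketch below only illustrates the core idea of matching struct fields by name via reflection, and the planModel and createInput types are invented for the example.

package main

import (
	"fmt"
	"reflect"
)

// expandByName copies identically named, identically typed exported fields
// from src to dst. It is a toy stand-in for flex.Expand / flex.Flatten.
func expandByName(src, dst any) {
	sv := reflect.ValueOf(src).Elem()
	dv := reflect.ValueOf(dst).Elem()
	for i := 0; i < sv.NumField(); i++ {
		name := sv.Type().Field(i).Name
		target := dv.FieldByName(name)
		if target.IsValid() && target.CanSet() && target.Type() == sv.Field(i).Type() {
			target.Set(sv.Field(i))
		}
	}
}

// Hypothetical plan and API input shapes, for illustration only.
type planModel struct {
	Name             string
	AllocatedStorage int64
	Password         string
}

type createInput struct {
	Name             string
	AllocatedStorage int64
}

func main() {
	plan := planModel{Name: "example", AllocatedStorage: 20, Password: "secret"}
	var in createInput
	expandByName(&plan, &in)
	fmt.Printf("%+v\n", in) // fields without a counterpart, like Password, are skipped
}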
} func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -495,10 +412,12 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, } out, err := findDBInstanceByID(ctx, conn, state.ID.ValueString()) + if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return } + if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), @@ -731,9 +650,9 @@ func (r *resourceDBInstance) ModifyPlan(ctx context.Context, request resource.Mo r.SetTagsAll(ctx, request, response) } -func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.CreateDbInstanceOutput, error) { +func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(string(awstypes.StatusCreating), string(awstypes.StatusUpdating), string(awstypes.StatusModifying)), + Pending: enum.Slice(awstypes.StatusCreating, awstypes.StatusUpdating, awstypes.StatusModifying), Target: enum.Slice(awstypes.StatusAvailable), Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, @@ -742,7 +661,7 @@ func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*timestreaminfluxdb.CreateDbInstanceOutput); ok { + if out, ok := outputRaw.(*timestreaminfluxdb.GetDbInstanceOutput); ok { return out, err } @@ -805,14 +724,15 @@ func findDBInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id } out, err := conn.GetDbInstance(ctx, in) - if err != nil { - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, } + } + if err != nil { return nil, err } @@ -883,34 +803,34 @@ func expandS3Configuration(tfObj s3ConfigurationData) *awstypes.S3Configuration } type resourceDBInstanceData struct { - AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` - ARN types.String `tfsdk:"arn"` - AvailabilityZone types.String `tfsdk:"availability_zone"` - Bucket types.String `tfsdk:"bucket"` - DBInstanceType types.String `tfsdk:"db_instance_type"` - DBParameterGroupIdentifier types.String `tfsdk:"db_parameter_group_identifier"` - DBStorageType types.String `tfsdk:"db_storage_type"` - DeploymentType types.String `tfsdk:"deployment_type"` - Endpoint types.String `tfsdk:"endpoint"` - ID types.String `tfsdk:"id"` - InfluxAuthParametersSecretARN types.String `tfsdk:"influx_auth_parameters_secret_arn"` - LogDeliveryConfiguration types.List `tfsdk:"log_delivery_configuration"` - Name types.String `tfsdk:"name"` - Organization types.String `tfsdk:"organization"` - Password types.String `tfsdk:"password"` - PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` - SecondaryAvailabilityZone types.String `tfsdk:"secondary_availability_zone"` - Status types.String `tfsdk:"status"` - Tags types.Map `tfsdk:"tags"` - TagsAll types.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value `tfsdk:"timeouts"` - Username types.String `tfsdk:"username"` - VPCSecurityGroupIDs types.Set `tfsdk:"vpc_security_group_ids"` - VPCSubnetIDs types.Set 
`tfsdk:"vpc_subnet_ids"` + AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` + ARN types.String `tfsdk:"arn"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Bucket types.String `tfsdk:"bucket"` + DBInstanceType fwtypes.StringEnum[awstypes.DbInstanceType] `tfsdk:"db_instance_type"` + DBParameterGroupIdentifier types.String `tfsdk:"db_parameter_group_identifier"` + DBStorageType fwtypes.StringEnum[awstypes.DbStorageType] `tfsdk:"db_storage_type"` + DeploymentType fwtypes.StringEnum[awstypes.DeploymentType] `tfsdk:"deployment_type"` + Endpoint types.String `tfsdk:"endpoint"` + ID types.String `tfsdk:"id"` + InfluxAuthParametersSecretARN types.String `tfsdk:"influx_auth_parameters_secret_arn"` + LogDeliveryConfiguration fwtypes.ListNestedObjectValueOf[logDeliveryConfigurationData] `tfsdk:"log_delivery_configuration"` + Name types.String `tfsdk:"name"` + Organization types.String `tfsdk:"organization"` + Password types.String `tfsdk:"password"` + PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` + SecondaryAvailabilityZone types.String `tfsdk:"secondary_availability_zone"` + Status types.String `tfsdk:"status"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Username types.String `tfsdk:"username"` + VPCSecurityGroupIDs fwtypes.SetValueOf[types.String] `tfsdk:"vpc_security_group_ids"` + VPCSubnetIDs fwtypes.SetValueOf[types.String] `tfsdk:"vpc_subnet_ids"` } type logDeliveryConfigurationData struct { - S3Configuration s3ConfigurationData `tfsdk:"s3_configuration"` + S3Configuration fwtypes.ListNestedObjectValueOf[s3ConfigurationData] `tfsdk:"s3_configuration"` } type s3ConfigurationData struct { From 20d198cc204148440af475da8375fcee27937799 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Mon, 29 Jul 2024 13:14:19 -0500 Subject: [PATCH 08/21] aws_timestreaminfluxdb_db_instance: use autoflex in read --- .../service/timestreaminfluxdb/db_instance.go | 112 ++---------------- 1 file changed, 11 insertions(+), 101 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index d655e4fd3bb..02af2566fe7 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -5,13 +5,11 @@ package timestreaminfluxdb import ( "context" - "encoding/json" "errors" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/secretsmanager" "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" @@ -21,7 +19,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" @@ -68,6 +65,7 @@ const ( type resourceDBInstance struct { framework.ResourceWithConfigure framework.WithTimeouts + framework.WithImportByID } func (r *resourceDBInstance) Metadata(_ context.Context, req 
resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -417,43 +415,7 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, resp.State.RemoveResource(ctx) return } - - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) - return - } - state.ARN = flex.StringToFramework(ctx, out.Arn) - state.AllocatedStorage = flex.Int32ToFramework(ctx, out.AllocatedStorage) - state.AvailabilityZone = flex.StringToFramework(ctx, out.AvailabilityZone) - state.DBInstanceType = flex.StringToFramework(ctx, (*string)(&out.DbInstanceType)) - state.DBParameterGroupIdentifier = flex.StringToFramework(ctx, out.DbParameterGroupIdentifier) - state.DBStorageType = flex.StringToFramework(ctx, (*string)(&out.DbStorageType)) - state.DeploymentType = flex.StringToFramework(ctx, (*string)(&out.DeploymentType)) - state.Endpoint = flex.StringToFramework(ctx, out.Endpoint) - state.ID = flex.StringToFramework(ctx, out.Id) - state.InfluxAuthParametersSecretARN = flex.StringToFramework(ctx, out.InfluxAuthParametersSecretArn) - logDeliveryConfiguration, d := flattenLogDeliveryConfiguration(ctx, out.LogDeliveryConfiguration) - resp.Diagnostics.Append(d...) - state.LogDeliveryConfiguration = logDeliveryConfiguration - state.Name = flex.StringToFramework(ctx, out.Name) - state.PubliclyAccessible = flex.BoolToFramework(ctx, out.PubliclyAccessible) - state.SecondaryAvailabilityZone = flex.StringToFramework(ctx, out.SecondaryAvailabilityZone) - state.Status = flex.StringToFramework(ctx, (*string)(&out.Status)) - state.VPCSecurityGroupIDs = flex.FlattenFrameworkStringValueSet[string](ctx, out.VpcSecurityGroupIds) - state.VPCSubnetIDs = flex.FlattenFrameworkStringValueSet[string](ctx, out.VpcSubnetIds) - - // timestreaminfluxdb.GetDbInstance will not return InfluxDB managed attributes, like username, - // bucket, organization, or password. All of these attributes are stored in a secret indicated by - // out.InfluxAuthParametersSecretArn. To support importing, these attributes must be read from the - // secret. 
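Read in this region relies on findDBInstanceByID returning a retry.NotFoundError so that tfresource.NotFound can detect a deleted instance and drop it from state. A self-contained sketch of that finder pattern follows; resourceNotFoundException and notFoundError are local stand-ins for the AWS SDK and plugin SDK error types, and the instance IDs are made up.

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for awstypes.ResourceNotFoundException and retry.NotFoundError.
type resourceNotFoundException struct{ msg string }

func (e *resourceNotFoundException) Error() string { return e.msg }

type notFoundError struct{ lastError error }

func (e *notFoundError) Error() string { return "couldn't find resource: " + e.lastError.Error() }

// getDBInstance simulates the service call; it only "knows" one instance ID.
func getDBInstance(id string) (string, error) {
	if id != "db-known" {
		return "", &resourceNotFoundException{msg: "db instance " + id + " not found"}
	}
	return "available", nil
}

// findDBInstanceByID mirrors the finder: map the service's not-found error to
// a sentinel the caller can test for, and pass every other error through.
func findDBInstanceByID(id string) (string, error) {
	status, err := getDBInstance(id)
	var rnfe *resourceNotFoundException
	if errors.As(err, &rnfe) {
		return "", &notFoundError{lastError: err}
	}
	if err != nil {
		return "", err
	}
	return status, nil
}

func main() {
	if _, err := findDBInstanceByID("db-gone"); err != nil {
		var nfe *notFoundError
		// Read would remove the resource from state here instead of failing.
		fmt.Println("treat as deleted:", errors.As(err, &nfe))
	}
}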
- secretsConn := r.Meta().SecretsManagerClient(ctx) - secretsOut, err := secretsConn.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{ - SecretId: out.InfluxAuthParametersSecretArn, - }) if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), @@ -462,60 +424,11 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, return } - secrets := make(map[string]string) - if err := json.Unmarshal([]byte(aws.ToString(secretsOut.SecretString)), &secrets); err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) - return - } - if username, ok := secrets[names.AttrUsername]; ok { - state.Username = flex.StringValueToFramework[string](ctx, username) - } else { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) - return - } - if password, ok := secrets[names.AttrPassword]; ok { - state.Password = flex.StringValueToFramework[string](ctx, password) - } else { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) - return - } - if organization, ok := secrets["organization"]; ok { - state.Organization = flex.StringValueToFramework[string](ctx, organization) - } else { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) - return - } - if bucket, ok := secrets[names.AttrBucket]; ok { - state.Bucket = flex.StringValueToFramework[string](ctx, bucket) - } else { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) - return - } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - tags, err := listTags(ctx, conn, state.ARN.ValueString()) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, state.ID.String(), err), - err.Error(), - ) + if resp.Diagnostics.HasError() { return } - setTagsOut(ctx, Tags(tags)) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
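// A minimal sketch of the AutoFlex-based read, assuming the internal flex package matches
// exported fields by name (out.Arn -> state.ARN, out.AllocatedStorage -> state.AllocatedStorage,
// API enum values into the fwtypes.StringEnum fields, and so on); the per-field assignments
// that used to follow findDBInstanceByID collapse to:
//
//	out, err := findDBInstanceByID(ctx, conn, state.ID.ValueString())
//	// ...NotFound / error handling as above...
//	resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...)
//
// Username, password, organization and bucket live only in the InfluxDB auth secret and are
// no longer re-read from Secrets Manager here, which is why the acceptance tests below add
// them to ImportStateVerifyIgnore.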
} @@ -643,9 +556,6 @@ func (r *resourceDBInstance) Delete(ctx context.Context, req resource.DeleteRequ } } -func (r *resourceDBInstance) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root(names.AttrID), req, resp) -} func (r *resourceDBInstance) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { r.SetTagsAll(ctx, request, response) } @@ -668,10 +578,10 @@ func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, return nil, err } -func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.UpdateDbInstanceOutput, error) { +func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(string(awstypes.StatusModifying), string(awstypes.StatusUpdating)), - Target: enum.Slice(string(awstypes.StatusAvailable)), + Pending: enum.Slice(awstypes.StatusModifying, awstypes.StatusUpdating), + Target: enum.Slice(awstypes.StatusAvailable), Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -679,17 +589,17 @@ func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*timestreaminfluxdb.UpdateDbInstanceOutput); ok { + if out, ok := outputRaw.(*timestreaminfluxdb.GetDbInstanceOutput); ok { return out, err } return nil, err } -func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.DeleteDbInstanceOutput, error) { +func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(string(awstypes.StatusDeleting), string(awstypes.StatusModifying), string(awstypes.StatusUpdating), string(awstypes.StatusAvailable)), - Target: enum.Slice[string](), + Pending: enum.Slice(awstypes.StatusDeleting, awstypes.StatusModifying, awstypes.StatusUpdating, awstypes.StatusAvailable), + Target: []string{}, Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, @@ -697,7 +607,7 @@ func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*timestreaminfluxdb.DeleteDbInstanceOutput); ok { + if out, ok := outputRaw.(*timestreaminfluxdb.GetDbInstanceOutput); ok { return out, err } From 3ea72c088ee183d6d62c8ae6173a14972e34b036 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Mon, 29 Jul 2024 13:26:48 -0500 Subject: [PATCH 09/21] aws_timestreaminfluxdb_db_instance: use autoflex in update --- .../service/timestreaminfluxdb/db_instance.go | 140 +++--------------- 1 file changed, 22 insertions(+), 118 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 02af2566fe7..c2346a395e1 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -17,8 +17,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" 
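// A note on the waiter signatures changed above, assuming statusDBInstance refreshes via
// findDBInstanceByID (a GetDbInstance call): the Update and Delete waiters now assert the
// refresh result to *timestreaminfluxdb.GetDbInstanceOutput, the type the refresher actually
// returns, rather than the Update/Delete API output types:
//
//	outputRaw, err := stateConf.WaitForStateContext(ctx)
//	if out, ok := outputRaw.(*timestreaminfluxdb.GetDbInstanceOutput); ok {
//		return out, err
//	}
//	return nil, err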
"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" @@ -32,6 +30,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" @@ -412,6 +411,7 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, out, err := findDBInstanceByID(ctx, conn, state.ID.ValueString()) if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) resp.State.RemoveResource(ctx) return } @@ -443,30 +443,19 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ return } - // Only fields without RequireReplace() will cause an update. - // Any other field changes will cause the resource to be destroyed and recreated. - // for aws_timestreaminfluxdb_db_instance this is tags, log_delivery_configuration, and - // db_parameter_group_identifier. if !plan.DBParameterGroupIdentifier.Equal(state.DBParameterGroupIdentifier) || !plan.LogDeliveryConfiguration.Equal(state.LogDeliveryConfiguration) { - in := ×treaminfluxdb.UpdateDbInstanceInput{ + in := timestreaminfluxdb.UpdateDbInstanceInput{ Identifier: aws.String(plan.ID.ValueString()), } - if !plan.DBParameterGroupIdentifier.IsNull() && !plan.DBParameterGroupIdentifier.Equal(state.DBParameterGroupIdentifier) { - in.DbParameterGroupIdentifier = aws.String(plan.DBParameterGroupIdentifier.ValueString()) - } + resp.Diagnostics.Append(flex.Expand(ctx, plan, &in)...) - if !plan.LogDeliveryConfiguration.IsNull() && !plan.LogDeliveryConfiguration.Equal(state.LogDeliveryConfiguration) { - var tfList []logDeliveryConfigurationData - resp.Diagnostics.Append(plan.LogDeliveryConfiguration.ElementsAs(ctx, &tfList, false)...) 
- if resp.Diagnostics.HasError() { - return - } - in.LogDeliveryConfiguration = expandLogDeliveryConfiguration(tfList) + if resp.Diagnostics.HasError() { + return } - out, err := conn.UpdateDbInstance(ctx, in) + out, err := conn.UpdateDbInstance(ctx, &in) if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBInstance, plan.ID.String(), err), @@ -474,6 +463,7 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ ) return } + if out == nil || out.Id == nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBInstance, plan.ID.String(), nil), @@ -481,41 +471,23 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ ) return } - } - updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) - _, err := waitDBInstanceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForUpdate, ResNameDBInstance, plan.ID.String(), err), - err.Error(), - ) - return - } + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + output, err := waitDBInstanceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForUpdate, ResNameDBInstance, plan.ID.String(), err), + err.Error(), + ) + return + } - // Update status to current status - readOut, err := findDBInstanceByID(ctx, conn, plan.ID.ValueString()) - if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) - return - } - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionSetting, ResNameDBInstance, plan.ID.String(), err), - err.Error(), - ) - return + resp.Diagnostics.Append(flex.Flatten(ctx, output, &plan)...) + + if resp.Diagnostics.HasError() { + return + } } - // Setting computed attributes - plan.ARN = flex.StringToFramework(ctx, readOut.Arn) - plan.AvailabilityZone = flex.StringToFramework(ctx, readOut.AvailabilityZone) - plan.DBStorageType = flex.StringToFramework(ctx, (*string)(&readOut.DbStorageType)) - plan.DeploymentType = flex.StringToFramework(ctx, (*string)(&readOut.DeploymentType)) - plan.Endpoint = flex.StringToFramework(ctx, readOut.Endpoint) - plan.ID = flex.StringToFramework(ctx, readOut.Id) - plan.InfluxAuthParametersSecretARN = flex.StringToFramework(ctx, readOut.InfluxAuthParametersSecretArn) - plan.SecondaryAvailabilityZone = flex.StringToFramework(ctx, readOut.SecondaryAvailabilityZone) - plan.Status = flex.StringToFramework(ctx, (*string)(&readOut.Status)) resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) } @@ -653,65 +625,6 @@ func findDBInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id return out, nil } -func flattenLogDeliveryConfiguration(ctx context.Context, apiObject *awstypes.LogDeliveryConfiguration) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: logDeliveryConfigrationAttrTypes} - - if apiObject == nil { - return types.ListNull(elemType), diags - } - s3Configuration, d := flattenS3Configuration(ctx, apiObject.S3Configuration) - diags.Append(d...) - obj := map[string]attr.Value{ - "s3_configuration": s3Configuration, - } - objVal, d := types.ObjectValue(logDeliveryConfigrationAttrTypes, obj) - diags.Append(d...) 
- - listVal, d := types.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) - - return listVal, diags -} - -func flattenS3Configuration(ctx context.Context, apiObject *awstypes.S3Configuration) (types.Object, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: s3ConfigurationAttrTypes} - - if apiObject == nil { - return types.ObjectNull(elemType.AttrTypes), diags - } - - obj := map[string]attr.Value{ - names.AttrBucketName: flex.StringValueToFramework(ctx, *apiObject.BucketName), - names.AttrEnabled: flex.BoolToFramework(ctx, apiObject.Enabled), - } - objVal, d := types.ObjectValue(s3ConfigurationAttrTypes, obj) - diags.Append(d...) - - return objVal, diags -} - -func expandLogDeliveryConfiguration(tfList []logDeliveryConfigurationData) *awstypes.LogDeliveryConfiguration { - if len(tfList) == 0 { - return nil - } - - tfObj := tfList[0] - apiObject := &awstypes.LogDeliveryConfiguration{ - S3Configuration: expandS3Configuration(tfObj.S3Configuration), - } - return apiObject -} - -func expandS3Configuration(tfObj s3ConfigurationData) *awstypes.S3Configuration { - apiObject := &awstypes.S3Configuration{ - BucketName: aws.String(tfObj.BucketName.ValueString()), - Enabled: aws.Bool(tfObj.Enabled.ValueBool()), - } - return apiObject -} - type resourceDBInstanceData struct { AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` ARN types.String `tfsdk:"arn"` @@ -747,12 +660,3 @@ type s3ConfigurationData struct { BucketName types.String `tfsdk:"bucket_name"` Enabled types.Bool `tfsdk:"enabled"` } - -var logDeliveryConfigrationAttrTypes = map[string]attr.Type{ - "s3_configuration": types.ObjectType{AttrTypes: s3ConfigurationAttrTypes}, -} - -var s3ConfigurationAttrTypes = map[string]attr.Type{ - names.AttrBucketName: types.StringType, - names.AttrEnabled: types.BoolType, -} From b089374593b3d0c7ab3d2187e8d7eb005a68909a Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Mon, 29 Jul 2024 14:37:55 -0500 Subject: [PATCH 10/21] aws_timestreaminfluxdb_db_instance: cleanup --- .../service/timestreaminfluxdb/db_instance.go | 27 +- .../timestreaminfluxdb/db_instance_test.go | 263 +++--------------- 2 files changed, 54 insertions(+), 236 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index c2346a395e1..3fb3b4a8ebf 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -52,13 +52,7 @@ func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, e } const ( - // If not provided, CreateDbInstance will use the below default values - // for bucket and organization. These values need to be set in Terraform - // because GetDbInstance won't return them. 
- DefaultBucketValue = names.AttrBucket - DefaultOrganizationValue = "organization" - DefaultUsernameValue = "admin" - ResNameDBInstance = "DB Instance" + ResNameDBInstance = "DB Instance" ) type resourceDBInstance struct { @@ -80,8 +74,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ int64planmodifier.RequiresReplace(), }, Validators: []validator.Int64{ - int64validator.AtLeast(20), - int64validator.AtMost(16384), + int64validator.Between(20, 16384), }, Description: `The amount of storage to allocate for your DB storage type in GiB (gibibytes).`, }, @@ -89,9 +82,11 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ names.AttrAvailabilityZone: schema.StringAttribute{ Computed: true, Description: `The Availability Zone in which the DB instance resides.`, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, names.AttrBucket: schema.StringAttribute{ - Optional: true, Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), @@ -162,6 +157,9 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ names.AttrID: framework.IDAttribute(), "influx_auth_parameters_secret_arn": schema.StringAttribute{ Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, Description: `The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, @@ -217,18 +215,18 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ Computed: true, PlanModifiers: []planmodifier.Bool{ boolplanmodifier.RequiresReplace(), + boolplanmodifier.UseStateForUnknown(), }, Description: `Configures the DB instance with a public IP to facilitate access.`, }, "secondary_availability_zone": schema.StringAttribute{ Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, Description: `The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance.`, }, - //names.AttrStatus: schema.StringAttribute{ - // Computed: true, - // Description: `The status of the DB instance.`, - //}, names.AttrUsername: schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -643,7 +641,6 @@ type resourceDBInstanceData struct { Password types.String `tfsdk:"password"` PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` SecondaryAvailabilityZone types.String `tfsdk:"secondary_availability_zone"` - Status types.String `tfsdk:"status"` Tags types.Map `tfsdk:"tags"` TagsAll types.Map `tfsdk:"tags_all"` Timeouts timeouts.Value `tfsdk:"timeouts"` diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 923858eeedc..7b3905a07b4 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -51,20 +51,17 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { // DB instance will not be publicly accessible and will not have an endpoint. 
acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "timestream-influxdb", regexache.MustCompile(`db-instance/+.`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrAvailabilityZone), - resource.TestCheckResourceAttr(resourceName, names.AttrBucket, tftimestreaminfluxdb.DefaultBucketValue), resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.DeploymentTypeSingleAz)), resource.TestCheckResourceAttrSet(resourceName, "influx_auth_parameters_secret_arn"), - resource.TestCheckResourceAttr(resourceName, "organization", tftimestreaminfluxdb.DefaultOrganizationValue), resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StatusAvailable)), - resource.TestCheckResourceAttr(resourceName, names.AttrUsername, tftimestreaminfluxdb.DefaultUsernameValue), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"username", "password", "organization"}, }, }, }) @@ -205,7 +202,7 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName, acctest.Region()), + Config: testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), // DB instance will not be publicly accessible and will not have an endpoint. 
@@ -223,114 +220,6 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing }) } -func TestAccTimestreamInfluxDBDBInstance_username(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_timestreaminfluxdb_db_instance.test" - testUsername := "testusername" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDBInstanceConfig_username(rName, testUsername), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, names.AttrUsername, testUsername), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccTimestreamInfluxDBDBInstance_bucket(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_timestreaminfluxdb_db_instance.test" - testBucketName := "testbucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDBInstanceConfig_bucket(rName, testBucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, names.AttrBucket, testBucketName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccTimestreamInfluxDBDBInstance_organization(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_timestreaminfluxdb_db_instance.test" - testOrganizationName := "testorganization" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDBInstanceConfig_organization(rName, testOrganizationName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "organization", testOrganizationName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -589,40 
+478,34 @@ func testAccCheckDBInstanceNotRecreated(before, after *timestreaminfluxdb.GetDbI } } -func testAccDBInstanceConfig_base() string { - return ` -resource "aws_vpc" "test_vpc" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_subnet" "test_subnet" { - vpc_id = aws_vpc.test_vpc.id - cidr_block = "10.0.1.0/24" -} - -resource "aws_security_group" "test_security_group" { - vpc_id = aws_vpc.test_vpc.id +func testAccDBInstanceConfig_base(rName string, subnetCount int) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, subnetCount), ` +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id } -` +`) } // Minimal configuration. func testAccDBInstanceConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { + name = %[1]q allocated_storage = 20 + username = "admin" password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] + vpc_subnet_ids = [aws_subnet.test.id] + vpc_security_group_ids = [aws_security_group.test.id] db_instance_type = "db.influx.medium" - name = %[1]q + bucket = "initial" + organization = "organization" } `, rName)) } // Configuration with log_delivery_configuration set and enabled. func testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_s3_bucket" "test_s3_bucket" { bucket = %[1]q force_destroy = true @@ -648,11 +531,14 @@ resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 + username = "admin" password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] vpc_security_group_ids = [aws_security_group.test_security_group.id] db_instance_type = "db.influx.medium" publicly_accessible = false + bucket = "initial" + organization = "organization" name = %[1]q log_delivery_configuration { @@ -667,7 +553,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { // Configuration with log_delivery_configuration set but not enabled. func testAccDBInstanceConfig_logDeliveryConfigurationNotEnabled(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_s3_bucket" "test_s3_bucket" { bucket = %[1]q force_destroy = true @@ -693,11 +579,14 @@ resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 + username = "admin" password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] vpc_security_group_ids = [aws_security_group.test_security_group.id] db_instance_type = "db.influx.medium" publicly_accessible = false + bucket = "initial" + organization = "organization" name = %[1]q log_delivery_configuration { @@ -713,7 +602,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { // Configuration that is publicly accessible. An endpoint will be created // for the DB instance but no inbound rules will be defined, preventing access. 
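// Strictly, the security group in this configuration does get one self-referencing ingress
// rule (aws_vpc_security_group_ingress_rule with referenced_security_group_id set to the same
// group), so only traffic originating inside that group is allowed; the instance is still not
// reachable from the public internet even though publicly_accessible = true gives it an
// endpoint. The aws_internet_gateway / aws_route / aws_route_table_association resources exist
// only so the subnet can carry a public endpoint at all.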
func testAccDBInstanceConfig_publiclyAccessible(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_internet_gateway" "test_internet_gateway" { vpc_id = aws_vpc.test_vpc.id } @@ -749,92 +638,24 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { `, rName)) } -func testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName string, regionName string) string { - return fmt.Sprintf(` -resource "aws_vpc" "test_vpc" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_subnet" "test_subnet_1" { - vpc_id = aws_vpc.test_vpc.id - cidr_block = "10.0.1.0/24" - availability_zone = "%[2]sa" -} - -resource "aws_subnet" "test_subnet_2" { - vpc_id = aws_vpc.test_vpc.id - cidr_block = "10.0.2.0/24" - availability_zone = "%[2]sb" -} - -resource "aws_security_group" "test_security_group" { - vpc_id = aws_vpc.test_vpc.id -} - +func testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName string) string { + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 2), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet_1.id, aws_subnet.test_subnet_2.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] db_instance_type = "db.influx.medium" name = %[1]q deployment_type = "WITH_MULTIAZ_STANDBY" } -`, rName, regionName) -} - -func testAccDBInstanceConfig_username(rName, username string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - username = %[2]q -} -`, rName, username)) -} - -func testAccDBInstanceConfig_bucket(rName, bucketName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - bucket = %[2]q -} -`, rName, bucketName)) -} - -func testAccDBInstanceConfig_organization(rName, organizationName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - organization = %[2]q -} -`, rName, organizationName)) +`, rName)) } func testAccDBInstanceConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { 
allocated_storage = 20 password = "testpassword" @@ -852,7 +673,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 password = "testpassword" @@ -871,7 +692,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbStorageTypeT2(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] @@ -886,7 +707,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbStorageTypeT3(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] @@ -901,7 +722,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceTypeLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -916,7 +737,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceTypeXLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -931,7 +752,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceType2XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { db_storage_type = "InfluxIOIncludedT1" password = "testpassword" @@ -946,7 +767,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceType4XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -961,7 +782,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceType8XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -976,7 +797,7 @@ resource 
"aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceType12XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" @@ -991,7 +812,7 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } func testAccDBInstanceConfig_dbInstanceType16XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 db_storage_type = "InfluxIOIncludedT1" From 88d349be872eeafc9e83b947aad71da63a625321 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Mon, 29 Jul 2024 15:26:49 -0500 Subject: [PATCH 11/21] aws_timestreaminfluxdb_db_instance: fmt tests --- .../timestreaminfluxdb/db_instance_test.go | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 7b3905a07b4..838b9ea3e1b 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -61,7 +61,7 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"username", "password", "organization"}, + ImportStateVerifyIgnore: []string{"bucket", "username", "password", "organization"}, }, }, }) @@ -492,13 +492,13 @@ func testAccDBInstanceConfig_basic(rName string) string { resource "aws_timestreaminfluxdb_db_instance" "test" { name = %[1]q allocated_storage = 20 - username = "admin" + username = "admin" password = "testpassword" - vpc_subnet_ids = [aws_subnet.test.id] + vpc_subnet_ids = aws_subnet.test.*.id vpc_security_group_ids = [aws_security_group.test.id] db_instance_type = "db.influx.medium" - bucket = "initial" - organization = "organization" + bucket = "initial" + organization = "organization" } `, rName)) } @@ -531,14 +531,14 @@ resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 - username = "admin" + username = "admin" password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] vpc_security_group_ids = [aws_security_group.test_security_group.id] db_instance_type = "db.influx.medium" publicly_accessible = false - bucket = "initial" - organization = "organization" + bucket = "initial" + organization = "organization" name = %[1]q log_delivery_configuration { @@ -579,14 +579,14 @@ resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 - username = "admin" + username = "admin" password = "testpassword" vpc_subnet_ids = [aws_subnet.test_subnet.id] vpc_security_group_ids = [aws_security_group.test_security_group.id] db_instance_type = "db.influx.medium" publicly_accessible = false - bucket = "initial" - organization = "organization" + bucket = "initial" + organization = "organization" name = %[1]q log_delivery_configuration { From 37ce7c82c09f6014de26f50b676e4f7f00834535 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Tue, 30 Jul 2024 11:34:46 -0500 Subject: 
[PATCH 12/21] update tests --- .../timestreaminfluxdb/db_instance_test.go | 525 ++++-------------- 1 file changed, 111 insertions(+), 414 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 838b9ea3e1b..95d972fedaf 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -19,8 +19,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/errs" tftimestreaminfluxdb "github.com/hashicorp/terraform-provider-aws/internal/service/timestreaminfluxdb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -47,8 +47,6 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - // Verification of read-only attributes and default values. - // DB instance will not be publicly accessible and will not have an endpoint. acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "timestream-influxdb", regexache.MustCompile(`db-instance/+.`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrAvailabilityZone), resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), @@ -105,7 +103,6 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) } var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput - // The same random name will be used for both the DB instance and the log S3 bucket name. 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" @@ -119,29 +116,30 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) CheckDestroy: testAccCheckDBInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName), + Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.%", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.bucket_name", rName), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.enabled", acctest.CtTrue), ), }, { - Config: testAccDBInstanceConfig_logDeliveryConfigurationNotEnabled(rName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bucket", "username", "password", "organization"}, + }, + { + Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.%", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.bucket_name", rName), - resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.%", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.bucket_name", rName), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.enabled", acctest.CtFalse), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } @@ -220,186 +218,67 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing }) } -func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var dbInstance1, dbInstance2, dbInstance3 timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_timestreaminfluxdb_db_instance.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1), - ), - }, - { - Config: testAccDBInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), - testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), - ), - }, - { - Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance3), - testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance3), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccTimestreamInfluxDBDBInstance_dbInstanceType(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_timestreaminfluxdb_db_instance.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDBInstanceConfig_dbInstanceTypeLarge(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.large"), - ), - }, - { - Config: testAccDBInstanceConfig_dbInstanceTypeXLarge(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.xlarge"), - ), - }, - { - Config: testAccDBInstanceConfig_dbInstanceType2XLarge(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.2xlarge"), - ), - }, - { - Config: testAccDBInstanceConfig_dbInstanceType4XLarge(rName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.4xlarge"), - ), - }, - { - Config: testAccDBInstanceConfig_dbInstanceType8XLarge(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.8xlarge"), - ), - }, - { - Config: testAccDBInstanceConfig_dbInstanceType12XLarge(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.12xlarge"), - ), - }, - { - Config: testAccDBInstanceConfig_dbInstanceType16XLarge(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_instance_type", "db.influx.16xlarge"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccTimestreamInfluxDBDBInstance_dbStorageType(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_timestreaminfluxdb_db_instance.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDBInstanceConfig_dbStorageTypeT2(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT2)), - ), - }, - { - Config: testAccDBInstanceConfig_dbStorageTypeT3(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT3)), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} +//func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { +// ctx := acctest.Context(t) +// if testing.Short() { +// t.Skip("skipping long-running test in short mode") +// } +// +// var dbInstance1, dbInstance2, dbInstance3 timestreaminfluxdb.GetDbInstanceOutput +// rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) +// resourceName := "aws_timestreaminfluxdb_db_instance.test" +// +// resource.ParallelTest(t, resource.TestCase{ +// PreCheck: func() { +// acctest.PreCheck(ctx, t) +// testAccPreCheck(ctx, t) +// }, +// ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), +// ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, +// CheckDestroy: testAccCheckDBInstanceDestroy(ctx), +// Steps: []resource.TestStep{ +// { +// Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), +// 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), +// resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1), +// ), +// }, +// { +// Config: testAccDBInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), +// testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance2), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct2), +// resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1Updated), +// resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), +// ), +// }, +// { +// Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance3), +// testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance3), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), +// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), +// resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), +// ), +// }, +// { +// ResourceName: resourceName, +// ImportState: true, +// ImportStateVerify: true, +// }, +// }, +// }) +//} func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -410,13 +289,12 @@ func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { continue } - input := ×treaminfluxdb.GetDbInstanceInput{ - Identifier: aws.String(rs.Primary.ID), - } - _, err := conn.GetDbInstance(ctx, input) - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil + _, err := tftimestreaminfluxdb.FindDBInstanceByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue } + if err != nil { return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDBInstance, rs.Primary.ID, err) } @@ -440,9 +318,7 @@ func testAccCheckDBInstanceExists(ctx context.Context, name string, dbInstance * } conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) - resp, err := conn.GetDbInstance(ctx, ×treaminfluxdb.GetDbInstanceInput{ - Identifier: aws.String(rs.Primary.ID), - }) + resp, err := tftimestreaminfluxdb.FindDBInstanceByID(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBInstance, rs.Primary.ID, err) @@ -504,9 +380,9 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } // Configuration with log_delivery_configuration set and enabled. 
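// The "enabled" and "not enabled" variants are folded into this single generator, which now
// takes an enabled flag; the test steps above exercise it roughly as:
//
//	Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, true)  // logging on
//	Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, false) // logging off
//
// so toggling log_delivery_configuration is verified as an in-place update
// (testAccCheckDBInstanceNotRecreated) rather than a replacement.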
-func testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName string) string { +func testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName string, enabled bool) string { return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_s3_bucket" "test_s3_bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = true } @@ -519,7 +395,7 @@ data "aws_iam_policy_document" "allow_timestreaminfluxdb" { identifiers = ["timestream-influxdb.amazonaws.com"] } resources = [ - "${aws_s3_bucket.test_s3_bucket.arn}/*" + "${aws_s3_bucket.test.arn}/*" ] } } @@ -530,108 +406,61 @@ resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { } resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - username = "admin" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - publicly_accessible = false - bucket = "initial" - organization = "organization" name = %[1]q - - log_delivery_configuration { - s3_configuration { - bucket_name = %[1]q - enabled = true - } - } -} -`, rName)) -} - -// Configuration with log_delivery_configuration set but not enabled. -func testAccDBInstanceConfig_logDeliveryConfigurationNotEnabled(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_s3_bucket" "test_s3_bucket" { - bucket = %[1]q - force_destroy = true -} - -data "aws_iam_policy_document" "allow_timestreaminfluxdb" { - statement { - actions = ["s3:PutObject"] - principals { - type = "Service" - identifiers = ["timestream-influxdb.amazonaws.com"] - } - resources = [ - "${aws_s3_bucket.test_s3_bucket.arn}/*" - ] - } -} - -resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { - bucket = aws_s3_bucket.test_s3_bucket.id - policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json -} - -resource "aws_timestreaminfluxdb_db_instance" "test" { allocated_storage = 20 username = "admin" password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] db_instance_type = "db.influx.medium" publicly_accessible = false bucket = "initial" organization = "organization" - name = %[1]q log_delivery_configuration { s3_configuration { - bucket_name = %[1]q - enabled = false + bucket_name = aws_s3_bucket.test.bucket + enabled = %[2]t } } } -`, rName)) +`, rName, enabled)) } -// Configuration that is publicly accessible. An endpoint will be created -// for the DB instance but no inbound rules will be defined, preventing access. 
func testAccDBInstanceConfig_publiclyAccessible(rName string) string { return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_internet_gateway" "test_internet_gateway" { - vpc_id = aws_vpc.test_vpc.id +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id } -resource "aws_route" "test_route" { - route_table_id = aws_vpc.test_vpc.main_route_table_id +resource "aws_route" "test" { + route_table_id = aws_vpc.test.main_route_table_id destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test_internet_gateway.id + gateway_id = aws_internet_gateway.test.id } -resource "aws_route_table_association" "test_route_table_association" { - subnet_id = aws_subnet.test_subnet.id - route_table_id = aws_vpc.test_vpc.main_route_table_id +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test.id + route_table_id = aws_vpc.test.main_route_table_id } -resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_vpc" { - security_group_id = aws_security_group.test_security_group.id - referenced_security_group_id = aws_security_group.test_security_group.id +resource "aws_vpc_security_group_ingress_rule" "test" { + security_group_id = aws_security_group.test.id + referenced_security_group_id = aws_security_group.test.id ip_protocol = -1 } resource "aws_timestreaminfluxdb_db_instance" "test" { + name = %[1]q allocated_storage = 20 +username = "admin" password = "testpassword" db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] db_instance_type = "db.influx.medium" - name = %[1]q + bucket = "initial" + organization = "organization" publicly_accessible = true } @@ -641,13 +470,16 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { func testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName string) string { return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 2), fmt.Sprintf(` resource "aws_timestreaminfluxdb_db_instance" "test" { + name = %[1]q allocated_storage = 20 +username = "admin" password = "testpassword" db_storage_type = "InfluxIOIncludedT1" vpc_subnet_ids = aws_subnet.test.*.id vpc_security_group_ids = [aws_security_group.test.id] db_instance_type = "db.influx.medium" - name = %[1]q + bucket = "initial" + organization = "organization" deployment_type = "WITH_MULTIAZ_STANDBY" } @@ -690,138 +522,3 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } - -func testAccDBInstanceConfig_dbStorageTypeT2(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - allocated_storage = 400 - db_storage_type = "InfluxIOIncludedT2" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbStorageTypeT3(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = 
"db.influx.medium" - name = %[1]q - - allocated_storage = 400 - db_storage_type = "InfluxIOIncludedT3" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceTypeLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.large" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceTypeXLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.xlarge" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceType2XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - allocated_storage = 40 - db_instance_type = "db.influx.2xlarge" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceType4XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.4xlarge" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceType8XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.8xlarge" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceType12XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.12xlarge" -} -`, rName)) -} - -func testAccDBInstanceConfig_dbInstanceType16XLarge(rName string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - db_storage_type = "InfluxIOIncludedT1" - password = "testpassword" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = 
[aws_security_group.test_security_group.id] - name = %[1]q - - db_instance_type = "db.influx.16xlarge" -} -`, rName)) -} From 9ce48e75016f41b6334cc2b1fff41caaae1f761a Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Tue, 30 Jul 2024 12:46:42 -0500 Subject: [PATCH 13/21] add tags test --- .../service/timestreaminfluxdb/db_instance.go | 8 +- .../db_instance_tags_gen_test.go | 1800 +++++++++++++++++ .../timestreaminfluxdb/db_instance_test.go | 124 +- .../service/timestreaminfluxdb/generate.go | 1 + .../timestreaminfluxdb/service_package_gen.go | 2 +- .../testdata/DBInstance/tags/main_gen.tf | 54 + .../DBInstance/tagsComputed1/main_gen.tf | 58 + .../DBInstance/tagsComputed2/main_gen.tf | 69 + .../DBInstance/tags_defaults/main_gen.tf | 65 + .../DBInstance/tags_ignore/main_gen.tf | 74 + .../testdata/tmpl/db_instance_tags.gtpl | 39 + 11 files changed, 2180 insertions(+), 114 deletions(-) create mode 100644 internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go create mode 100644 internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf create mode 100644 internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf create mode 100644 internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf create mode 100644 internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf create mode 100644 internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf create mode 100644 internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 3fb3b4a8ebf..4c60c76346c 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -39,8 +39,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="Db Instance") +// @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="DB Instance") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb;timestreaminfluxdb.GetDbInstanceOutput") func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourceDBInstance{} @@ -351,7 +352,10 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ in := timestreaminfluxdb.CreateDbInstanceInput{} - resp.Diagnostics.Append(flex.Expand(ctx, plan, &in)...) + option := func(o *flex.AutoFlexOptions) { + o.SetIgnoredFields() + } + resp.Diagnostics.Append(flex.Expand(ctx, plan, &in, option)...) if resp.Diagnostics.HasError() { return diff --git a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go new file mode 100644 index 00000000000..8a8738e3e45 --- /dev/null +++ b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go @@ -0,0 +1,1800 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package timestreaminfluxdb_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + 
}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput + resourceName := "aws_timestreaminfluxdb_db_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 95d972fedaf..d631dca6adc 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -59,7 +59,7 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bucket", "username", "password", "organization"}, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, }, }, }) @@ -128,7 +128,7 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bucket", "username", "password", "organization"}, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, }, { Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, false), @@ -172,9 +172,10 @@ func TestAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, }, }, }) @@ -218,68 +219,6 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing }) } -//func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { -// ctx := acctest.Context(t) -// if testing.Short() { -// t.Skip("skipping long-running test in short mode") -// } -// -// var dbInstance1, dbInstance2, dbInstance3 timestreaminfluxdb.GetDbInstanceOutput -// rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) -// resourceName := "aws_timestreaminfluxdb_db_instance.test" -// -// resource.ParallelTest(t, resource.TestCase{ -// PreCheck: func() { -// acctest.PreCheck(ctx, t) -// testAccPreCheck(ctx, t) -// }, -// ErrorCheck: acctest.ErrorCheck(t, 
names.TimestreamInfluxDBServiceID), -// ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, -// CheckDestroy: testAccCheckDBInstanceDestroy(ctx), -// Steps: []resource.TestStep{ -// { -// Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), -// Check: resource.ComposeTestCheckFunc( -// testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), -// resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1), -// ), -// }, -// { -// Config: testAccDBInstanceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), -// Check: resource.ComposeTestCheckFunc( -// testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), -// testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance2), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct2), -// resource.TestCheckResourceAttr(resourceName, "tags_all.key1", acctest.CtValue1Updated), -// resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), -// ), -// }, -// { -// Config: testAccDBInstanceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), -// Check: resource.ComposeTestCheckFunc( -// testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance3), -// testAccCheckDBInstanceNotRecreated(&dbInstance2, &dbInstance3), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), -// resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct1), -// resource.TestCheckResourceAttr(resourceName, "tags_all.key2", acctest.CtValue2), -// ), -// }, -// { -// ResourceName: resourceName, -// ImportState: true, -// ImportStateVerify: true, -// }, -// }, -// }) -//} - func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) @@ -387,7 +326,7 @@ resource "aws_s3_bucket" "test" { force_destroy = true } -data "aws_iam_policy_document" "allow_timestreaminfluxdb" { +data "aws_iam_policy_document" "test" { statement { actions = ["s3:PutObject"] principals { @@ -400,9 +339,9 @@ data "aws_iam_policy_document" "allow_timestreaminfluxdb" { } } -resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb" { - bucket = aws_s3_bucket.test_s3_bucket.id - policy = data.aws_iam_policy_document.allow_timestreaminfluxdb.json +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.test.json } resource "aws_timestreaminfluxdb_db_instance" "test" { @@ -440,7 +379,7 @@ resource "aws_route" "test" { } resource "aws_route_table_association" "test" { - subnet_id = aws_subnet.test.id + subnet_id = aws_subnet.test[0].id route_table_id = aws_vpc.test.main_route_table_id } @@ -453,7 +392,7 @@ resource "aws_vpc_security_group_ingress_rule" "test" { resource 
"aws_timestreaminfluxdb_db_instance" "test" { name = %[1]q allocated_storage = 20 -username = "admin" + username = "admin" password = "testpassword" db_storage_type = "InfluxIOIncludedT1" vpc_subnet_ids = aws_subnet.test.*.id @@ -472,7 +411,7 @@ func testAccDBInstanceConfig_deploymentTypeMultiAzStandby(rName string) string { resource "aws_timestreaminfluxdb_db_instance" "test" { name = %[1]q allocated_storage = 20 -username = "admin" + username = "admin" password = "testpassword" db_storage_type = "InfluxIOIncludedT1" vpc_subnet_ids = aws_subnet.test.*.id @@ -485,40 +424,3 @@ username = "admin" } `, rName)) } - -func testAccDBInstanceConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1)) -} - -func testAccDBInstanceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccDBInstanceConfig_base(rName, 1), fmt.Sprintf(` -resource "aws_timestreaminfluxdb_db_instance" "test" { - allocated_storage = 20 - password = "testpassword" - db_storage_type = "InfluxIOIncludedT1" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.test_security_group.id] - db_instance_type = "db.influx.medium" - name = %[1]q - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) -} diff --git a/internal/service/timestreaminfluxdb/generate.go b/internal/service/timestreaminfluxdb/generate.go index 1e1eab7dd3a..4ccb17d23dd 100644 --- a/internal/service/timestreaminfluxdb/generate.go +++ b/internal/service/timestreaminfluxdb/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -SkipTypesImp -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tagstests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. package timestreaminfluxdb diff --git a/internal/service/timestreaminfluxdb/service_package_gen.go b/internal/service/timestreaminfluxdb/service_package_gen.go index ba78a648c66..b0e4d990ed2 100644 --- a/internal/service/timestreaminfluxdb/service_package_gen.go +++ b/internal/service/timestreaminfluxdb/service_package_gen.go @@ -22,7 +22,7 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic return []*types.ServicePackageFrameworkResource{ { Factory: newResourceDBInstance, - Name: "Db Instance", + Name: "DB Instance", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }, diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf new file mode 100644 index 00000000000..a1d76dc69aa --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +data "aws_availability_zones" "available" { + exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = var.resource_tags +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf new file mode 100644 index 00000000000..25904d01aed --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +data "aws_availability_zones" "available" { + exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf new file mode 100644 index 00000000000..1519346353e --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +data "aws_availability_zones" "available" { + exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf new file mode 100644 index 00000000000..61e52d28841 --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf @@ -0,0 +1,65 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +data "aws_availability_zones" "available" { + exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = var.resource_tags +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf new file mode 100644 index 00000000000..48c166490e4 --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf @@ -0,0 +1,74 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +data "aws_availability_zones" "available" { + exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = var.resource_tags +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl b/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl new file mode 100644 index 00000000000..6ca926b208e --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl @@ -0,0 +1,39 @@ +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +data "aws_availability_zones" "available" { + exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test.*.id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + +{{- template "tags" . 
}} +} \ No newline at end of file From aed858c1f0e79970b9ad966af5344dcf84843ca8 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Tue, 30 Jul 2024 12:52:44 -0500 Subject: [PATCH 14/21] cleanup --- internal/service/timestreaminfluxdb/db_instance.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 4c60c76346c..2eb0825cd33 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -352,10 +352,7 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ in := timestreaminfluxdb.CreateDbInstanceInput{} - option := func(o *flex.AutoFlexOptions) { - o.SetIgnoredFields() - } - resp.Diagnostics.Append(flex.Expand(ctx, plan, &in, option)...) + resp.Diagnostics.Append(flex.Expand(ctx, plan, &in)...) if resp.Diagnostics.HasError() { return From afa90e1a96c62a97000dda0b5f3d4f41e8cac180 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Tue, 30 Jul 2024 13:33:21 -0500 Subject: [PATCH 15/21] add CHANGELOG entry --- .changelog/37963.txt | 3 +++ internal/service/timestreaminfluxdb/db_instance.go | 2 ++ internal/service/timestreaminfluxdb/db_instance_test.go | 7 ++++--- 3 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 .changelog/37963.txt diff --git a/.changelog/37963.txt b/.changelog/37963.txt new file mode 100644 index 00000000000..9a02403ed61 --- /dev/null +++ b/.changelog/37963.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_timestreaminfluxdb_db_instance +``` \ No newline at end of file diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 2eb0825cd33..e8700f1e2b9 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -358,6 +358,8 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ return } + in.Tags = getTagsIn(ctx) + out, err := conn.CreateDbInstance(ctx, &in) if err != nil { resp.Diagnostics.AddError( diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index d631dca6adc..89da100d0d7 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -211,9 +211,10 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, }, }, }) From 40fd4b09d15f5bb7799b1212fa3d485ad9c502dc Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Tue, 30 Jul 2024 14:21:17 -0500 Subject: [PATCH 16/21] make tests serial --- .../service/timestreaminfluxdb/db_instance.go | 1 + .../db_instance_tags_gen_test.go | 98 ++++++++++++------- .../timestreaminfluxdb/db_instance_test.go | 10 +- .../timestreaminfluxdb_test.go | 27 +++++ 4 files changed, 95 insertions(+), 41 deletions(-) create mode 100644 internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index e8700f1e2b9..9d08365fb5e 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ 
b/internal/service/timestreaminfluxdb/db_instance.go @@ -42,6 +42,7 @@ import ( // @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="DB Instance") // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb;timestreaminfluxdb.GetDbInstanceOutput") +// @Testing(serialize=true) func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourceDBInstance{} diff --git a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go index 8a8738e3e45..032fa99685b 100644 --- a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go @@ -17,13 +17,39 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tagsSerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccTimestreamInfluxDBDBInstance_tags, + "null": testAccTimestreamInfluxDBDBInstance_tags_null, + "AddOnUpdate": testAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate, + "EmptyTag_OnCreate": testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate, + "EmptyTag_OnUpdate_Add": testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add, + "EmptyTag_OnUpdate_Replace": testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace, + "DefaultTags_providerOnly": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly, + "DefaultTags_nonOverlapping": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping, + "DefaultTags_overlapping": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping, + "DefaultTags_updateToProviderOnly": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly, + "DefaultTags_updateToResourceOnly": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly, + "DefaultTags_emptyResourceTag": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag, + "DefaultTags_nullOverlappingResourceTag": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag, + "DefaultTags_nullNonOverlappingResourceTag": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag, + "ComputedTag_OnCreate": testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate, + "ComputedTag_OnUpdate_Add": testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add, + "ComputedTag_OnUpdate_Replace": testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -188,7 +214,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { t.Skip("Tags with null values are not correctly handled 
with the Plugin Framework") ctx := acctest.Context(t) @@ -196,7 +222,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -249,13 +275,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -325,13 +351,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -411,13 +437,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -540,13 +566,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T }) } -func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -624,13 +650,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testi }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { +func 
testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -805,13 +831,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -965,13 +991,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1141,13 +1167,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1231,13 +1257,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1320,13 +1346,13 @@ func 
TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1386,13 +1412,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *te }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1444,7 +1470,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { t.Skip("Tags with null values are not correctly handled with the Plugin Framework") ctx := acctest.Context(t) @@ -1452,7 +1478,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1508,7 +1534,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc }) } -func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { t.Skip("Tags with null values are not correctly handled with the Plugin Framework") ctx := acctest.Context(t) @@ -1516,7 +1542,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1572,13 +1598,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso }) } -func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) var v 
timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1626,13 +1652,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) }) } -func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1716,13 +1742,13 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testin }) } -func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 89da100d0d7..31e5f6ef681 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -24,7 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -65,7 +65,7 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -96,7 +96,7 @@ func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -144,7 +144,7 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) }) } -func TestAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -181,7 +181,7 @@ func 
TestAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { }) } -func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing.T) { +func testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") diff --git a/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go b/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go new file mode 100644 index 00000000000..043548931b6 --- /dev/null +++ b/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package timestreaminfluxdb_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +func TestAccTimestreamInfluxDB_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]map[string]func(t *testing.T){ + "DB Instance": { + acctest.CtBasic: testAccTimestreamInfluxDBDBInstance_basic, + "deploymentTypeMultiAZStandby": testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby, + acctest.CtDisappears: testAccTimestreamInfluxDBDBInstance_disappears, + "logDeliveryConfiguration": testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration, + "publiclyAccessible": testAccTimestreamInfluxDBDBInstance_publiclyAccessible, + "tags": testAccTimestreamInfluxDBDBInstance_tagsSerial, + }, + } + + acctest.RunSerialTests2Levels(t, testCases, 0) +} From 3320039938a91e9c012d0e081c94bcd77bded221 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Tue, 30 Jul 2024 20:35:49 -0500 Subject: [PATCH 17/21] make tests serial --- .../service/timestreaminfluxdb/db_instance.go | 42 +++++---- .../db_instance_tags_gen_test.go | 90 +++++++++++++++++++ .../timestreaminfluxdb/db_instance_test.go | 10 +-- 3 files changed, 118 insertions(+), 24 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 9d08365fb5e..2e1f0b7a3f6 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -43,6 +43,7 @@ import ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb;timestreaminfluxdb.GetDbInstanceOutput") // @Testing(serialize=true) +// @Testing(importIgnore="bucket;username;organization;password") func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourceDBInstance{} @@ -153,7 +154,10 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ with a Multi-AZ standby for high availability.`, }, names.AttrEndpoint: schema.StringAttribute{ - Computed: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, Description: `The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.`, }, names.AttrID: framework.IDAttribute(), @@ -398,6 +402,9 @@ func (r *resourceDBInstance) Create(ctx context.Context, req resource.CreateRequ return } + // flatten using legacy since this computed output may be null + state.SecondaryAvailabilityZone = flex.StringToFrameworkLegacy(ctx, output.SecondaryAvailabilityZone) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} @@ -410,7 +417,7 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, return } - out, err := findDBInstanceByID(ctx, conn, state.ID.ValueString()) + output, err := findDBInstanceByID(ctx, conn, state.ID.ValueString()) if tfresource.NotFound(err) { resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) @@ -426,12 +433,15 @@ func (r *resourceDBInstance) Read(ctx context.Context, req resource.ReadRequest, return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + resp.Diagnostics.Append(flex.Flatten(ctx, output, &state)...) if resp.Diagnostics.HasError() { return } + // flatten using legacy since this computed output may be null + state.SecondaryAvailabilityZone = flex.StringToFrameworkLegacy(ctx, output.SecondaryAvailabilityZone) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } @@ -457,7 +467,7 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ return } - out, err := conn.UpdateDbInstance(ctx, &in) + _, err := conn.UpdateDbInstance(ctx, &in) if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBInstance, plan.ID.String(), err), @@ -466,14 +476,6 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ return } - if out == nil || out.Id == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBInstance, plan.ID.String(), nil), - errors.New("empty output").Error(), - ) - return - } - updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) output, err := waitDBInstanceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) if err != nil { @@ -489,6 +491,9 @@ func (r *resourceDBInstance) Update(ctx context.Context, req resource.UpdateRequ if resp.Diagnostics.HasError() { return } + + // flatten using legacy since this computed output may be null + plan.SecondaryAvailabilityZone = flex.StringToFrameworkLegacy(ctx, output.SecondaryAvailabilityZone) } resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
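The Create, Read, and Update paths above all flatten `SecondaryAvailabilityZone` with `flex.StringToFrameworkLegacy` instead of letting the automatic flattener handle it. A minimal, self-contained sketch of the distinction being relied on, using only the public terraform-plugin-framework `types` package rather than the provider's internal `flex` helpers (the helper names below are illustrative, not the provider's own):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

// Plain conversion: a nil *string from the API becomes a null framework value.
func stringToFramework(v *string) types.String {
	if v == nil {
		return types.StringNull()
	}
	return types.StringValue(*v)
}

// Legacy-style conversion: a nil *string becomes an empty, non-null value,
// mirroring the behavior the resource relies on for
// secondary_availability_zone when GetDbInstance omits the field.
func stringToFrameworkLegacy(v *string) types.String {
	if v == nil {
		return types.StringValue("")
	}
	return types.StringValue(*v)
}

func main() {
	var az *string // e.g. SecondaryAvailabilityZone omitted for a SINGLE_AZ instance

	fmt.Println(stringToFramework(az).IsNull())       // true
	fmt.Println(stringToFrameworkLegacy(az).IsNull()) // false: known empty string
}
```

Keeping the computed attribute as a known empty string rather than null is what avoids a "planned value does not match config" style inconsistency for single-AZ deployments, which is presumably why the legacy flattener is used at each of the three call sites above.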
@@ -536,7 +541,7 @@ func (r *resourceDBInstance) ModifyPlan(ctx context.Context, request resource.Mo func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.StatusCreating, awstypes.StatusUpdating, awstypes.StatusModifying), + Pending: enum.Slice(awstypes.StatusCreating), Target: enum.Slice(awstypes.StatusAvailable), Refresh: statusDBInstance(ctx, conn, id), Timeout: timeout, @@ -572,12 +577,11 @@ func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbInstanceOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.StatusDeleting, awstypes.StatusModifying, awstypes.StatusUpdating, awstypes.StatusAvailable), - Target: []string{}, - Refresh: statusDBInstance(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - PollInterval: 30 * time.Second, + Pending: enum.Slice(awstypes.StatusDeleting), + Target: []string{}, + Refresh: statusDBInstance(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) diff --git a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go index 032fa99685b..975be1fbc15 100644 --- a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go @@ -94,6 +94,9 @@ func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), @@ -139,6 +142,9 @@ func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), @@ -179,6 +185,9 @@ func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), @@ -209,6 +218,9 @@ func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -261,6 +273,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), @@ -346,6 +361,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + 
names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -402,6 +420,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), @@ -432,6 +453,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -521,6 +545,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ConfigDirectory: config.StaticDirectory("testdata/DBInstance/tags/"), @@ -561,6 +588,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -645,6 +675,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testi ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -703,6 +736,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -750,6 +786,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -793,6 +832,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -826,6 +868,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -894,6 +939,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -953,6 +1001,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test 
ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -986,6 +1037,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1052,6 +1106,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1111,6 +1168,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1162,6 +1222,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1252,6 +1315,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1341,6 +1407,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1407,6 +1476,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *te ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1465,6 +1537,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1529,6 +1604,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1593,6 +1671,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1647,6 +1728,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) ResourceName: resourceName, ImportState: true, ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1737,6 +1821,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testin ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) @@ -1820,6 +1907,9 @@ func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *te ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, }, }, }) diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 31e5f6ef681..71501859bfe 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -34,7 +34,7 @@ func testAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -75,7 +75,7 @@ func testAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -106,7 +106,7 @@ func testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -154,7 +154,7 @@ func testAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -191,7 +191,7 @@ func testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) From 3d7e51e5372119b3396250abca38a19b4f0f1a25 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 31 Jul 2024 11:24:37 -0500 Subject: [PATCH 18/21] make tests parallel --- .../service/timestreaminfluxdb/db_instance.go | 1 - .../db_instance_tags_gen_test.go | 98 +++++++------------ .../timestreaminfluxdb/db_instance_test.go | 20 ++-- .../timestreaminfluxdb_test.go | 38 +++---- ...mestreaminfluxdb_db_instance.html.markdown | 96 +++++++++--------- 5 files changed, 106 insertions(+), 147 deletions(-) diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 2e1f0b7a3f6..0a4a6d819da 100644 --- 
a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -42,7 +42,6 @@ import ( // @FrameworkResource("aws_timestreaminfluxdb_db_instance", name="DB Instance") // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb;timestreaminfluxdb.GetDbInstanceOutput") -// @Testing(serialize=true) // @Testing(importIgnore="bucket;username;organization;password") func newResourceDBInstance(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourceDBInstance{} diff --git a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go index 975be1fbc15..8101734c4f1 100644 --- a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go @@ -17,39 +17,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func testAccTimestreamInfluxDBDBInstance_tagsSerial(t *testing.T) { - t.Helper() - - testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccTimestreamInfluxDBDBInstance_tags, - "null": testAccTimestreamInfluxDBDBInstance_tags_null, - "AddOnUpdate": testAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate, - "EmptyTag_OnCreate": testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate, - "EmptyTag_OnUpdate_Add": testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add, - "EmptyTag_OnUpdate_Replace": testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace, - "DefaultTags_providerOnly": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly, - "DefaultTags_nonOverlapping": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping, - "DefaultTags_overlapping": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping, - "DefaultTags_updateToProviderOnly": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly, - "DefaultTags_updateToResourceOnly": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly, - "DefaultTags_emptyResourceTag": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag, - "DefaultTags_nullOverlappingResourceTag": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag, - "DefaultTags_nullNonOverlappingResourceTag": testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag, - "ComputedTag_OnCreate": testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate, - "ComputedTag_OnUpdate_Add": testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add, - "ComputedTag_OnUpdate_Replace": testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace, - } - - acctest.RunSerialTests1Level(t, testCases, 0) -} - -func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -226,7 +200,7 @@ func testAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { 
+func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { t.Skip("Tags with null values are not correctly handled with the Plugin Framework") ctx := acctest.Context(t) @@ -234,7 +208,7 @@ func testAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -290,13 +264,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -369,13 +343,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -461,13 +435,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -596,13 +570,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T }) } -func testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -683,13 +657,13 @@ func 
testAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testi }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -876,13 +850,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1045,13 +1019,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1230,13 +1204,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1323,13 +1297,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1415,13 +1389,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1484,13 +1458,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *te }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1545,7 +1519,7 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { t.Skip("Tags with null values are not correctly handled with the Plugin Framework") ctx := acctest.Context(t) @@ -1553,7 +1527,7 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1612,7 +1586,7 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc }) } -func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { t.Skip("Tags with null values are not correctly handled with the Plugin Framework") ctx := acctest.Context(t) @@ -1620,7 +1594,7 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1679,13 +1653,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso }) } -func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { 
+func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1736,13 +1710,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) }) } -func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1829,13 +1803,13 @@ func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testin }) } -func testAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 71501859bfe..89da100d0d7 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -24,7 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func testAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -34,7 +34,7 @@ func testAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -65,7 +65,7 @@ func testAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -75,7 +75,7 @@ func testAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -96,7 +96,7 @@ func testAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -106,7 +106,7 @@ func testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -144,7 +144,7 @@ func testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) }) } -func testAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -154,7 +154,7 @@ func testAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -181,7 +181,7 @@ func testAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { }) } -func testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing.T) { +func TestAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -191,7 +191,7 @@ func testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby(t *testing rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) diff --git a/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go b/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go index 043548931b6..32b795875b8 100644 --- a/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go +++ b/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go @@ -3,25 +3,19 @@ package timestreaminfluxdb_test -import ( - "testing" - - "github.com/hashicorp/terraform-provider-aws/internal/acctest" -) - -func TestAccTimestreamInfluxDB_serial(t *testing.T) { - t.Parallel() - - testCases := map[string]map[string]func(t *testing.T){ - "DB Instance": { - acctest.CtBasic: testAccTimestreamInfluxDBDBInstance_basic, - "deploymentTypeMultiAZStandby": testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby, - acctest.CtDisappears: testAccTimestreamInfluxDBDBInstance_disappears, - "logDeliveryConfiguration": testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration, - "publiclyAccessible": testAccTimestreamInfluxDBDBInstance_publiclyAccessible, - "tags": testAccTimestreamInfluxDBDBInstance_tagsSerial, - }, - } - - acctest.RunSerialTests2Levels(t, testCases, 0) -} +//func TestAccTimestreamInfluxDB_serial(t *testing.T) { +// t.Parallel() +// +// testCases := map[string]map[string]func(t *testing.T){ +// "DB 
Instance": { +// acctest.CtBasic: testAccTimestreamInfluxDBDBInstance_basic, +// "deploymentTypeMultiAZStandby": testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby, +// acctest.CtDisappears: testAccTimestreamInfluxDBDBInstance_disappears, +// "logDeliveryConfiguration": testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration, +// "publiclyAccessible": testAccTimestreamInfluxDBDBInstance_publiclyAccessible, +// "tags": testAccTimestreamInfluxDBDBInstance_tagsSerial, +// }, +// } +// +// acctest.RunSerialTests2Levels(t, testCases, 0) +//} diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown index d5e21b6881a..0aa9cbf3ffa 100644 --- a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown @@ -5,14 +5,6 @@ page_title: "AWS: aws_timestreaminfluxdb_db_instance" description: |- Terraform resource for managing an Amazon Timestream for InfluxDB Db Instance. --- -` # Resource: aws_timestreaminfluxdb_db_instance @@ -29,8 +21,8 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" - vpc_subnet_ids = [aws_subnet.test_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] + vpc_subnet_ids = [aws_subnet.example.id] + vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" } ``` ### Usage with Prerequisite Resources All Timestream for InfluxDB instances require a VPC, subnet, and security group. The following example shows how these prerequisite resources can be created and used with `aws_timestreaminfluxdb_db_instance`. ```terraform -resource "aws_vpc" "example_vpc" { +resource "aws_vpc" "example" { cidr_block = "10.0.0.0/16" } -resource "aws_subnet" "example_subnet" { - vpc_id = aws_vpc.example_vpc.id +resource "aws_subnet" "example" { + vpc_id = aws_vpc.example.id cidr_block = "10.0.1.0/24" } -resource "aws_security_group" "example_security_group" { - name = "example_security_group" - vpc_id = aws_vpc.example_vpc.id +resource "aws_security_group" "example" { + name = "example" + vpc_id = aws_vpc.example.id } resource "aws_timestreaminfluxdb_db_instance" "example" { @@ -60,8 +52,8 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] + vpc_subnet_ids = [aws_subnet.example.id] + vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" } ``` ### Usage with Public Internet Access Enabled The following configuration shows how to define the necessary resources and arguments to allow public internet access on your Timestream for InfluxDB instance's endpoint on port `8086`. After applying this configuration, the instance's InfluxDB UI can be accessed by visiting your instance's endpoint at port `8086`.
```terraform -resource "aws_vpc" "example_vpc" { +resource "aws_vpc" "example" { cidr_block = "10.0.0.0/16" } -resource "aws_subnet" "example_subnet" { - vpc_id = aws_vpc.example_vpc.id +resource "aws_subnet" "example" { + vpc_id = aws_vpc.example.id cidr_block = "10.0.1.0/24" } -resource "aws_security_group" "example_security_group" { - name = "example_security_group" - vpc_id = aws_vpc.example_vpc.id +resource "aws_security_group" "example" { + name = "example" + vpc_id = aws_vpc.example.id } -resource "aws_internet_gateway" "test_internet_gateway" { - vpc_id = aws_vpc.test_vpc.id +resource "aws_internet_gateway" "example" { + vpc_id = aws_vpc.example.id tags = { - Name = "test_internet_gateway" + Name = "example" } } resource "aws_route" "test_route" { - route_table_id = aws_vpc.test_vpc.main_route_table_id + route_table_id = aws_vpc.example.main_route_table_id destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test_internet_gateway.id + gateway_id = aws_internet_gateway.example.id } resource "aws_route_table_association" "test_route_table_association" { subnet_id = aws_subnet.example.id - route_table_id = aws_vpc.test_vpc.main_route_table_id + route_table_id = aws_vpc.example.main_route_table_id } -resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_vpc" { - security_group_id = aws_security_group.test_security_group.id - referenced_security_group_id = aws_security_group.test_security_group.id +resource "aws_vpc_security_group_ingress_rule" "example" { + security_group_id = aws_security_group.example.id + referenced_security_group_id = aws_security_group.example.id ip_protocol = -1 } -resource "aws_vpc_security_group_ingress_rule" "test_vpc_security_group_ingress_rule_influxdb" { - security_group_id = aws_security_group.test_security_group.id +resource "aws_vpc_security_group_ingress_rule" "example_influxdb" { + security_group_id = aws_security_group.example.id cidr_ipv4 = "0.0.0.0/0" ip_protocol = "tcp" from_port = 8086 @@ -124,8 +116,8 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] + vpc_subnet_ids = [aws_subnet.example.id] + vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" publicly_accessible = true # False by default } ``` ### Usage with S3 Log Delivery Enabled You can use an S3 bucket to store logs generated by your Timestream for InfluxDB instance. The following example shows what resources and arguments are required to configure an S3 bucket for logging, including the IAM policy that needs to be set in order to allow Timestream for InfluxDB to place logs in your S3 bucket. The configuration of the required VPC, security group, and subnet has been left out of the example for brevity.
```terraform -resource "aws_s3_bucket" "example_s3_bucket" { +resource "aws_s3_bucket" "example" { bucket = "example-s3-bucket" } -data "aws_iam_policy_document" "allow_timestreaminfluxdb_policy_document" { +data "aws_iam_policy_document" "example" { statement { actions = ["s3:PutObject"] principals { type = "Service" identifiers = ["timestream-influxdb.amazonaws.com"] } resources = [ - "${aws_s3_bucket.example_s3_bucket.arn}/*" + "${aws_s3_bucket.example.arn}/*" ] } } -resource "aws_s3_bucket_policy" "allow_timestreaminfluxdb_policy" { - bucket = aws_s3_bucket.example_s3_bucket.id - policy = data.aws_iam_policy_document.allow_timestreaminfluxdb_policy_document.json +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_bucket.example.id + policy = data.aws_iam_policy_document.example.json } resource "aws_timestreaminfluxdb_db_instance" "example" { @@ -164,13 +156,13 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] + vpc_subnet_ids = [aws_subnet.example.id] + vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" log_delivery_configuration { s3_configuration { - bucket_name = aws_s3_bucket.example_s3_bucket.name + bucket_name = aws_s3_bucket.example.bucket enabled = true } } @@ -182,14 +174,14 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { To use Multi-AZ availability, at least two subnets must be created in different availability zones and used with your Timestream for InfluxDB instance. ```terraform -resource "aws_subnet" "example_subnet_1" { - vpc_id = aws_vpc.example_vpc.id +resource "aws_subnet" "example_1" { + vpc_id = aws_vpc.example.id cidr_block = "10.0.1.0/24" availability_zone = "us-west-2a" } -resource "aws_subnet" "example_subnet_2" { - vpc_id = aws_vpc.example_vpc.id +resource "aws_subnet" "example_2" { + vpc_id = aws_vpc.example.id cidr_block = "10.0.2.0/24" availability_zone = "us-west-2b" } @@ -201,8 +193,8 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { deployment_type = "WITH_MULTIAZ_STANDBY" username = "admin" password = "example-password" - vpc_subnet_ids = [aws_subnet.example_subnet_1.id, aws_subnet.example_subnet_2.id] - vpc_security_group_ids = [aws_security_group.example_security_group.id] + vpc_subnet_ids = [aws_subnet.example_1.id, aws_subnet.example_2.id] + vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" } ``` From 0855b2b7a8bea7cfb174f0fc8e483e6ecf44cfa6 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 31 Jul 2024 12:25:31 -0500 Subject: [PATCH 19/21] update documentation --- .../timestreaminfluxdb_test.go | 21 ------------------- ...mestreaminfluxdb_db_instance.html.markdown | 12 +++++++---- 2 files changed, 8 insertions(+), 25 deletions(-) delete mode 100644 internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go diff --git a/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go b/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go deleted file mode 100644 index 32b795875b8..00000000000 --- a/internal/service/timestreaminfluxdb/timestreaminfluxdb_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 - -package timestreaminfluxdb_test - -//func TestAccTimestreamInfluxDB_serial(t *testing.T) { -// t.Parallel() -// -// testCases := map[string]map[string]func(t *testing.T){ -// "DB Instance": { -// acctest.CtBasic: testAccTimestreamInfluxDBDBInstance_basic, -// "deploymentTypeMultiAZStandby": testAccTimestreamInfluxDBDBInstance_deploymentTypeMultiAzStandby, -// acctest.CtDisappears: testAccTimestreamInfluxDBDBInstance_disappears, -// "logDeliveryConfiguration": testAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration, -// "publiclyAccessible": testAccTimestreamInfluxDBDBInstance_publiclyAccessible, -// "tags": testAccTimestreamInfluxDBDBInstance_tagsSerial, -// }, -// } -// -// acctest.RunSerialTests2Levels(t, testCases, 0) -//} diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown index 0aa9cbf3ffa..00ecde030b6 100644 --- a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown @@ -21,6 +21,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" + organization = "organization" vpc_subnet_ids = [aws_subnet.example.id] vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" } @@ -52,6 +53,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" + organization = "organization" vpc_subnet_ids = [aws_subnet.example.id] vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" } @@ -116,6 +118,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" + organization = "organization" vpc_subnet_ids = [aws_subnet.example.id] vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" @@ -156,6 +159,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { db_instance_type = "db.influx.medium" username = "admin" password = "example-password" + organization = "organization" vpc_subnet_ids = [aws_subnet.example.id] vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" @@ -193,6 +197,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { deployment_type = "WITH_MULTIAZ_STANDBY" username = "admin" password = "example-password" + organization = "organization" vpc_subnet_ids = [aws_subnet.example_1.id, aws_subnet.example_2.id] vpc_security_group_ids = [aws_security_group.example.id] name = "example-db-instance" @@ -204,22 +209,22 @@ resource "aws_timestreaminfluxdb_db_instance" "example" { The following arguments are required: * `allocated_storage` - (Required) Amount of storage in GiB (gibibytes). The minimum value is 20, the maximum value is 16384. +* `bucket` - (Required) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. * `db_instance_type` - (Required) Timestream for InfluxDB DB instance type to run InfluxDB on.
Valid options are: `"db.influx.medium"`, `"db.influx.large"`, `"db.influx.xlarge"`, `"db.influx.2xlarge"`, `"db.influx.4xlarge"`, `"db.influx.8xlarge"`, `"db.influx.12xlarge"`, and `"db.influx.16xlarge"`. * `name` - (Required) Name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per region. The argument must start with a letter, cannot contain consecutive hyphens (`-`) and cannot end with a hyphen. * `password` - (Required) Password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `username`, and `organization`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `organization` - (Required) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `username` - (Required) Username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `organization`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. * `vpc_security_group_ids` - (Required) List of VPC security group IDs to associate with the DB instance. * `vpc_subnet_ids` - (Required) List of VPC subnet IDs to associate with the DB instance. Provide at least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby. The following arguments are optional: -* `bucket` - (Default `"bucket"`) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. * `db_parameter_group_identifier` - (Optional) ID of the DB parameter group assigned to your DB instance. If added to an existing Timestream for InfluxDB instance or given a new value, will cause an in-place update to the instance. However, if an instance already has a value for `db_parameter_group_identifier`, removing `db_parameter_group_identifier` will cause the instance to be destroyed and recreated. * `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT1"`. If you use `"InfluxIOIncludedT2" or "InfluxIOIncludedT3", the minimum value for `allocated_storage` is 400. 
* `deployment_type` - (Default `"SINGLE_AZ"`) Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. Valid options are: `"SINGLE_AZ"`, `"WITH_MULTIAZ_STANDBY"`.
* `log_delivery_configuration` - (Optional) Configuration for sending InfluxDB engine logs to a specified S3 bucket.
-* `organization` - (Default `"organization"`) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute.
* `publicly_accessible` - (Default `false`) Configures the DB instance with a public IP to facilitate access. Other resources, such as a VPC, a subnet, an internet gateway, and a route table with routes, are also required to enable public access, in addition to this argument. See "[Usage with Public Internet Access Enabled](#usage-with-public-internet-access-enabled)" for an example configuration with all required resources for public internet access.
-* `username` - (Default `"admin"`) Username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `organization`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute.
* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.

### Nested Fields
@@ -245,7 +250,6 @@ This resource exports the following attributes in addition to the arguments abov

* `id` - ID of the Timestream for InfluxDB instance.
* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a set of JSON-formatted key-value pairs holding the InfluxDB authorization values: organization, bucket, username, and password. This secret will be read by the `aws_timestreaminfluxdb_db_instance` resource in order to support importing: deleting the secret or secret values can cause errors.
* `secondary_availability_zone` - Availability Zone in which the standby instance is located when deploying with a Multi-AZ standby instance.
-* `status` - The status of the Timestream for InfluxDB instance.
* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block).
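As an illustrative aside (not part of this patch): because the resource only exposes the secret ARN via `influx_auth_parameters_secret_arn`, a configuration that needs the generated credentials would typically read them back through Secrets Manager. A minimal sketch, assuming the secret stores a JSON object with lowercase `organization`, `bucket`, `username`, and `password` keys as described above:

```terraform
# Illustrative only: read the auth parameters secret created by the DB instance
# and decode its JSON payload for use elsewhere in the configuration.
data "aws_secretsmanager_secret_version" "influx_auth" {
  secret_id = aws_timestreaminfluxdb_db_instance.example.influx_auth_parameters_secret_arn
}

locals {
  influx_auth = jsondecode(data.aws_secretsmanager_secret_version.influx_auth.secret_string)
}

output "influxdb_organization" {
  # Non-sensitive field; avoid outputting the password.
  value = local.influx_auth.organization
}
```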
## Timeouts

From dcb600b01a6d7b8594ee0e244ef5be768758a317 Mon Sep 17 00:00:00 2001
From: Adrian Johnson
Date: Wed, 31 Jul 2024 12:37:50 -0500
Subject: [PATCH 20/21] add vpc lock and parallelism to teamcity test

---
 .teamcity/components/generated/services_all.kt  | 2 +-
 internal/generate/teamcity/acctest_services.hcl | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/.teamcity/components/generated/services_all.kt b/.teamcity/components/generated/services_all.kt
index 379fdaf3a10..b57e454b001 100644
--- a/.teamcity/components/generated/services_all.kt
+++ b/.teamcity/components/generated/services_all.kt
@@ -226,7 +226,7 @@ val services = mapOf(
    "sts" to ServiceSpec("STS (Security Token)"),
    "swf" to ServiceSpec("SWF (Simple Workflow)"),
    "synthetics" to ServiceSpec("CloudWatch Synthetics", parallelismOverride = 10),
-    "timestreaminfluxdb" to ServiceSpec("Timestream for InfluxDB"),
+    "timestreaminfluxdb" to ServiceSpec("Timestream for InfluxDB", vpcLock = true, parallelismOverride = 3),
    "timestreamwrite" to ServiceSpec("Timestream Write"),
    "transcribe" to ServiceSpec("Transcribe"),
    "transfer" to ServiceSpec("Transfer Family", vpcLock = true),
diff --git a/internal/generate/teamcity/acctest_services.hcl b/internal/generate/teamcity/acctest_services.hcl
index 51e8a8155df..c3836ae463d 100644
--- a/internal/generate/teamcity/acctest_services.hcl
+++ b/internal/generate/teamcity/acctest_services.hcl
@@ -235,6 +235,11 @@ service "synthetics" {
  parallelism = 10
}

+service "timestreaminfluxdb" {
+  vpc_lock    = true
+  parallelism = 3
+}
+
service "transfer" {
  vpc_lock = true
}

From 94f6fe307ccc39982488e70afe46f38ff837ea09 Mon Sep 17 00:00:00 2001
From: Adrian Johnson
Date: Wed, 31 Jul 2024 12:45:20 -0500
Subject: [PATCH 21/21] chore: fmt

---
 internal/service/timestreaminfluxdb/db_instance.go          | 3 +--
 website/docs/r/timestreaminfluxdb_db_instance.html.markdown | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go
index 0a4a6d819da..60158012a75 100644
--- a/internal/service/timestreaminfluxdb/db_instance.go
+++ b/internal/service/timestreaminfluxdb/db_instance.go
@@ -259,8 +259,7 @@ func (r *resourceDBInstance) Schema(ctx context.Context, req resource.SchemaRequ
					setplanmodifier.RequiresReplace(),
				},
				Validators: []validator.Set{
-					setvalidator.SizeAtLeast(1),
-					setvalidator.SizeAtMost(5),
+					setvalidator.SizeBetween(1, 5),
					setvalidator.ValueStringsAre(
						stringvalidator.LengthAtMost(64),
						stringvalidator.RegexMatches(regexache.MustCompile("^sg-[a-z0-9]+$"), ""),
diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown
index 00ecde030b6..f3bf9294f5e 100644
--- a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown
+++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown
@@ -118,7 +118,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" {
  db_instance_type       = "db.influx.medium"
  username               = "admin"
  password               = "example-password"
-  organization = "organization"
+  organization           = "organization"
  vpc_subnet_ids         = [aws_subnet.example.id]
  vpc_security_group_ids = [aws_security_group.example.id]
  name                   = "example-db-instance"
@@ -159,7 +159,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" {
  db_instance_type       = "db.influx.medium"
  username               = "admin"
  password               = "example-password"
-  organization = "organization"
+  organization           = "organization"
  vpc_subnet_ids         = [aws_subnet.example.id]
  vpc_security_group_ids = [aws_security_group.example.id]
  name                   = "example-db-instance"
@@ -197,7 +197,7 @@ resource "aws_timestreaminfluxdb_db_instance" "example" {
  deployment_type        = "WITH_MULTIAZ_STANDBY"
  username               = "admin"
  password               = "example-password"
-  organization = "organization"
+  organization           = "organization"
  vpc_subnet_ids         = [aws_subnet.example_1.id, aws_subnet.example_2.id]
  vpc_security_group_ids = [aws_security_group.example.id]
  name                   = "example-db-instance"
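One closing aside from review (not part of the patch): the documentation examples reference `aws_vpc.example` and `aws_security_group.example` without defining them. A minimal sketch of those supporting resources, with an illustrative name and CIDR range chosen here for completeness, might look like:

```terraform
# Illustrative supporting resources assumed by the documentation examples.
resource "aws_vpc" "example" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_security_group" "example" {
  name   = "example-security-group"
  vpc_id = aws_vpc.example.id
}
```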