From 758936c56d78b8143838c811faf4014b2c8e5668 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Wed, 8 Feb 2023 19:13:00 +0000 Subject: [PATCH] Regenerated Clients --- .../20a176f2919a4dacbf384bc4900f664a.json | 8 +++ .../21ba3b71c9f748a2970efc98e00cd5d3.json | 8 +++ .../342ecb3f3a9d40319775b914c9cd3a2e.json | 8 +++ .../bbcbd512268943458a8db104ea381f02.json | 8 +++ .../internal/endpoints/endpoints.go | 3 + service/backup/api_op_DescribeBackupJob.go | 4 ++ .../api_op_DescribeProtectedResource.go | 4 ++ .../backup/api_op_DescribeRecoveryPoint.go | 4 ++ .../api_op_PutBackupVaultLockConfiguration.go | 6 +- service/backup/api_op_StartBackupJob.go | 3 +- service/backup/deserializers.go | 72 +++++++++++++++++++ service/backup/types/types.go | 20 ++++++ .../api_op_CreateOriginAccessControl.go | 12 ++-- service/cloudfront/types/enums.go | 4 +- service/cloudfront/types/types.go | 6 +- .../datasync/internal/endpoints/endpoints.go | 9 +++ service/eks/internal/endpoints/endpoints.go | 9 +++ service/glue/deserializers.go | 45 ++++++++++++ service/glue/serializers.go | 25 +++++++ service/glue/types/types.go | 40 ++++++++++- .../kendra/internal/endpoints/endpoints.go | 3 + 21 files changed, 284 insertions(+), 17 deletions(-) create mode 100644 .changelog/20a176f2919a4dacbf384bc4900f664a.json create mode 100644 .changelog/21ba3b71c9f748a2970efc98e00cd5d3.json create mode 100644 .changelog/342ecb3f3a9d40319775b914c9cd3a2e.json create mode 100644 .changelog/bbcbd512268943458a8db104ea381f02.json diff --git a/.changelog/20a176f2919a4dacbf384bc4900f664a.json b/.changelog/20a176f2919a4dacbf384bc4900f664a.json new file mode 100644 index 00000000000..4930c4d4212 --- /dev/null +++ b/.changelog/20a176f2919a4dacbf384bc4900f664a.json @@ -0,0 +1,8 @@ +{ + "id": "20a176f2-919a-4dac-bf38-4bc4900f664a", + "type": "feature", + "description": "CloudFront Origin Access Control extends support to AWS Elemental MediaStore origins.", + "modules": [ + "service/cloudfront" + ] +} \ No newline at end of file diff --git a/.changelog/21ba3b71c9f748a2970efc98e00cd5d3.json b/.changelog/21ba3b71c9f748a2970efc98e00cd5d3.json new file mode 100644 index 00000000000..45f8e63c4d1 --- /dev/null +++ b/.changelog/21ba3b71c9f748a2970efc98e00cd5d3.json @@ -0,0 +1,8 @@ +{ + "id": "21ba3b71-c9f7-48a2-970e-fc98e00cd5d3", + "type": "feature", + "description": "This release added one attribute (resource name) in the output model of our 9 existing APIs in AWS backup so that customers will see the resource name at the output. 
No input required from Customers.", + "modules": [ + "service/backup" + ] +} \ No newline at end of file diff --git a/.changelog/342ecb3f3a9d40319775b914c9cd3a2e.json b/.changelog/342ecb3f3a9d40319775b914c9cd3a2e.json new file mode 100644 index 00000000000..e1ed22f0d96 --- /dev/null +++ b/.changelog/342ecb3f3a9d40319775b914c9cd3a2e.json @@ -0,0 +1,8 @@ +{ + "id": "342ecb3f-3a9d-4031-9775-b914c9cd3a2e", + "type": "feature", + "description": "DirectJDBCSource + Glue 4.0 streaming options", + "modules": [ + "service/glue" + ] +} \ No newline at end of file diff --git a/.changelog/bbcbd512268943458a8db104ea381f02.json b/.changelog/bbcbd512268943458a8db104ea381f02.json new file mode 100644 index 00000000000..24731237c75 --- /dev/null +++ b/.changelog/bbcbd512268943458a8db104ea381f02.json @@ -0,0 +1,8 @@ +{ + "id": "bbcbd512-2689-4345-8a8d-b104ea381f02", + "type": "feature", + "description": "This release removes the LFTagpolicyResource expression limits.", + "modules": [ + "service/lakeformation" + ] +} \ No newline at end of file diff --git a/service/arczonalshift/internal/endpoints/endpoints.go b/service/arczonalshift/internal/endpoints/endpoints.go index 0955d6be601..c7466ccbc09 100644 --- a/service/arczonalshift/internal/endpoints/endpoints.go +++ b/service/arczonalshift/internal/endpoints/endpoints.go @@ -138,6 +138,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-northeast-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-2", }: endpoints.Endpoint{}, diff --git a/service/backup/api_op_DescribeBackupJob.go b/service/backup/api_op_DescribeBackupJob.go index 5b5aef0c954..d8935081a5e 100644 --- a/service/backup/api_op_DescribeBackupJob.go +++ b/service/backup/api_op_DescribeBackupJob.go @@ -123,6 +123,10 @@ type DescribeBackupJobOutput struct { // on the resource type. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // The type of Amazon Web Services resource to be backed up; for example, an Amazon // Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service // (Amazon RDS) database. diff --git a/service/backup/api_op_DescribeProtectedResource.go b/service/backup/api_op_DescribeProtectedResource.go index e69dfc8d2b6..21eacce8cd0 100644 --- a/service/backup/api_op_DescribeProtectedResource.go +++ b/service/backup/api_op_DescribeProtectedResource.go @@ -52,6 +52,10 @@ type DescribeProtectedResourceOutput struct { // resource type. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // The type of Amazon Web Services resource saved as a recovery point; for example, // an Amazon EBS volume or an Amazon RDS database. ResourceType *string diff --git a/service/backup/api_op_DescribeRecoveryPoint.go b/service/backup/api_op_DescribeRecoveryPoint.go index ee2301eec0b..212b4eacb54 100644 --- a/service/backup/api_op_DescribeRecoveryPoint.go +++ b/service/backup/api_op_DescribeRecoveryPoint.go @@ -140,6 +140,10 @@ type DescribeRecoveryPointOutput struct { // on the resource type. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. 
+ ResourceName *string + // The type of Amazon Web Services resource to save as a recovery point; for // example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon // Relational Database Service (Amazon RDS) database. diff --git a/service/backup/api_op_PutBackupVaultLockConfiguration.go b/service/backup/api_op_PutBackupVaultLockConfiguration.go index 59c685f0f25..c9a78b2d7c1 100644 --- a/service/backup/api_op_PutBackupVaultLockConfiguration.go +++ b/service/backup/api_op_PutBackupVaultLockConfiguration.go @@ -15,8 +15,10 @@ import ( // attempts to update the lifecycle policy that controls the retention period of // any recovery point currently stored in a backup vault. If specified, Vault Lock // enforces a minimum and maximum retention period for future backup and copy jobs -// that target a backup vault. Backup Vault Lock has yet to receive a third-party -// assessment for SEC 17a-4(f) and CFTC. +// that target a backup vault. Backup Vault Lock has been assessed by Cohasset +// Associates for use in environments that are subject to SEC 17a-4, CFTC, and +// FINRA regulations. For more information about how Backup Vault Lock relates to +// these regulations, see the Cohasset Associates Compliance Assessment. func (c *Client) PutBackupVaultLockConfiguration(ctx context.Context, params *PutBackupVaultLockConfigurationInput, optFns ...func(*Options)) (*PutBackupVaultLockConfigurationOutput, error) { if params == nil { params = &PutBackupVaultLockConfigurationInput{} diff --git a/service/backup/api_op_StartBackupJob.go b/service/backup/api_op_StartBackupJob.go index 2e3641c248e..8bf8ebd6e56 100644 --- a/service/backup/api_op_StartBackupJob.go +++ b/service/backup/api_op_StartBackupJob.go @@ -109,7 +109,8 @@ type StartBackupJobOutput struct { // job. IsParent bool - // An ARN that uniquely identifies a recovery point; for example, + // Note: This field is only returned for Amazon EFS and Advanced DynamoDB + // resources. An ARN that uniquely identifies a recovery point; for example, // arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. 
RecoveryPointArn *string diff --git a/service/backup/deserializers.go b/service/backup/deserializers.go index 64fbb3db2cb..54c2e108b65 100644 --- a/service/backup/deserializers.go +++ b/service/backup/deserializers.go @@ -2615,6 +2615,15 @@ func awsRestjson1_deserializeOpDocumentDescribeBackupJobOutput(v **DescribeBacku sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -3649,6 +3658,15 @@ func awsRestjson1_deserializeOpDocumentDescribeProtectedResourceOutput(v **Descr sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -3975,6 +3993,15 @@ func awsRestjson1_deserializeOpDocumentDescribeRecoveryPointOutput(v **DescribeR sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -12603,6 +12630,15 @@ func awsRestjson1_deserializeDocumentBackupJob(v **types.BackupJob, value interf sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -14395,6 +14431,15 @@ func awsRestjson1_deserializeDocumentCopyJob(v **types.CopyJob, value interface{ sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -15590,6 +15635,15 @@ func awsRestjson1_deserializeDocumentProtectedResource(v **types.ProtectedResour sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -15830,6 +15884,15 @@ func awsRestjson1_deserializeDocumentRecoveryPointByBackupVault(v **types.Recove sv.ResourceArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "ResourceType": if value != nil { jtv, ok := value.(string) @@ -16005,6 +16068,15 @@ func awsRestjson1_deserializeDocumentRecoveryPointByResource(v **types.RecoveryP sv.RecoveryPointArn = ptr.String(jtv) } + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected string to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + case "Status": if value != nil { jtv, ok := value.(string) diff 
--git a/service/backup/types/types.go b/service/backup/types/types.go index 7a035847ff2..bfb2b1a4fc3 100644 --- a/service/backup/types/types.go +++ b/service/backup/types/types.go @@ -116,6 +116,10 @@ type BackupJob struct { // resource type. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // The type of Amazon Web Services resource to be backed up; for example, an Amazon // Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service // (Amazon RDS) database. For Windows Volume Shadow Copy Service (VSS) backups, the @@ -770,6 +774,10 @@ type CopyJob struct { // RDS) database. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // The type of Amazon Web Services resource to be copied; for example, an Amazon // Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service // (Amazon RDS) database. @@ -942,6 +950,10 @@ type ProtectedResource struct { // the ARN depends on the resource type. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // The type of Amazon Web Services resource; for example, an Amazon Elastic Block // Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) // database. For Windows Volume Shadow Copy Service (VSS) backups, the only @@ -1043,6 +1055,10 @@ type RecoveryPointByBackupVault struct { // resource type. ResourceArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // The type of Amazon Web Services resource saved as a recovery point; for example, // an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational // Database Service (Amazon RDS) database. For Windows Volume Shadow Copy Service @@ -1096,6 +1112,10 @@ type RecoveryPointByResource struct { // arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. RecoveryPointArn *string + // This is the non-unique name of the resource that belongs to the specified + // backup. + ResourceName *string + // A status code specifying the state of the recovery point. Status RecoveryPointStatus diff --git a/service/cloudfront/api_op_CreateOriginAccessControl.go b/service/cloudfront/api_op_CreateOriginAccessControl.go index 8c26dd33b28..61b3436901c 100644 --- a/service/cloudfront/api_op_CreateOriginAccessControl.go +++ b/service/cloudfront/api_op_CreateOriginAccessControl.go @@ -13,12 +13,12 @@ import ( // Creates a new origin access control in CloudFront. After you create an origin // access control, you can add it to an origin in a CloudFront distribution so that -// CloudFront sends authenticated (signed) requests to the origin. For an Amazon S3 -// origin, this makes it possible to block public access to the Amazon S3 bucket so -// that viewers (users) can access the content in the bucket only through -// CloudFront. For more information about using a CloudFront origin access control, -// see Restricting access to an Amazon S3 origin -// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) +// CloudFront sends authenticated (signed) requests to the origin. This makes it +// possible to block public access to the origin, allowing viewers (users) to +// access the origin's content only through CloudFront. 
For more information about +// using a CloudFront origin access control, see Restricting access to an Amazon +// Web Services origin +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-origin.html) // in the Amazon CloudFront Developer Guide. func (c *Client) CreateOriginAccessControl(ctx context.Context, params *CreateOriginAccessControlInput, optFns ...func(*Options)) (*CreateOriginAccessControlOutput, error) { if params == nil { diff --git a/service/cloudfront/types/enums.go b/service/cloudfront/types/enums.go index b5a0dfaed48..dfb41f87e95 100644 --- a/service/cloudfront/types/enums.go +++ b/service/cloudfront/types/enums.go @@ -354,7 +354,8 @@ type OriginAccessControlOriginTypes string // Enum values for OriginAccessControlOriginTypes const ( - OriginAccessControlOriginTypesS3 OriginAccessControlOriginTypes = "s3" + OriginAccessControlOriginTypesS3 OriginAccessControlOriginTypes = "s3" + OriginAccessControlOriginTypesMediastore OriginAccessControlOriginTypes = "mediastore" ) // Values returns all known values for OriginAccessControlOriginTypes. Note that @@ -364,6 +365,7 @@ const ( func (OriginAccessControlOriginTypes) Values() []OriginAccessControlOriginTypes { return []OriginAccessControlOriginTypes{ "s3", + "mediastore", } } diff --git a/service/cloudfront/types/types.go b/service/cloudfront/types/types.go index b758aea10c9..e392d6cad46 100644 --- a/service/cloudfront/types/types.go +++ b/service/cloudfront/types/types.go @@ -3044,8 +3044,7 @@ type OriginAccessControlConfig struct { // This member is required. Name *string - // The type of origin that this origin access control is for. The only valid value - // is s3. + // The type of origin that this origin access control is for. // // This member is required. OriginAccessControlOriginType OriginAccessControlOriginTypes @@ -3143,8 +3142,7 @@ type OriginAccessControlSummary struct { // This member is required. Name *string - // The type of origin that this origin access control is for. The only valid value - // is s3. + // The type of origin that this origin access control is for. // // This member is required. 
OriginAccessControlOriginType OriginAccessControlOriginTypes diff --git a/service/datasync/internal/endpoints/endpoints.go b/service/datasync/internal/endpoints/endpoints.go index 0fe058929e2..47bdece5629 100644 --- a/service/datasync/internal/endpoints/endpoints.go +++ b/service/datasync/internal/endpoints/endpoints.go @@ -153,6 +153,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{}, @@ -174,12 +177,18 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, diff --git a/service/eks/internal/endpoints/endpoints.go b/service/eks/internal/endpoints/endpoints.go index 62b9f01e267..db00092c37d 100644 --- a/service/eks/internal/endpoints/endpoints.go +++ b/service/eks/internal/endpoints/endpoints.go @@ -153,6 +153,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{}, @@ -168,12 +171,18 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, diff --git a/service/glue/deserializers.go b/service/glue/deserializers.go index ba3bec74a44..fbddbade913 100644 --- a/service/glue/deserializers.go +++ b/service/glue/deserializers.go @@ -38228,6 +38228,15 @@ func awsAwsjson11_deserializeDocumentKafkaStreamingSourceOptions(v **types.Kafka for key, value := range shape { switch key { + case "AddRecordTimestamp": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EnclosedInStringProperty to be of type string, got %T instead", value) + } + sv.AddRecordTimestamp = ptr.String(jtv) + } + case "Assign": if value != nil { jtv, ok := value.(string) @@ -38273,6 +38282,15 @@ func awsAwsjson11_deserializeDocumentKafkaStreamingSourceOptions(v **types.Kafka sv.Delimiter = ptr.String(jtv) } + case "EmitConsumerLagMetrics": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EnclosedInStringProperty to be of type string, got %T instead", value) + } + sv.EmitConsumerLagMetrics = ptr.String(jtv) + } + case "EndingOffsets": if value != nil { jtv, ok := value.(string) @@ -38282,6 +38300,15 @@ func awsAwsjson11_deserializeDocumentKafkaStreamingSourceOptions(v **types.Kafka sv.EndingOffsets = ptr.String(jtv) } + case "IncludeHeaders": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + 
sv.IncludeHeaders = ptr.Bool(jtv) + } + case "MaxOffsetsPerTrigger": if value != nil { jtv, ok := value.(json.Number) @@ -38506,6 +38533,15 @@ func awsAwsjson11_deserializeDocumentKinesisStreamingSourceOptions(v **types.Kin sv.AddIdleTimeBetweenReads = ptr.Bool(jtv) } + case "AddRecordTimestamp": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EnclosedInStringProperty to be of type string, got %T instead", value) + } + sv.AddRecordTimestamp = ptr.String(jtv) + } + case "AvoidEmptyBatches": if value != nil { jtv, ok := value.(bool) @@ -38546,6 +38582,15 @@ func awsAwsjson11_deserializeDocumentKinesisStreamingSourceOptions(v **types.Kin sv.DescribeShardInterval = ptr.Int64(i64) } + case "EmitConsumerLagMetrics": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EnclosedInStringProperty to be of type string, got %T instead", value) + } + sv.EmitConsumerLagMetrics = ptr.String(jtv) + } + case "EndpointUrl": if value != nil { jtv, ok := value.(string) diff --git a/service/glue/serializers.go b/service/glue/serializers.go index 5a07a8aa111..64d4d274a31 100644 --- a/service/glue/serializers.go +++ b/service/glue/serializers.go @@ -14845,6 +14845,11 @@ func awsAwsjson11_serializeDocumentKafkaStreamingSourceOptions(v *types.KafkaStr object := value.Object() defer object.Close() + if v.AddRecordTimestamp != nil { + ok := object.Key("AddRecordTimestamp") + ok.String(*v.AddRecordTimestamp) + } + if v.Assign != nil { ok := object.Key("Assign") ok.String(*v.Assign) @@ -14870,11 +14875,21 @@ func awsAwsjson11_serializeDocumentKafkaStreamingSourceOptions(v *types.KafkaStr ok.String(*v.Delimiter) } + if v.EmitConsumerLagMetrics != nil { + ok := object.Key("EmitConsumerLagMetrics") + ok.String(*v.EmitConsumerLagMetrics) + } + if v.EndingOffsets != nil { ok := object.Key("EndingOffsets") ok.String(*v.EndingOffsets) } + if v.IncludeHeaders != nil { + ok := object.Key("IncludeHeaders") + ok.Boolean(*v.IncludeHeaders) + } + if v.MaxOffsetsPerTrigger != nil { ok := object.Key("MaxOffsetsPerTrigger") ok.Long(*v.MaxOffsetsPerTrigger) @@ -14943,6 +14958,11 @@ func awsAwsjson11_serializeDocumentKinesisStreamingSourceOptions(v *types.Kinesi ok.Boolean(*v.AddIdleTimeBetweenReads) } + if v.AddRecordTimestamp != nil { + ok := object.Key("AddRecordTimestamp") + ok.String(*v.AddRecordTimestamp) + } + if v.AvoidEmptyBatches != nil { ok := object.Key("AvoidEmptyBatches") ok.Boolean(*v.AvoidEmptyBatches) @@ -14963,6 +14983,11 @@ func awsAwsjson11_serializeDocumentKinesisStreamingSourceOptions(v *types.Kinesi ok.Long(*v.DescribeShardInterval) } + if v.EmitConsumerLagMetrics != nil { + ok := object.Key("EmitConsumerLagMetrics") + ok.String(*v.EmitConsumerLagMetrics) + } + if v.EndpointUrl != nil { ok := object.Key("EndpointUrl") ok.String(*v.EndpointUrl) diff --git a/service/glue/types/types.go b/service/glue/types/types.go index 3fc8bce1266..a6006ae1124 100644 --- a/service/glue/types/types.go +++ b/service/glue/types/types.go @@ -722,7 +722,7 @@ type CodeGenConfigurationNode struct { // Specifies a custom visual transform created by a user. DynamicTransform *DynamicTransform - // Specifies a DynamoDB data source in the Glue Data Catalog. + // Specifies a DynamoDBC Catalog data store in the Glue Data Catalog. DynamoDBCatalogSource *DynamoDBCatalogSource // Specifies your data quality evaluation criteria. @@ -794,7 +794,7 @@ type CodeGenConfigurationNode struct { // Specifies a target that uses Amazon Redshift. 
RedshiftTarget *RedshiftTarget - // Specifies a Relational database data source in the Glue Data Catalog. + // Specifies a relational catalog data store in the Glue Data Catalog. RelationalCatalogSource *RelationalCatalogSource // Specifies a transform that renames a single data property key. @@ -3981,6 +3981,12 @@ type JsonClassifier struct { // Additional options for streaming. type KafkaStreamingSourceOptions struct { + // When this option is set to 'true', the data output will contain an additional + // column named "__src_timestamp" that indicates the time when the corresponding + // record received by the topic. The default value is 'false'. This option is + // supported in Glue version 4.0 or later. + AddRecordTimestamp *string + // The specific TopicPartitions to consume. You must specify at least one of // "topicName", "assign" or "subscribePattern". Assign *string @@ -3999,10 +4005,23 @@ type KafkaStreamingSourceOptions struct { // Specifies the delimiter character. Delimiter *string + // When this option is set to 'true', for each batch, it will emit the metrics for + // the duration between the oldest record received by the topic and the time it + // arrives in Glue to CloudWatch. The metric's name is + // "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This + // option is supported in Glue version 4.0 or later. + EmitConsumerLagMetrics *string + // The end point when a batch query is ended. Possible values are either "latest" // or a JSON string that specifies an ending offset for each TopicPartition. EndingOffsets *string + // Whether to include the Kafka headers. When the option is set to "true", the data + // output will contain an additional column named "glue_streaming_kafka_headers" + // with type Array[Struct(key: String, value: String)]. The default value is + // "false". This option is available in Glue version 3.0 or later only. + IncludeHeaders *bool + // The rate limit on the maximum number of offsets that are processed per trigger // interval. The specified total number of offsets is proportionally split across // topicPartitions of different volumes. The default value is null, which means @@ -4069,6 +4088,12 @@ type KinesisStreamingSourceOptions struct { // above. AddIdleTimeBetweenReads *bool + // When this option is set to 'true', the data output will contain an additional + // column named "__src_timestamp" that indicates the time when the corresponding + // record received by the stream. The default value is 'false'. This option is + // supported in Glue version 4.0 or later. + AddRecordTimestamp *string + // Avoids creating an empty microbatch job by checking for unread data in the // Kinesis data stream before the batch is started. The default value is "False". AvoidEmptyBatches *bool @@ -4083,6 +4108,13 @@ type KinesisStreamingSourceOptions struct { // consider resharding. The default value is 1s. DescribeShardInterval *int64 + // When this option is set to 'true', for each batch, it will emit the metrics for + // the duration between the oldest record received by the stream and the time it + // arrives in Glue to CloudWatch. The metric's name is + // "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This + // option is supported in Glue version 4.0 or later. + EmitConsumerLagMetrics *string + // The URL of the Kinesis endpoint. EndpointUrl *string @@ -5610,6 +5642,8 @@ type S3HudiCatalogTarget struct { // Specifies a target that writes to a Hudi data source in Amazon S3. 
type S3HudiDirectTarget struct { + // Specifies additional connection options for the connector. + // // This member is required. AdditionalOptions map[string]string @@ -5664,7 +5698,7 @@ type S3HudiSource struct { // Specifies additional connection options. AdditionalHudiOptions map[string]string - // Specifies additional connection options for the Amazon S3 data store. + // Specifies additional options for the connector. AdditionalOptions *S3DirectSourceAdditionalOptions // Specifies the data schema for the Hudi source. diff --git a/service/kendra/internal/endpoints/endpoints.go b/service/kendra/internal/endpoints/endpoints.go index 85b51b77572..1f4c208bf47 100644 --- a/service/kendra/internal/endpoints/endpoints.go +++ b/service/kendra/internal/endpoints/endpoints.go @@ -135,6 +135,9 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.Aws, IsRegionalized: true, Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-south-1", }: endpoints.Endpoint{},
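
A minimal sketch of reading the new ResourceName field from the Backup client's DescribeBackupJob output with the AWS SDK for Go v2; the backup job ID below is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/backup"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := backup.NewFromConfig(cfg)

	// The job ID is a placeholder; substitute a real backup job ID.
	out, err := client.DescribeBackupJob(ctx, &backup.DescribeBackupJobInput{
		BackupJobId: aws.String("EXAMPLE-JOB-ID"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// ResourceName is the new, non-unique name of the protected resource.
	fmt.Println("resource ARN: ", aws.ToString(out.ResourceArn))
	fmt.Println("resource name:", aws.ToString(out.ResourceName))
}
```

The same field appears on the other regenerated Backup outputs (DescribeProtectedResource, DescribeRecoveryPoint, and the job, copy-job, and recovery-point list shapes), so the same read pattern applies there.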
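With the new mediastore value of OriginAccessControlOriginTypes, an origin access control can now target an AWS Elemental MediaStore origin in addition to Amazon S3. A sketch of creating one; the control name is a placeholder and the signing settings shown are just one reasonable choice:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/cloudfront"
	"github.com/aws/aws-sdk-go-v2/service/cloudfront/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := cloudfront.NewFromConfig(cfg)

	// Create an origin access control for an AWS Elemental MediaStore origin.
	out, err := client.CreateOriginAccessControl(ctx, &cloudfront.CreateOriginAccessControlInput{
		OriginAccessControlConfig: &types.OriginAccessControlConfig{
			Name:                          aws.String("example-mediastore-oac"), // placeholder name
			OriginAccessControlOriginType: types.OriginAccessControlOriginTypesMediastore,
			SigningBehavior:               types.OriginAccessControlSigningBehaviorsAlways,
			SigningProtocol:               types.OriginAccessControlSigningProtocolsSigv4,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("origin access control ID:", aws.ToString(out.OriginAccessControl.Id))
}
```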
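The new Glue streaming options (AddRecordTimestamp, EmitConsumerLagMetrics, and IncludeHeaders for Kafka) are ordinary fields on the existing options structs. A sketch of populating KafkaStreamingSourceOptions; the connection name, topic, and offsets are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Streaming options for a Kafka source in a Glue 4.0 streaming job.
	// Connection, topic, and offset values below are placeholders.
	opts := types.KafkaStreamingSourceOptions{
		ConnectionName:  aws.String("example-kafka-connection"),
		TopicName:       aws.String("example-topic"),
		StartingOffsets: aws.String("earliest"),
		// New: adds a __src_timestamp column to the output (Glue 4.0+).
		AddRecordTimestamp: aws.String("true"),
		// New: emits glue.driver.streaming.maxConsumerLagInMs to CloudWatch (Glue 4.0+).
		EmitConsumerLagMetrics: aws.String("true"),
		// New: surfaces Kafka headers as a glue_streaming_kafka_headers column (Glue 3.0+).
		IncludeHeaders: aws.Bool(true),
	}

	fmt.Printf("%+v\n", opts)
}
```

In a real job these options would typically be attached to a Kafka source node (for example DirectKafkaSource.StreamingOptions) inside the CodeGenConfigurationNodes map passed to CreateJob; the Kinesis counterparts AddRecordTimestamp and EmitConsumerLagMetrics are set the same way on KinesisStreamingSourceOptions.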