diff --git a/clients/client-pipes/README.md b/clients/client-pipes/README.md index f08c01b5b7c43..71091ea96ada9 100644 --- a/clients/client-pipes/README.md +++ b/clients/client-pipes/README.md @@ -6,9 +6,12 @@ AWS SDK for JavaScript Pipes Client for Node.js, Browser and React Native. -
Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing -event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. -To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.
+Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need
+for specialized knowledge and integration code when developing event-driven architectures.
+This helps ensure consistency across your company’s applications. With Pipes, the target
+can be any available EventBridge target. To set up a pipe, you select the event
+source, add optional event filtering, define optional enrichment, and select the target for
+the event data.
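For a concrete starting point, here is a minimal, hypothetical sketch of creating a pipe with this client. The ARNs, pipe name, and filter pattern are placeholders, not working resources:

```ts
import { PipesClient, CreatePipeCommand } from "@aws-sdk/client-pipes";

const client = new PipesClient({ region: "us-east-1" });

// Select a source, add optional filtering, and select a target.
const response = await client.send(
  new CreatePipeCommand({
    Name: "order-events-pipe", // hypothetical pipe name
    RoleArn: "arn:aws:iam::111122223333:role/pipe-execution-role", // must allow reading the source and invoking the target
    Source: "arn:aws:sqs:us-east-1:111122223333:orders-queue",
    SourceParameters: {
      FilterCriteria: {
        // Only events whose body matches this pattern continue down the pipe.
        Filters: [{ Pattern: JSON.stringify({ body: { type: ["order.created"] } }) }],
      },
    },
    Target: "arn:aws:states:us-east-1:111122223333:stateMachine:ProcessOrder",
  })
);
console.log(response.Arn, response.CurrentState);
```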
## Installing diff --git a/clients/client-pipes/src/Pipes.ts b/clients/client-pipes/src/Pipes.ts index 3e270bdf69350..e9a286ca587a1 100644 --- a/clients/client-pipes/src/Pipes.ts +++ b/clients/client-pipes/src/Pipes.ts @@ -159,9 +159,12 @@ export interface Pipes { } /** - *Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing - * event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. - * To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.
+ *Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need
+ * for specialized knowledge and integration code when developing event-driven architectures.
+ * This helps ensure consistency across your company’s applications. With Pipes, the target
+ * can be any available EventBridge target. To set up a pipe, you select the event
+ * source, add optional event filtering, define optional enrichment, and select the target for
+ * the event data.
* @public */ export class Pipes extends PipesClient implements Pipes {} diff --git a/clients/client-pipes/src/PipesClient.ts b/clients/client-pipes/src/PipesClient.ts index 45fffd69f55fd..b38a97d647b62 100644 --- a/clients/client-pipes/src/PipesClient.ts +++ b/clients/client-pipes/src/PipesClient.ts @@ -279,9 +279,12 @@ export type PipesClientResolvedConfigType = __SmithyResolvedConfiguration<__Http export interface PipesClientResolvedConfig extends PipesClientResolvedConfigType {} /** - *Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing - * event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. - * To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.
+ *Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need
+ * for specialized knowledge and integration code when developing event-driven architectures.
+ * This helps ensure consistency across your company’s applications. With Pipes, the target
+ * can be any available EventBridge target. To set up a pipe, you select the event
+ * source, add optional event filtering, define optional enrichment, and select the target for
+ * the event data.
* @public */ export class PipesClient extends __Client< diff --git a/clients/client-pipes/src/commands/CreatePipeCommand.ts b/clients/client-pipes/src/commands/CreatePipeCommand.ts index d944abb59aa4f..1d4654bdfc974 100644 --- a/clients/client-pipes/src/commands/CreatePipeCommand.ts +++ b/clients/client-pipes/src/commands/CreatePipeCommand.ts @@ -27,7 +27,8 @@ export interface CreatePipeCommandInput extends CreatePipeRequest {} export interface CreatePipeCommandOutput extends CreatePipeResponse, __MetadataBearer {} /** - *Create a pipe. Amazon EventBridge Pipes connect event sources to targets and reduces the need for specialized knowledge and integration code.
+ *Create a pipe. Amazon EventBridge Pipes connects event sources to targets and reduces
+ * the need for specialized knowledge and integration code.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -331,6 +332,39 @@ export interface CreatePipeCommandOutput extends CreatePipeResponse, __MetadataB * LogStreamName: "STRING_VALUE", * Timestamp: "STRING_VALUE", * }, + * TimestreamParameters: { // PipeTargetTimestreamParameters + * TimeValue: "STRING_VALUE", // required + * EpochTimeUnit: "STRING_VALUE", + * TimeFieldType: "STRING_VALUE", + * TimestampFormat: "STRING_VALUE", + * VersionValue: "STRING_VALUE", // required + * DimensionMappings: [ // DimensionMappings // required + * { // DimensionMapping + * DimensionValue: "STRING_VALUE", // required + * DimensionValueType: "STRING_VALUE", // required + * DimensionName: "STRING_VALUE", // required + * }, + * ], + * SingleMeasureMappings: [ // SingleMeasureMappings + * { // SingleMeasureMapping + * MeasureValue: "STRING_VALUE", // required + * MeasureValueType: "STRING_VALUE", // required + * MeasureName: "STRING_VALUE", // required + * }, + * ], + * MultiMeasureMappings: [ // MultiMeasureMappings + * { // MultiMeasureMapping + * MultiMeasureName: "STRING_VALUE", // required + * MultiMeasureAttributeMappings: [ // MultiMeasureAttributeMappings // required + * { // MultiMeasureAttributeMapping + * MeasureValue: "STRING_VALUE", // required + * MeasureValueType: "STRING_VALUE", // required + * MultiMeasureAttributeName: "STRING_VALUE", // required + * }, + * ], + * }, + * ], + * }, * }, * RoleArn: "STRING_VALUE", // required * Tags: { // TagMap diff --git a/clients/client-pipes/src/commands/DescribePipeCommand.ts b/clients/client-pipes/src/commands/DescribePipeCommand.ts index 8e6aa2c9be9eb..55e63b176b70e 100644 --- a/clients/client-pipes/src/commands/DescribePipeCommand.ts +++ b/clients/client-pipes/src/commands/DescribePipeCommand.ts @@ -339,6 +339,39 @@ export interface DescribePipeCommandOutput extends DescribePipeResponse, __Metad * // LogStreamName: "STRING_VALUE", * // Timestamp: "STRING_VALUE", * // }, + * // TimestreamParameters: { // PipeTargetTimestreamParameters + * // TimeValue: "STRING_VALUE", // required + * // EpochTimeUnit: "STRING_VALUE", + * // TimeFieldType: "STRING_VALUE", + * // TimestampFormat: "STRING_VALUE", + * // VersionValue: "STRING_VALUE", // required + * // DimensionMappings: [ // DimensionMappings // required + * // { // DimensionMapping + * // DimensionValue: "STRING_VALUE", // required + * // DimensionValueType: "STRING_VALUE", // required + * // DimensionName: "STRING_VALUE", // required + * // }, + * // ], + * // SingleMeasureMappings: [ // SingleMeasureMappings + * // { // SingleMeasureMapping + * // MeasureValue: "STRING_VALUE", // required + * // MeasureValueType: "STRING_VALUE", // required + * // MeasureName: "STRING_VALUE", // required + * // }, + * // ], + * // MultiMeasureMappings: [ // MultiMeasureMappings + * // { // MultiMeasureMapping + * // MultiMeasureName: "STRING_VALUE", // required + * // MultiMeasureAttributeMappings: [ // MultiMeasureAttributeMappings // required + * // { // MultiMeasureAttributeMapping + * // MeasureValue: "STRING_VALUE", // required + * // MeasureValueType: "STRING_VALUE", // required + * // MultiMeasureAttributeName: "STRING_VALUE", // required + * // }, + * // ], + * // }, + * // ], + * // }, * // }, * // RoleArn: "STRING_VALUE", * // Tags: { // TagMap diff --git a/clients/client-pipes/src/commands/TagResourceCommand.ts b/clients/client-pipes/src/commands/TagResourceCommand.ts index 03023112c10a4..dd5fde2dc1eae 100644 --- 
a/clients/client-pipes/src/commands/TagResourceCommand.ts +++ b/clients/client-pipes/src/commands/TagResourceCommand.ts @@ -27,16 +27,16 @@ export interface TagResourceCommandInput extends TagResourceRequest {} export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} /** - *Assigns one or more tags (key-value pairs) to the specified pipe. Tags can - * help you organize and categorize your resources. You can also use them to scope user - * permissions by granting a user permission to access or change only resources with certain tag + *
Assigns one or more tags (key-value pairs) to the specified pipe. Tags can help you + * organize and categorize your resources. You can also use them to scope user permissions by + * granting a user permission to access or change only resources with certain tag * values.
- *Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of - * characters.
+ *Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly + * as strings of characters.
*You can use the TagResource
action with a pipe that already has tags. If
- * you specify a new tag key, this tag is appended to the list of tags associated with the
- * pipe. If you specify a tag key that is already associated with the pipe, the new tag
- * value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a pipe.
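As an illustration of that append-or-replace behavior, a hedged sketch (the pipe ARN and tag values are hypothetical, and the lowercase `resourceArn`/`tags` member names are assumed from this client's request shape):

```ts
import { PipesClient, TagResourceCommand } from "@aws-sdk/client-pipes";

const client = new PipesClient({});

// "stage" is appended if the pipe has no such key; if the key already
// exists, its previous value is replaced with "prod".
await client.send(
  new TagResourceCommand({
    resourceArn: "arn:aws:pipes:us-east-1:111122223333:pipe/order-events-pipe",
    tags: { stage: "prod", team: "payments" },
  })
);
```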
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-pipes/src/commands/UpdatePipeCommand.ts b/clients/client-pipes/src/commands/UpdatePipeCommand.ts index 34c8ad34e863f..6978f7dc3528b 100644 --- a/clients/client-pipes/src/commands/UpdatePipeCommand.ts +++ b/clients/client-pipes/src/commands/UpdatePipeCommand.ts @@ -27,13 +27,18 @@ export interface UpdatePipeCommandInput extends UpdatePipeRequest {} export interface UpdatePipeCommandOutput extends UpdatePipeResponse, __MetadataBearer {} /** - *Update an existing pipe. When you call UpdatePipe
, EventBridge only the updates fields you have specified in the request; the rest remain unchanged.
- * The exception to this is if you modify any Amazon Web Services-service specific fields in the SourceParameters
, EnrichmentParameters
, or
- * TargetParameters
objects. For example, DynamoDBStreamParameters
or EventBridgeEventBusParameters
.
- * EventBridge updates the fields in these objects atomically as one and overrides existing values.
- * This is by design, and means that if you don't specify an optional field in one of these Parameters
objects, EventBridge sets that field to its system-default value during the update.
Update an existing pipe. When you call UpdatePipe
, EventBridge only
+ * updates the fields you have specified in the request; the rest remain unchanged. The exception
+ * to this is if you modify any Amazon Web Services service-specific fields in the
+ * SourceParameters
, EnrichmentParameters
, or
+ * TargetParameters
objects. For example,
+ * DynamoDBStreamParameters
or EventBridgeEventBusParameters
.
+ * EventBridge updates the fields in these objects atomically as one and overrides existing
+ * values. This is by design, and means that if you don't specify an optional field in one of
+ * these Parameters
objects, EventBridge sets that field to its system-default
+ * value during the update.
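To avoid unintentionally resetting optional fields, one hedged pattern is to read the current configuration first and send the service-specific objects back in full. The pipe name and the field being changed below are hypothetical:

```ts
import { PipesClient, DescribePipeCommand, UpdatePipeCommand } from "@aws-sdk/client-pipes";

const client = new PipesClient({});
const current = await client.send(new DescribePipeCommand({ Name: "order-events-pipe" }));

// Send TargetParameters back in full; any optional field omitted here
// would be reset to its system default by the update.
await client.send(
  new UpdatePipeCommand({
    Name: "order-events-pipe",
    RoleArn: current.RoleArn!, // RoleArn is required on update
    TargetParameters: {
      ...current.TargetParameters,
      SqsQueueParameters: {
        ...current.TargetParameters?.SqsQueueParameters,
        MessageGroupId: "orders", // the one field actually being changed
      },
    },
  })
);
```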
For more information about pipes, see - * Amazon EventBridge Pipes in the Amazon EventBridge User Guide.
+ * Amazon EventBridge Pipes in the Amazon EventBridge User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -321,6 +326,39 @@ export interface UpdatePipeCommandOutput extends UpdatePipeResponse, __MetadataB * LogStreamName: "STRING_VALUE", * Timestamp: "STRING_VALUE", * }, + * TimestreamParameters: { // PipeTargetTimestreamParameters + * TimeValue: "STRING_VALUE", // required + * EpochTimeUnit: "STRING_VALUE", + * TimeFieldType: "STRING_VALUE", + * TimestampFormat: "STRING_VALUE", + * VersionValue: "STRING_VALUE", // required + * DimensionMappings: [ // DimensionMappings // required + * { // DimensionMapping + * DimensionValue: "STRING_VALUE", // required + * DimensionValueType: "STRING_VALUE", // required + * DimensionName: "STRING_VALUE", // required + * }, + * ], + * SingleMeasureMappings: [ // SingleMeasureMappings + * { // SingleMeasureMapping + * MeasureValue: "STRING_VALUE", // required + * MeasureValueType: "STRING_VALUE", // required + * MeasureName: "STRING_VALUE", // required + * }, + * ], + * MultiMeasureMappings: [ // MultiMeasureMappings + * { // MultiMeasureMapping + * MultiMeasureName: "STRING_VALUE", // required + * MultiMeasureAttributeMappings: [ // MultiMeasureAttributeMappings // required + * { // MultiMeasureAttributeMapping + * MeasureValue: "STRING_VALUE", // required + * MeasureValueType: "STRING_VALUE", // required + * MultiMeasureAttributeName: "STRING_VALUE", // required + * }, + * ], + * }, + * ], + * }, * }, * RoleArn: "STRING_VALUE", // required * LogConfiguration: { // PipeLogConfigurationParameters diff --git a/clients/client-pipes/src/index.ts b/clients/client-pipes/src/index.ts index 6a76321679c5b..cc8ca3ad0b406 100644 --- a/clients/client-pipes/src/index.ts +++ b/clients/client-pipes/src/index.ts @@ -1,9 +1,12 @@ // smithy-typescript generated code /* eslint-disable */ /** - *Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing - * event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. - * To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.
+ *Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need
+ * for specialized knowledge and integration code when developing event-driven architectures.
+ * This helps ensure consistency across your company’s applications. With Pipes, the target
+ * can be any available EventBridge target. To set up a pipe, you select the event
+ * source, add optional event filtering, define optional enrichment, and select the target for
+ * the event data.
* * @packageDocumentation */ diff --git a/clients/client-pipes/src/models/models_0.ts b/clients/client-pipes/src/models/models_0.ts index 0a2d06bd5a7d9..b4cf5c95615a3 100644 --- a/clients/client-pipes/src/models/models_0.ts +++ b/clients/client-pipes/src/models/models_0.ts @@ -18,27 +18,31 @@ export const AssignPublicIp = { export type AssignPublicIp = (typeof AssignPublicIp)[keyof typeof AssignPublicIp]; /** - *This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used.
- * This structure is relevant only for ECS tasks that use the awsvpc
network mode.
This structure specifies the VPC subnets and security groups for the task, and whether a
+ * public IP address is to be used. This structure is relevant only for ECS tasks that use the
+ * awsvpc
network mode.
Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
+ *Specifies the subnets associated with the task. These subnets must all be in the same + * VPC. You can specify as many as 16 subnets.
* @public */ Subnets: string[] | undefined; /** - *Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many - * as five security groups. If you do not specify a security group, the default security group for the VPC is used.
+ *Specifies the security groups associated with the task. These security groups must all + * be in the same VPC. You can specify as many as five security groups. If you do not specify + * a security group, the default security group for the VPC is used.
* @public */ SecurityGroups?: string[]; /** - *Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED
only when
- * LaunchType
in EcsParameters
is set to FARGATE
.
Specifies whether the task's elastic network interface receives a public IP address. You
+ * can specify ENABLED
only when LaunchType
in
+ * EcsParameters
is set to FARGATE
.
The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing + *
The environment variables to send to the container. You can add new environment + * variables, which are added to the container at launch, or you can override the existing * environment variables from the Docker image or the task definition.
*Environment variables cannot start with "Batch
". This naming convention is reserved for variables that Batch sets.
Environment variables cannot start with "Batch
". This
+ * naming convention is reserved for variables that Batch sets.
The name of the key-value pair. For environment variables, this is the name of the environment variable.
+ *The name of the key-value pair. For environment variables, this is the name of the + * environment variable.
* @public */ Name?: string; /** - *The value of the key-value pair. For environment variables, this is the value of the environment variable.
+ *The value of the key-value pair. For environment variables, this is the value of the + * environment variable.
* @public */ Value?: string; @@ -96,49 +104,55 @@ export type BatchResourceRequirementType = (typeof BatchResourceRequirementType)[keyof typeof BatchResourceRequirementType]; /** - *The type and amount of a resource to assign to a container. The supported resources include GPU
, MEMORY
, and VCPU
.
The type and amount of a resource to assign to a container. The supported resources
+ * include GPU
, MEMORY
, and VCPU
.
The type of resource to assign to a container. The supported resources include GPU
, MEMORY
, and VCPU
.
The type of resource to assign to a container. The supported resources include
+ * GPU
, MEMORY
, and VCPU
.
The quantity of the specified resource to reserve for the container. The values vary based on the
- * type
specified.
The quantity of the specified resource to reserve for the container. The values vary
+ * based on the type
specified.
The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all - * containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched - * on.
+ *The number of physical GPUs to reserve for the container. Make sure that the + * number of GPUs reserved for all containers in a job doesn't exceed the number of + * available GPUs on the compute resource that the job is launched on.
*GPUs aren't available for jobs that are running on Fargate resources.
+ *GPUs aren't available for jobs that are running on Fargate + * resources.
*The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are
- * running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated.
- * This parameter maps to Memory
in the
- * Create a container section of the Docker Remote API
- * and the --memory
option to docker run.
- * You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for
- * multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to
- * Memory
in the
- * Create a container section of the Docker Remote API and the
- * --memory
option to docker run.
The memory hard limit (in MiB) present to the container. This parameter is
+ * supported for jobs that are running on EC2 resources. If your container attempts
+ * to exceed the memory specified, the container is terminated. This parameter maps
+ * to Memory
in the Create a
+ * container section of the Docker Remote API and
+ * the --memory
option to docker run. You
+ * must specify at least 4 MiB of memory for a job. This is required but can be
+ * specified in several places for multi-node parallel (MNP) jobs. It must be
+ * specified for each node at least once. This parameter maps to Memory
+ * in the
+ * Create a container section of the Docker Remote API and
+ * the --memory
option to docker run.
If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for - * a particular instance type, see Memory - * management in the Batch User Guide.
+ *If you're trying to maximize your resource utilization by providing your + * jobs as much memory as possible for a particular instance type, see Memory management in the Batch User + * Guide.
*For jobs that are running on Fargate resources, then value
is the hard limit (in MiB), and
- * must match one of the supported values and the VCPU
values must be one of the values supported for
- * that memory value.
For jobs that are running on Fargate resources, then
+ * value
is the hard limit (in MiB), and must match one of the
+ * supported values and the VCPU
values must be one of the values
+ * supported for that memory value.
* VCPU
= 2, 4, or 8
* VCPU
= 4
* VCPU
= 8 or 16
* VCPU
= 16
The number of vCPUs reserved for the container. This parameter maps to CpuShares
in the
- *
- * Create a container section of the Docker Remote API
- * and the --cpu-shares
option to
- * docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2
- * resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be
- * specified for each node at least once.
The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about - * Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference.
- *For jobs that are running on Fargate resources, then value
must match one of the supported
- * values and the MEMORY
values must be one of the values supported for that VCPU
value.
- * The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16
The number of vCPUs reserved for the container. This parameter maps to
+ * CpuShares
in the Create a
+ * container section of the Docker Remote API and
+ * the --cpu-shares
option to docker run. Each
+ * vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at
+ * least one vCPU. This is required but can be specified in several places; it must
+ * be specified for each node at least once.
The default for the Fargate On-Demand vCPU resource count quota + * is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services + * General Reference.
+ *For jobs that are running on Fargate resources, then
+ * value
must match one of the supported values and the
+ * MEMORY
values must be one of the values supported for that
+ * VCPU
value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and
+ * 16
- * MEMORY
= 2048, 3072, 4096, 5120, 6144, 7168, or 8192
MEMORY
= 2048, 3072, 4096, 5120, 6144, 7168, or
+ * 8192
*
- * MEMORY
= 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384
MEMORY
= 4096, 5120, 6144, 7168, 8192, 9216, 10240,
+ * 11264, 12288, 13312, 14336, 15360, or 16384
*
- * MEMORY
= 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456,
- * 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720
MEMORY
= 8192, 9216, 10240, 11264, 12288, 13312, 14336,
+ * 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576,
+ * 25600, 26624, 27648, 28672, 29696, or 30720
*
- * MEMORY
= 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440
- *
MEMORY
= 16384, 20480, 24576, 28672, 32768, 36864, 40960,
+ * 45056, 49152, 53248, 57344, or 61440
*
- * MEMORY
= 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880
- *
MEMORY
= 32768, 40960, 49152, 57344, 65536, 73728, 81920,
+ * 90112, 98304, 106496, 114688, or 122880
* The command to send to the container that overrides the default command from the Docker image or the task definition.
+ *The command to send to the container that overrides the default command from the Docker + * image or the task definition.
* @public */ Command?: string[]; /** - *The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing + *
The environment variables to send to the container. You can add new environment + * variables, which are added to the container at launch, or you can override the existing * environment variables from the Docker image or the task definition.
*Environment variables cannot start with "Batch
". This naming convention is reserved for variables that Batch sets.
Environment variables cannot start with "Batch
". This
+ * naming convention is reserved for variables that Batch sets.
The instance type to use for a multi-node parallel job.
*This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.
+ *This parameter isn't applicable to single-node container jobs or jobs that run on + * Fargate resources, and shouldn't be provided.
*The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU
, MEMORY
,
- * and VCPU
.
The type and amount of resources to assign to a container. This overrides the settings
+ * in the job definition. The supported resources include GPU
,
+ * MEMORY
, and VCPU
.
The job ID of the Batch job that's associated with this dependency.
+ *The job ID of the Batch job that's associated with this + * dependency.
* @public */ JobId?: string; @@ -344,14 +372,15 @@ export interface BatchJobDependency { } /** - *The retry strategy that's associated with a job. For more information, see - * Automated job retries in the Batch User Guide.
+ *The retry strategy that's associated with a job. For more information, see Automated job + * retries in the Batch User Guide.
* @public */ export interface BatchRetryStrategy { /** - *The number of times to move a job to the RUNNABLE
status. If the value of attempts
is greater than one, the job is retried on
- * failure the same number of attempts as the value.
The number of times to move a job to the RUNNABLE
status. If the value of
+ * attempts
is greater than one, the job is retried on failure the same number
+ * of attempts as the value.
The weight value designates the relative percentage of the total number of tasks launched - * that should use the specified capacity provider. The weight value is taken into consideration - * after the base value, if defined, is satisfied.
+ *The weight value designates the relative percentage of the total number of tasks + * launched that should use the specified capacity provider. The weight value is taken into + * consideration after the base value, if defined, is satisfied.
* @public */ weight?: number; /** *The base value designates how many tasks, at a minimum, to run on the specified capacity - * provider. Only one capacity provider in a capacity provider strategy can have a base defined. - * If no value is specified, the default value of 0 is used.
+ * provider. Only one capacity provider in a capacity provider strategy can have a base + * defined. If no value is specified, the default value of 0 is used. * @public */ base?: number; @@ -458,30 +487,29 @@ export const RequestedPipeState = { export type RequestedPipeState = (typeof RequestedPipeState)[keyof typeof RequestedPipeState]; /** - *These are custom parameter to be used when the target is an API Gateway REST APIs or - * EventBridge ApiDestinations. In the latter case, these are merged with any - * InvocationParameters specified on the Connection, with any values from the Connection taking - * precedence.
+ *These are custom parameter to be used when the target is an API Gateway REST APIs + * or EventBridge ApiDestinations. In the latter case, these are merged with any + * InvocationParameters specified on the Connection, with any values from the Connection + * taking precedence.
* @public */ export interface PipeEnrichmentHttpParameters { /** - *The path parameter values to be used to populate API Gateway REST API or EventBridge - * ApiDestination path wildcards ("*").
+ *The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
* @public */ PathParameterValues?: string[]; /** - *The headers that need to be sent as part of request invoking the API Gateway REST API or - * EventBridge ApiDestination.
+ *The headers that need to be sent as part of request invoking the API Gateway REST + * API or EventBridge ApiDestination.
* @public */ HeaderParameters?: RecordThe query string keys/values that need to be sent as part of request invoking the API Gateway - * REST API or EventBridge ApiDestination.
+ *The query string keys/values that need to be sent as part of request invoking the + * API Gateway REST API or EventBridge ApiDestination.
* @public */ QueryStringParameters?: RecordValid JSON text passed to the enrichment. In this case, nothing from the event itself is - * passed to the enrichment. For more information, see The JavaScript Object Notation (JSON) Data - * Interchange Format.
+ * passed to the enrichment. For more information, see The JavaScript Object Notation (JSON) + * Data Interchange Format. *To remove an input template, specify an empty string.
* @public */ InputTemplate?: string; /** - *Contains the HTTP parameters to use when the target is a API Gateway REST endpoint or - * EventBridge ApiDestination.
- *If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can - * use this parameter to specify headers, path parameters, and query string keys/values as part - * of your target invoking request. If you're using ApiDestinations, the corresponding Connection - * can also have these values configured. In case of any conflicting keys, values from the - * Connection take precedence.
+ *Contains the HTTP parameters to use when the target is a API Gateway REST + * endpoint or EventBridge ApiDestination.
+ *If you specify an API Gateway REST API or EventBridge ApiDestination as a + * target, you can use this parameter to specify headers, path parameters, and query string + * keys/values as part of your target invoking request. If you're using ApiDestinations, the + * corresponding Connection can also have these values configured. In case of any conflicting + * keys, values from the Connection take precedence.
* @public */ HttpParameters?: PipeEnrichmentHttpParameters; } /** - *The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
+ *The Amazon Data Firehose logging configuration settings for the pipe.
* @public */ export interface FirehoseLogDestinationParameters { /** - *Specifies the Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.
+ *Specifies the Amazon Resource Name (ARN) of the Firehose delivery stream to + * which EventBridge delivers the pipe log records.
* @public */ DeliveryStreamArn: string | undefined; @@ -576,13 +605,15 @@ export type S3OutputFormat = (typeof S3OutputFormat)[keyof typeof S3OutputFormat */ export interface S3LogDestinationParameters { /** - *Specifies the name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.
+ *Specifies the name of the Amazon S3 bucket to which EventBridge delivers + * the log records for the pipe.
* @public */ BucketName: string | undefined; /** - *Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.
+ *Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which + * EventBridge delivers the log records for the pipe.
* @public */ BucketOwner: string | undefined; @@ -600,7 +631,8 @@ export interface S3LogDestinationParameters { * *
- * w3c
: W3C extended logging file format
+ * w3c
: W3C extended
+ * logging file format
*
Specifies any prefix text with which to begin Amazon S3 log object names.
- *You can use prefixes to organize the data that you store in Amazon S3 buckets. - * A prefix is a string of characters at the beginning of the object key name. - * A prefix can be any length, subject to the maximum length of the object key name (1,024 bytes). - * For more information, see Organizing objects using prefixes - * in the Amazon Simple Storage Service User Guide.
+ *You can use prefixes to organize the data that you store in Amazon S3 buckets. A + * prefix is a string of characters at the beginning of the object key name. A prefix can be + * any length, subject to the maximum length of the object key name (1,024 bytes). For more + * information, see Organizing objects using + * prefixes in the Amazon Simple Storage Service User Guide.
* @public */ Prefix?: string; @@ -630,13 +662,13 @@ export interface S3LogDestinationParameters { *FirehoseLogDestinationParameters
, or
* S3LogDestinationParameters
), EventBridge sets that field to its
* system-default value during the update.
- * For example, suppose when you created the pipe you
- * specified a Kinesis Data Firehose stream log destination. You then update the pipe to add an
- * Amazon S3 log destination. In addition to specifying the
- * S3LogDestinationParameters
for the new log destination, you must also
- * specify the fields in the FirehoseLogDestinationParameters
object in order to
- * retain the Kinesis Data Firehose stream log destination.
For more information on generating pipe log records, see Log EventBridge Pipes in the Amazon EventBridge User Guide.
+ *For example, suppose when you created the pipe you specified a Firehose stream
+ * log destination. You then update the pipe to add an Amazon S3 log destination. In
+ * addition to specifying the S3LogDestinationParameters
for the new log
+ * destination, you must also specify the fields in the
+ * FirehoseLogDestinationParameters
object in order to retain the Firehose stream log destination.
For more information on generating pipe log records, see Log EventBridge + * Pipes in the Amazon EventBridge User Guide.
* @public */ export interface PipeLogConfigurationParameters { @@ -647,7 +679,7 @@ export interface PipeLogConfigurationParameters { S3LogDestination?: S3LogDestinationParameters; /** - *The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
+ *The Amazon Data Firehose logging configuration settings for the pipe.
* @public */ FirehoseLogDestination?: FirehoseLogDestinationParameters; @@ -660,16 +692,21 @@ export interface PipeLogConfigurationParameters { /** *The level of logging detail to include. This applies to all log destinations for the pipe.
- *For more information, see Specifying EventBridge Pipes log level in the Amazon EventBridge User Guide.
+ *For more information, see Specifying + * EventBridge Pipes log level in the Amazon EventBridge User + * Guide.
* @public */ Level: LogLevel | undefined; /** - *Specify ON
to include the execution data (specifically, the payload
and awsRequest
fields) in the log messages for this pipe.
Specify ALL
to include the execution data (specifically, the
+ * payload
, awsRequest
, and awsResponse
fields) in
+ * the log messages for this pipe.
This applies to all log destinations for the pipe.
- *For more information, see Including execution data in logs in the Amazon EventBridge User Guide.
- *The default is OFF
.
For more information, see Including execution data in logs in the Amazon EventBridge User + * Guide.
+ *By default, execution data is not included.
* @public */ IncludeExecutionData?: IncludeExecutionDataOption[]; @@ -746,7 +783,8 @@ export interface PipeSourceActiveMQBrokerParameters { } /** - *A DeadLetterConfig
object that contains information about a dead-letter queue configuration.
A DeadLetterConfig
object that contains information about a dead-letter
+ * queue configuration.
Filter events using an event pattern. For more information, see Events and Event - * Patterns in the Amazon EventBridge User Guide.
+ * Patterns in the Amazon EventBridge User Guide. * @public */ export interface Filter { @@ -945,7 +983,8 @@ export interface PipeSourceKinesisStreamParameters { StartingPosition: KinesisStreamStartPosition | undefined; /** - *With StartingPosition
set to AT_TIMESTAMP
, the time from which to start reading, in Unix time seconds.
With StartingPosition
set to AT_TIMESTAMP
, the time from which
+ * to start reading, in Unix time seconds.
This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.
+ *This structure specifies the VPC subnets and security groups for the stream, and whether + * a public IP address is to be used.
* @public */ export interface SelfManagedKafkaAccessConfigurationVpc { /** - *Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
+ *Specifies the subnets associated with the stream. These subnets must all be in the same + * VPC. You can specify as many as 16 subnets.
* @public */ Subnets?: string[]; /** - *Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many - * as five security groups. If you do not specify a security group, the default security group for the VPC is used.
+ *Specifies the security groups associated with the stream. These security groups must all + * be in the same VPC. You can specify as many as five security groups. If you do not specify + * a security group, the default security group for the VPC is used.
* @public */ SecurityGroup?: string[]; @@ -1227,6 +1269,10 @@ export interface SelfManagedKafkaAccessConfigurationVpc { /** *The parameters for using a self-managed Apache Kafka stream as a source.
+ *A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. + * This includes both clusters you manage yourself, as well as those hosted by a third-party + * provider, such as Confluent + * Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.
* @public */ export interface PipeSourceSelfManagedKafkaParameters { @@ -1355,6 +1401,10 @@ export interface PipeSourceParameters { /** *The parameters for using a self-managed Apache Kafka stream as a source.
+ *A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. + * This includes both clusters you manage yourself, as well as those hosted by a third-party + * provider, such as Confluent + * Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.
* @public */ SelfManagedKafkaParameters?: PipeSourceSelfManagedKafkaParameters; @@ -1366,15 +1416,17 @@ export interface PipeSourceParameters { */ export interface PipeTargetBatchJobParameters { /** - *The job definition used by this job. This value can be one of name
, name:revision
, or the Amazon Resource Name (ARN) for the job definition.
- * If name is specified without a revision then the latest active revision is used.
The job definition used by this job. This value can be one of name
,
+ * name:revision
, or the Amazon Resource Name (ARN) for the job definition. If
+ * name is specified without a revision then the latest active revision is used.
The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), - * and underscores (_).
+ *The name of the job. It can be up to 128 letters long. The first character must be + * alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and + * underscores (_).
* @public */ JobName: string | undefined; @@ -1387,7 +1439,8 @@ export interface PipeTargetBatchJobParameters { ArrayProperties?: BatchArrayProperties; /** - *The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.
+ *The retry strategy to use for failed jobs. When a retry strategy is specified here, it + * overrides the retry strategy defined in the job definition.
* @public */ RetryStrategy?: BatchRetryStrategy; @@ -1399,17 +1452,21 @@ export interface PipeTargetBatchJobParameters { ContainerOverrides?: BatchContainerOverrides; /** - *A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL
type dependency without
- * specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N
- * type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each
- * dependency to complete before it can begin.
A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can
+ * specify a SEQUENTIAL
type dependency without specifying a job ID for array
+ * jobs so that each child array job completes sequentially, starting at index 0. You can also
+ * specify an N_TO_N
type dependency with a job ID for array jobs. In that case,
+ * each index child of this job must wait for the corresponding index child of each dependency
+ * to complete before it can begin.
Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and - * value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition.
+ *Additional parameters passed to the job that replace parameter substitution placeholders + * that are set in the job definition. Parameters are specified as a key and value pair + * mapping. Parameters included here override any corresponding parameter defaults from the + * job definition.
* @public */ Parameters?: RecordThe time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
+ *The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 + * 00:00:00 UTC.
* @public */ Timestamp?: string; @@ -1449,33 +1507,38 @@ export const LaunchType = { export type LaunchType = (typeof LaunchType)[keyof typeof LaunchType]; /** - *This structure specifies the network configuration for an Amazon ECS task.
+ *This structure specifies the network configuration for an Amazon ECS + * task.
* @public */ export interface NetworkConfiguration { /** *Use this structure to specify the VPC subnets and security groups for the task, and
- * whether a public IP address is to be used. This structure is relevant only for ECS tasks that
- * use the awsvpc
network mode.
awsvpc
network mode.
* @public
*/
awsvpcConfiguration?: AwsVpcConfiguration;
}
/**
- * The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can - * override the existing environment variables from the Docker image or the task definition. You must also specify a container name.
+ *The environment variables to send to the container. You can add new environment + * variables, which are added to the container at launch, or you can override the existing + * environment variables from the Docker image or the task definition. You must also specify a + * container name.
* @public */ export interface EcsEnvironmentVariable { /** - *The name of the key-value pair. For environment variables, this is the name of the environment variable.
+ *The name of the key-value pair. For environment variables, this is the name of the + * environment variable.
* @public */ name?: string; /** - *The value of the key-value pair. For environment variables, this is the value of the environment variable.
+ *The value of the key-value pair. For environment variables, this is the value of the + * environment variable.
* @public */ value?: string; @@ -1496,20 +1559,21 @@ export type EcsEnvironmentFileType = (typeof EcsEnvironmentFileType)[keyof typeo /** *A list of files containing the environment variables to pass to a container. You can
- * specify up to ten environment files. The file must have a .env
file
- * extension. Each line in an environment file should contain an environment variable in
- * VARIABLE=VALUE
format. Lines beginning with #
are treated
- * as comments and are ignored. For more information about the environment variable file
- * syntax, see Declare default
- * environment variables in file.
.env
file extension.
+ * Each line in an environment file should contain an environment variable in
+ * VARIABLE=VALUE
format. Lines beginning with #
are treated as
+ * comments and are ignored. For more information about the environment variable file syntax,
+ * see Declare default environment
+ * variables in file.
* If there are environment variables specified using the environment
- * parameter in a container definition, they take precedence over the variables contained
- * within an environment file. If multiple environment files are specified that contain the
- * same variable, they're processed from the top down. We recommend that you use unique
- * variable names. For more information, see Specifying environment
- * variables in the Amazon Elastic Container Service Developer Guide.
This parameter is only supported for tasks hosted on Fargate using the - * following platform versions:
+ * following platform versions: *Linux platform version 1.4.0
or later.
The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.
+ *The Amazon Resource Name (ARN) of the Amazon S3 object containing the + * environment variable file.
* @public */ value: string | undefined; @@ -1549,18 +1614,18 @@ export const EcsResourceRequirementType = { export type EcsResourceRequirementType = (typeof EcsResourceRequirementType)[keyof typeof EcsResourceRequirementType]; /** - *The type and amount of a resource to assign to a container. The supported resource - * types are GPUs and Elastic Inference accelerators. For more information, see Working with - * GPUs on Amazon ECS or Working with - * Amazon Elastic Inference on Amazon ECS in the - * Amazon Elastic Container Service Developer Guide + *
The type and amount of a resource to assign to a container. The supported resource types + * are GPUs and Elastic Inference accelerators. For more information, see Working with + * GPUs on Amazon ECS or Working with Amazon Elastic + * Inference on Amazon ECS in the Amazon Elastic Container Service + * Developer Guide *
* @public */ export interface EcsResourceRequirement { /** - *The type of resource to assign to a container. The supported values are
- * GPU
or InferenceAccelerator
.
The type of resource to assign to a container. The supported values are GPU
+ * or InferenceAccelerator
.
The value for the specified resource type.
*If the GPU
type is used, the value is the number of physical
- * GPUs
the Amazon ECS container agent reserves for the container. The number
- * of GPUs that's reserved for all containers in a task can't exceed the number of
+ * GPUs
the Amazon ECS container agent reserves for the container. The
+ * number of GPUs that's reserved for all containers in a task can't exceed the number of
* available GPUs on the container instance that the task is launched on.
If the InferenceAccelerator
type is used, the value
matches
- * the deviceName
for an InferenceAccelerator specified in a
- * task definition.
deviceName
for an InferenceAccelerator specified in a task
+ * definition.
* @public
*/
value: string | undefined;
}
/**
- * The overrides that are sent to a container. An empty container override can be passed in. An example of an empty
- * container override is \{"containerOverrides": [ ] \}
. If a non-empty container override is specified, the name
parameter must be included.
The overrides that are sent to a container. An empty container override can be passed
+ * in. An example of an empty container override is \{"containerOverrides": [ ] \}
.
+ * If a non-empty container override is specified, the name
parameter must be
+ * included.
The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.
+ *The command to send to the container that overrides the default command from the Docker + * image or the task definition. You must also specify a container name.
* @public */ Command?: string[]; /** - *The number of cpu
units reserved for the container, instead of the default value from the task definition. You must also specify a container name.
The number of cpu
units reserved for the container, instead of the default
+ * value from the task definition. You must also specify a container name.
The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can - * override the existing environment variables from the Docker image or the task definition. You must also specify a container name.
+ *The environment variables to send to the container. You can add new environment + * variables, which are added to the container at launch, or you can override the existing + * environment variables from the Docker image or the task definition. You must also specify a + * container name.
* @public */ Environment?: EcsEnvironmentVariable[]; /** - *A list of files containing the environment variables to pass to a container, instead of the value from the container definition.
+ *A list of files containing the environment variables to pass to a container, instead of + * the value from the container definition.
* @public */ EnvironmentFiles?: EcsEnvironmentFile[]; /** - *The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. - * If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.
+ *The hard limit (in MiB) of memory to present to the container, instead of the default + * value from the task definition. If your container attempts to exceed the memory specified + * here, the container is killed. You must also specify a container name.
* @public */ Memory?: number; /** - *The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. - * You must also specify a container name.
+ *The soft limit (in MiB) of memory to reserve for the container, instead of the default + * value from the task definition. You must also specify a container name.
* @public */ MemoryReservation?: number; /** - *The name of the container that receives the override. This parameter is required if any override is specified.
+ *The name of the container that receives the override. This parameter is required if any + * override is specified.
* @public */ Name?: string; /** - *The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.
+ *The type and amount of a resource to assign to a container, instead of the default value + * from the task definition. The only supported resource is a GPU.
* @public */ ResourceRequirements?: EcsResourceRequirement[]; @@ -1640,20 +1715,20 @@ export interface EcsContainerOverride { /** *The amount of ephemeral storage to allocate for the task. This parameter is used to * expand the total amount of ephemeral storage available, beyond the default amount, for - * tasks hosted on Fargate. For more information, see Fargate task - * storage in the Amazon ECS User Guide for Fargate.
+ * tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide + * for Fargate. *This parameter is only supported for tasks hosted on Fargate using
- * Linux platform version 1.4.0
or later. This parameter is not supported
- * for Windows containers on Fargate.
1.4.0
or later. This parameter is not supported for
+ * Windows containers on Fargate.
* The total amount, in GiB, of ephemeral storage to set for the task. The minimum
- * supported value is 21
GiB and the maximum supported value is
- * 200
GiB.
21
GiB and the maximum supported value is 200
+ * GiB.
* @public
*/
sizeInGiB: number | undefined;
@@ -1662,14 +1737,15 @@ export interface EcsEphemeralStorage {
/**
* Details on an Elastic Inference accelerator task override. This parameter is used to * override the Elastic Inference accelerator specified in the task definition. For more - * information, see Working with Amazon - * Elastic Inference on Amazon ECS in the - * Amazon Elastic Container Service Developer Guide.
+ * information, see Working with Amazon Elastic + * Inference on Amazon ECS in the Amazon Elastic Container Service + * Developer Guide. * @public */ export interface EcsInferenceAcceleratorOverride { /** - *The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName
specified in the task definition.
The Elastic Inference accelerator device name to override for the task. This parameter
+ * must match a deviceName
specified in the task definition.
The ephemeral storage setting override for the task.
*This parameter is only supported for tasks hosted on Fargate that - * use the following platform versions:
+ *This parameter is only supported for tasks hosted on Fargate that use + * the following platform versions:
*Linux platform version 1.4.0
or later.
The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For more - * information, see Amazon ECS task - * execution IAM role in the Amazon Elastic Container Service Developer Guide.
+ *The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For + * more information, see Amazon ECS + * task execution IAM role in the Amazon Elastic Container Service Developer + * Guide.
* @public */ ExecutionRoleArn?: string; @@ -1737,10 +1814,10 @@ export interface EcsTaskOverride { Memory?: string; /** - *The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers - * in this task are granted the permissions that are specified in this role. For more - * information, see IAM Role for Tasks - * in the Amazon Elastic Container Service Developer Guide.
+ *The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. + * All containers in this task are granted the permissions that are specified in this role. + * For more information, see IAM Role for Tasks in + * the Amazon Elastic Container Service Developer Guide.
* @public */ TaskRoleArn?: string; @@ -1761,23 +1838,24 @@ export const PlacementConstraintType = { export type PlacementConstraintType = (typeof PlacementConstraintType)[keyof typeof PlacementConstraintType]; /** - *An object representing a constraint on task placement. To learn more, see Task Placement Constraints in the Amazon Elastic Container Service Developer - * Guide.
+ *An object representing a constraint on task placement. To learn more, see Task Placement + * Constraints in the Amazon Elastic Container Service Developer Guide.
* @public */ export interface PlacementConstraint { /** *The type of constraint. Use distinctInstance to ensure that each task in a particular - * group is running on a different container instance. Use memberOf to restrict the selection to - * a group of valid candidates.
+ * group is running on a different container instance. Use memberOf to restrict the selection + * to a group of valid candidates. * @public */ type?: PlacementConstraintType; /** *A cluster query language expression to apply to the constraint. You cannot specify an
- * expression if the constraint type is distinctInstance
. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.
- *
distinctInstance
. To learn more, see
+ * Cluster Query
+ * Language in the Amazon Elastic Container Service Developer Guide.
* @public
*/
expression?: string;
@@ -1799,18 +1877,19 @@ export const PlacementStrategyType = {
export type PlacementStrategyType = (typeof PlacementStrategyType)[keyof typeof PlacementStrategyType];
/**
- * The task placement strategy for a task or service. To learn more, see Task Placement Strategies in the Amazon Elastic Container Service Service Developer - * Guide.
+ *The task placement strategy for a task or service. To learn more, see Task Placement + * Strategies in the Amazon Elastic Container Service Developer Guide.
* @public */ export interface PlacementStrategy { /** *The type of placement strategy. The random placement strategy randomly places tasks on * available candidates. The spread placement strategy spreads placement across available - * candidates evenly based on the field parameter. The binpack strategy places tasks on available - * candidates that have the least available amount of the resource that is specified with the - * field parameter. For example, if you binpack on memory, a task is placed on the instance with - * the least amount of remaining memory (but still enough to run the task).
+ * candidates evenly based on the field parameter. The binpack strategy places tasks on + * available candidates that have the least available amount of the resource that is specified + * with the field parameter. For example, if you binpack on memory, a task is placed on the + * instance with the least amount of remaining memory (but still enough to run the task). + * * @public */ type?: PlacementStrategyType; @@ -1819,8 +1898,8 @@ export interface PlacementStrategy { *The field to apply the placement strategy against. For the spread placement strategy, * valid values are instanceId (or host, which has the same effect), or any platform or custom * attribute that is applied to a container instance, such as attribute:ecs.availability-zone. - * For the binpack placement strategy, valid values are cpu and memory. For the random placement - * strategy, this field is not used.
+ * For the binpack placement strategy, valid values are cpu and memory. For the random + * placement strategy, this field is not used. * @public */ field?: string; @@ -1840,8 +1919,8 @@ export const PropagateTags = { export type PropagateTags = (typeof PropagateTags)[keyof typeof PropagateTags]; /** - *A key-value pair associated with an Amazon Web Services resource. In EventBridge, rules and event buses - * support tagging.
+ *A key-value pair associated with an Amazon Web Services resource. In EventBridge, + * rules and event buses support tagging.
* @public */ export interface Tag { @@ -1865,35 +1944,36 @@ export interface Tag { */ export interface PipeTargetEcsTaskParameters { /** - *The ARN of the task definition to use if the event target is an Amazon ECS task.
+ *The ARN of the task definition to use if the event target is an Amazon ECS task. + *
* @public */ TaskDefinitionArn: string | undefined; /** - *The number of tasks to create based on TaskDefinition
. The default is 1.
The number of tasks to create based on TaskDefinition
. The default is
+ * 1.
Specifies the launch type on which your task is running. The launch type that you specify
- * here must match one of the launch type (compatibilities) of the target task. The
- * FARGATE
value is supported only in the Regions where Fargate with Amazon ECS
- * is supported. For more information, see Fargate on Amazon ECS in
- * the Amazon Elastic Container Service Developer Guide.
Specifies the launch type on which your task is running. The launch type that you
+ * specify here must match one of the launch type (compatibilities) of the target task. The
+ * FARGATE
value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see
+ * Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.
Use this structure if the Amazon ECS task uses the awsvpc
network mode. This
- * structure specifies the VPC subnets and security groups associated with the task, and whether
- * a public IP address is to be used. This structure is required if LaunchType
is
- * FARGATE
because the awsvpc
mode is required for Fargate
- * tasks.
If you specify NetworkConfiguration
when the target ECS task does not use the
- * awsvpc
network mode, the task fails.
Use this structure if the Amazon ECS task uses the awsvpc
network
+ * mode. This structure specifies the VPC subnets and security groups associated with the
+ * task, and whether a public IP address is to be used. This structure is required if
+ * LaunchType
is FARGATE
because the awsvpc
mode is
+ * required for Fargate tasks.
If you specify NetworkConfiguration
when the target ECS task does not use
+ * the awsvpc
network mode, the task fails.
Specifies the platform version for the task. Specify only the numeric portion of the
* platform version, such as 1.1.0
.
This structure is used only if LaunchType
is FARGATE
. For more
- * information about valid platform versions, see Fargate Platform
- * Versions in the Amazon Elastic Container Service Developer
- * Guide.
Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.
+ *Specifies an Amazon ECS task group for the task. The maximum length is 255 + * characters.
* @public */ Group?: string; @@ -1919,15 +2000,15 @@ export interface PipeTargetEcsTaskParameters { *The capacity provider strategy to use for the task.
*If a capacityProviderStrategy
is specified, the launchType
* parameter must be omitted. If no capacityProviderStrategy
or launchType is
- * specified, the defaultCapacityProviderStrategy
for the cluster is used.
defaultCapacityProviderStrategy
for the cluster is used.
+ *
* @public
*/
CapacityProviderStrategy?: CapacityProviderStrategyItem[];
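A hedged sketch of the base/weight semantics described above, assuming the lowercase member names used by the ECS-derived shapes in this model:

```ts
import type { CapacityProviderStrategyItem } from "@aws-sdk/client-pipes";

// Run at least two tasks on FARGATE (base), then split the remaining
// tasks 3:1 between FARGATE and FARGATE_SPOT (weight).
const capacityProviderStrategy: CapacityProviderStrategyItem[] = [
  { capacityProvider: "FARGATE", base: 2, weight: 3 },
  { capacityProvider: "FARGATE_SPOT", weight: 1 },
];
```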
/**
- * Specifies whether to enable Amazon ECS managed tags for the task. For more information, - * see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer - * Guide.
+ *Specifies whether to enable Amazon ECS managed tags for the task. For more + * information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.
* @public */ EnableECSManagedTags?: boolean; @@ -1956,9 +2037,10 @@ export interface PipeTargetEcsTaskParameters { PlacementStrategy?: PlacementStrategy[]; /** - *Specifies whether to propagate the tags from the task definition to the task. If no value
- * is specified, the tags are not propagated. Tags can only be propagated to the task during task
- * creation. To add tags to a task after task creation, use the TagResource
API action.
Specifies whether to propagate the tags from the task definition to the task. If no
+ * value is specified, the tags are not propagated. Tags can only be propagated to the task
+ * during task creation. To add tags to a task after task creation, use the
+ * TagResource
API action.
The metadata that you apply to the task to help you categorize and organize them. Each tag - * consists of a key and an optional value, both of which you define. To learn more, see RunTask in the Amazon ECS API Reference.
+ *The metadata that you apply to the task to help you categorize and organize it. Each + * tag consists of a key and an optional value, both of which you define. To learn more, see + * RunTask in the Amazon ECS API Reference.
* @public */ Tags?: Tag[]; @@ -1989,13 +2072,16 @@ export interface PipeTargetEcsTaskParameters { */ export interface PipeTargetEventBridgeEventBusParameters { /** - *The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo
.
The URL subdomain of the endpoint. For example, if the URL for Endpoint is
+ * https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is
+ * abcde.veo
.
A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.
+ *A free-form string, with a maximum of 128 characters, used to decide what fields to + * expect in the event detail.
* @public */ DetailType?: string; @@ -2007,8 +2093,8 @@ export interface PipeTargetEventBridgeEventBusParameters { Source?: string; /** - *Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily - * concerns. Any number, including zero, may be present.
+ *Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event + * primarily concerns. Any number, including zero, may be present.
* @public */ Resources?: string[]; @@ -2027,22 +2113,21 @@ export interface PipeTargetEventBridgeEventBusParameters { */ export interface PipeTargetHttpParameters { /** - *The path parameter values to be used to populate API Gateway REST API or EventBridge - * ApiDestination path wildcards ("*").
+ *The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
* @public */ PathParameterValues?: string[]; /** - *The headers that need to be sent as part of request invoking the API Gateway REST API or - * EventBridge ApiDestination.
+ *The headers that need to be sent as part of the request invoking the API Gateway REST + * API or EventBridge ApiDestination.
* @public */ HeaderParameters?: RecordThe query string keys/values that need to be sent as part of request invoking the API Gateway - * REST API or EventBridge ApiDestination.
+ *The query string keys/values that need to be sent as part of the request invoking the + * API Gateway REST API or EventBridge ApiDestination.
* @public */ QueryStringParameters?: RecordDetermines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters - * for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. - * Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this - * hashing mechanism, all data records with the same partition key map to the same shard within the stream.
+ *Determines which shard in the stream the data record is assigned to. Partition keys are + * Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the + * partition key and associated data to a specific shard. Specifically, an MD5 hash function + * is used to map partition keys to 128-bit integer values and to map associated data records + * to shards. As a result of this hashing mechanism, all data records with the same partition + * key map to the same shard within the stream.
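Because the MD5 hash maps equal keys to the same shard, choosing a stable business key preserves per-key ordering in the target stream. A sketch, where `$.detail.orderId` is a hypothetical dynamic path into the source event:

```ts
import type { PipeTargetKinesisStreamParameters } from "@aws-sdk/client-pipes";

// All records sharing an orderId hash to the same shard, so events for
// a given order stay ordered. A static string works here as well.
const kinesisTarget: PipeTargetKinesisStreamParameters = {
  PartitionKey: "$.detail.orderId",
};
```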
* @public */ PartitionKey: string | undefined; @@ -2087,14 +2174,23 @@ export interface PipeTargetLambdaFunctionParameters { *
- * REQUEST_RESPONSE
(default) - Invoke synchronously. This corresponds to the RequestResponse
option in the InvocationType
parameter for the Lambda Invoke API.
REQUEST_RESPONSE
(default) - Invoke synchronously. This corresponds
+ * to the RequestResponse
option in the InvocationType
+ * parameter for the Lambda
+ * Invoke
+ * API.
*
- * FIRE_AND_FORGET
- Invoke asynchronously. This corresponds to the Event
option in the InvocationType
parameter for the Lambda Invoke API.
FIRE_AND_FORGET
- Invoke asynchronously. This corresponds to the
+ * Event
option in the InvocationType
parameter for the
+ * Lambda
+ * Invoke
+ * API.
* For more information, see Invocation types in the Amazon EventBridge User Guide.
+ *For more information, see Invocation + * types in the Amazon EventBridge User Guide.
* @public */ InvocationType?: PipeTargetInvocationType; @@ -2114,7 +2210,8 @@ export interface PipeTargetRedshiftDataParameters { SecretManagerArn?: string; /** - *The name of the database. Required when authenticating using temporary credentials.
+ *The name of the database. Required when authenticating using temporary + * credentials.
* @public */ Database: string | undefined; @@ -2153,13 +2250,15 @@ export interface PipeTargetRedshiftDataParameters { */ export interface SageMakerPipelineParameter { /** - *Name of parameter to start execution of a SageMaker Model Building Pipeline.
+ *Name of parameter to start execution of a SageMaker Model Building + * Pipeline.
* @public */ Name: string | undefined; /** - *Value of parameter to start execution of a SageMaker Model Building Pipeline.
+ *Value of parameter to start execution of a SageMaker Model Building + * Pipeline.
* @public */ Value: string | undefined; @@ -2171,7 +2270,8 @@ export interface SageMakerPipelineParameter { */ export interface PipeTargetSageMakerPipelineParameters { /** - *List of Parameter names and values for SageMaker Model Building Pipeline execution.
+ *List of Parameter names and values for SageMaker Model Building Pipeline + * execution.
* @public */ PipelineParameterList?: SageMakerPipelineParameter[]; @@ -2202,27 +2302,273 @@ export interface PipeTargetSqsQueueParameters { */ export interface PipeTargetStateMachineParameters { /** - *Specify whether to invoke the Step Functions state machine synchronously or asynchronously.
+ *Specify whether to invoke the Step Functions state machine synchronously or + * asynchronously.
*
- * REQUEST_RESPONSE
(default) - Invoke synchronously. For more information, see StartSyncExecution in the Step Functions API Reference.
REQUEST_RESPONSE
(default) - Invoke synchronously. For more
+ * information, see StartSyncExecution in the Step Functions API
+ * Reference.
*
- * REQUEST_RESPONSE
is not supported for STANDARD
state machine workflows.
REQUEST_RESPONSE
is not supported for STANDARD
state
+ * machine workflows.
*
- * FIRE_AND_FORGET
- Invoke asynchronously. For more information, see StartExecution in the Step Functions API Reference.
FIRE_AND_FORGET
- Invoke asynchronously. For more information, see
+ * StartExecution in the Step Functions API
+ * Reference.
* For more information, see Invocation types in the Amazon EventBridge User Guide.
+ *For more information, see Invocation + * types in the Amazon EventBridge User Guide.
* @public */ InvocationType?: PipeTargetInvocationType; } +/** + * @public + * @enum + */ +export const DimensionValueType = { + VARCHAR: "VARCHAR", +} as const; + +/** + * @public + */ +export type DimensionValueType = (typeof DimensionValueType)[keyof typeof DimensionValueType]; + +/** + *Maps source data to a dimension in the target Timestream for LiveAnalytics + * table.
+ *For more information, see Amazon Timestream for LiveAnalytics concepts + *
+ * @public + */ +export interface DimensionMapping { + /** + *Dynamic path to the dimension value in the source event.
+ * @public + */ + DimensionValue: string | undefined; + + /** + *The data type of the dimension for the time-series data.
+ * @public + */ + DimensionValueType: DimensionValueType | undefined; + + /** + *The metadata attributes of the time series. For example, the name and Availability Zone + * of an Amazon EC2 instance or the name of the manufacturer of a wind turbine are + * dimensions.
+ * @public + */ + DimensionName: string | undefined; +} + +/** + * @public + * @enum + */ +export const EpochTimeUnit = { + MICROSECONDS: "MICROSECONDS", + MILLISECONDS: "MILLISECONDS", + NANOSECONDS: "NANOSECONDS", + SECONDS: "SECONDS", +} as const; + +/** + * @public + */ +export type EpochTimeUnit = (typeof EpochTimeUnit)[keyof typeof EpochTimeUnit]; + +/** + * @public + * @enum + */ +export const MeasureValueType = { + BIGINT: "BIGINT", + BOOLEAN: "BOOLEAN", + DOUBLE: "DOUBLE", + TIMESTAMP: "TIMESTAMP", + VARCHAR: "VARCHAR", +} as const; + +/** + * @public + */ +export type MeasureValueType = (typeof MeasureValueType)[keyof typeof MeasureValueType]; + +/** + *A mapping of a source event data field to a measure in a Timestream for + * LiveAnalytics record.
+ * @public + */ +export interface MultiMeasureAttributeMapping { + /** + *Dynamic path to the measurement attribute in the source event.
+ * @public + */ + MeasureValue: string | undefined; + + /** + *Data type of the measurement attribute in the source event.
+ * @public + */ + MeasureValueType: MeasureValueType | undefined; + + /** + *Target measure name to be used.
+ * @public + */ + MultiMeasureAttributeName: string | undefined; +} + +/** + *Maps multiple measures from the source event to the same Timestream for + * LiveAnalytics record.
+ *For more information, see Amazon Timestream for LiveAnalytics concepts + *
+ * @public + */ +export interface MultiMeasureMapping { + /** + *The name of the multiple measurements per record (multi-measure).
+ * @public + */ + MultiMeasureName: string | undefined; + + /** + *Mappings that represent multiple source event fields mapped to measures in the same + * Timestream for LiveAnalytics record.
+ * @public + */ + MultiMeasureAttributeMappings: MultiMeasureAttributeMapping[] | undefined; +} + +/** + *Maps a single source data field to a single record in the specified Timestream + * for LiveAnalytics table.
+ *For more information, see Amazon Timestream for LiveAnalytics concepts + *
+ * @public + */ +export interface SingleMeasureMapping { + /** + *Dynamic path of the source field to map to the measure in the record.
+ * @public + */ + MeasureValue: string | undefined; + + /** + *Data type of the source field.
+ * @public + */ + MeasureValueType: MeasureValueType | undefined; + + /** + *Target measure name for the measurement attribute in the Timestream table.
+ * @public + */ + MeasureName: string | undefined; +} + +/** + * @public + * @enum + */ +export const TimeFieldType = { + EPOCH: "EPOCH", + TIMESTAMP_FORMAT: "TIMESTAMP_FORMAT", +} as const; + +/** + * @public + */ +export type TimeFieldType = (typeof TimeFieldType)[keyof typeof TimeFieldType]; + +/** + *The parameters for using a Timestream for LiveAnalytics table as a + * target.
+ * @public + */ +export interface PipeTargetTimestreamParameters { + /** + *Dynamic path to the source data field that represents the time value for your data.
+ * @public + */ + TimeValue: string | undefined; + + /** + *The granularity of the time units used. Default is MILLISECONDS
.
Required if TimeFieldType
is specified as EPOCH
.
The type of time value used.
+ *The default is EPOCH
.
How to format the timestamps. For example,
+ * YYYY-MM-DDThh:mm:ss.sssTZD
.
Required if TimeFieldType
is specified as
+ * TIMESTAMP_FORMAT
.
64 bit version value or source data field that represents the version value for your data.
+ *Write requests with a higher version number will update the existing measure values of the record and version. + * In cases where the measure value is the same, the version will still be updated.
+ *Default value is 1.
+ *Timestream for LiveAnalytics does not support updating partial measure values in a record.
+ *Write requests for duplicate data with a
+ * higher version number will update the existing measure value and version. In cases where
+ * the measure value is the same, Version
will still be updated. Default value is
+ * 1
.
+ * Version
must be 1
or greater, or you will receive a
+ * ValidationException
error.
Map source data to dimensions in the target Timestream for LiveAnalytics + * table.
+ *For more information, see Amazon Timestream for LiveAnalytics concepts + *
+ * @public + */ + DimensionMappings: DimensionMapping[] | undefined; + + /** + *Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table.
+ * @public + */ + SingleMeasureMappings?: SingleMeasureMapping[]; + + /** + *Maps multiple measures from the source event to the same record in the specified Timestream for LiveAnalytics table.
+ * @public + */ + MultiMeasureMappings?: MultiMeasureMapping[]; +} + /** *The parameters required to set up a target for your pipe.
*For more information about pipe target parameters, including how to use dynamic path parameters, see Target parameters in the Amazon EventBridge User Guide.
@@ -2231,8 +2577,8 @@ export interface PipeTargetStateMachineParameters { export interface PipeTargetParameters { /** *Valid JSON text passed to the target. In this case, nothing from the event itself is - * passed to the target. For more information, see The JavaScript Object Notation (JSON) Data - * Interchange Format.
+ * passed to the target. For more information, see The JavaScript Object Notation (JSON) + * Data Interchange Format. *To remove an input template, specify an empty string.
* @public */ @@ -2305,6 +2651,13 @@ export interface PipeTargetParameters { * @public */ CloudWatchLogsParameters?: PipeTargetCloudWatchLogsParameters; + + /** + *The parameters for using a Timestream for LiveAnalytics table as a + * target.
+ * @public + */ + TimestreamParameters?: PipeTargetTimestreamParameters; } /** @@ -2461,7 +2814,8 @@ export class InternalException extends __BaseException { readonly name: "InternalException" = "InternalException"; readonly $fault: "server" = "server"; /** - *The number of seconds to wait before retrying the action that caused the exception.
+ *The number of seconds to wait before retrying the action that caused the + * exception.
* @public */ retryAfterSeconds?: number; @@ -2568,7 +2922,8 @@ export class ThrottlingException extends __BaseException { quotaCode?: string; /** - *The number of seconds to wait before retrying the action that caused the exception.
+ *The number of seconds to wait before retrying the action that caused the + * exception.
* @public */ retryAfterSeconds?: number; @@ -2615,7 +2970,8 @@ export class ValidationException extends __BaseException { readonly name: "ValidationException" = "ValidationException"; readonly $fault: "client" = "client"; /** - *The list of fields for which validation failed and the corresponding failure messages.
+ *The list of fields for which validation failed and the corresponding failure + * messages.
* @public */ fieldList?: ValidationExceptionField[]; @@ -2714,12 +3070,12 @@ export interface DescribePipeRequest { } /** - *The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
+ *The Amazon Data Firehose logging configuration settings for the pipe.
* @public */ export interface FirehoseLogDestination { /** - *The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.
+ *The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.
* @public */ DeliveryStreamArn?: string; @@ -2731,15 +3087,16 @@ export interface FirehoseLogDestination { */ export interface S3LogDestination { /** - *The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.
+ *The name of the Amazon S3 bucket to which EventBridge delivers the log + * records for the pipe.
* @public */ BucketName?: string; /** *The prefix text with which to begin Amazon S3 log object names.
- *For more information, see Organizing objects using prefixes - * in the Amazon Simple Storage Service User Guide.
+ *For more information, see Organizing objects using + * prefixes in the Amazon Simple Storage Service User Guide.
* @public */ Prefix?: string; @@ -2763,7 +3120,8 @@ export interface S3LogDestination { *
- * w3c
: W3C extended logging file format
+ * w3c
: W3C extended
+ * logging file format
*
The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
+ *The Amazon Data Firehose logging configuration settings for the pipe.
* @public */ FirehoseLogDestination?: FirehoseLogDestination; @@ -2802,9 +3160,12 @@ export interface PipeLogConfiguration { Level?: LogLevel; /** - *Whether the execution data (specifically, the payload
, awsRequest
, and awsResponse
fields) is included in the log messages for this pipe.
Whether the execution data (specifically, the payload
,
+ * awsRequest
, and awsResponse
fields) is included in the log
+ * messages for this pipe.
This applies to all log destinations for the pipe.
- *For more information, see Including execution data in logs in the Amazon EventBridge User Guide.
+ *For more information, see Including execution data in logs in the Amazon EventBridge User + * Guide.
* @public */ IncludeExecutionData?: IncludeExecutionDataOption[]; @@ -2923,8 +3284,9 @@ export interface DescribePipeResponse { */ export interface ListPipesRequest { /** - *A value that will return a subset of the pipes associated with this account. For example, "NamePrefix": "ABC"
will return
- * all endpoints with "ABC" in the name.
A value that will return a subset of the pipes associated with this account. For
+ * example, "NamePrefix": "ABC"
will return all endpoints with "ABC" in the
+ * name.
An object that represents a pipe. Amazon EventBridgePipes connect event sources to targets and reduces the need for specialized knowledge and integration code.
+ *An object that represents a pipe. Amazon EventBridgePipes connect event sources to + * targets and reduces the need for specialized knowledge and integration code.
* @public */ export interface Pipe { @@ -3355,6 +3718,10 @@ export interface UpdatePipeSourceRabbitMQBrokerParameters { /** *The parameters for using a self-managed Apache Kafka stream as a source.
+ *A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. + * This includes both clusters you manage yourself, as well as those hosted by a third-party + * provider, such as Confluent + * Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.
* @public */ export interface UpdatePipeSourceSelfManagedKafkaParameters { @@ -3459,6 +3826,10 @@ export interface UpdatePipeSourceParameters { /** *The parameters for using a self-managed Apache Kafka stream as a source.
+ *A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. + * This includes both clusters you manage yourself, as well as those hosted by a third-party + * provider, such as Confluent + * Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.
* @public */ SelfManagedKafkaParameters?: UpdatePipeSourceSelfManagedKafkaParameters; diff --git a/clients/client-pipes/src/protocols/Aws_restJson1.ts b/clients/client-pipes/src/protocols/Aws_restJson1.ts index 953362c1fd9ae..8aa2cc98fd2fe 100644 --- a/clients/client-pipes/src/protocols/Aws_restJson1.ts +++ b/clients/client-pipes/src/protocols/Aws_restJson1.ts @@ -50,6 +50,7 @@ import { CloudwatchLogsLogDestinationParameters, ConflictException, DeadLetterConfig, + DimensionMapping, EcsContainerOverride, EcsEnvironmentFile, EcsEnvironmentVariable, @@ -64,6 +65,8 @@ import { InternalException, MQBrokerAccessCredentials, MSKAccessCredentials, + MultiMeasureAttributeMapping, + MultiMeasureMapping, NetworkConfiguration, NotFoundException, Pipe, @@ -90,6 +93,7 @@ import { PipeTargetSageMakerPipelineParameters, PipeTargetSqsQueueParameters, PipeTargetStateMachineParameters, + PipeTargetTimestreamParameters, PlacementConstraint, PlacementStrategy, S3LogDestinationParameters, @@ -97,6 +101,7 @@ import { SelfManagedKafkaAccessConfigurationCredentials, SelfManagedKafkaAccessConfigurationVpc, ServiceQuotaExceededException, + SingleMeasureMapping, Tag, ThrottlingException, UpdatePipeSourceActiveMQBrokerParameters, @@ -755,6 +760,10 @@ const de_ValidationExceptionRes = async (parsedOutput: any, context: __SerdeCont // se_DeadLetterConfig omitted. +// se_DimensionMapping omitted. + +// se_DimensionMappings omitted. + // se_EcsContainerOverride omitted. // se_EcsContainerOverrideList omitted. @@ -799,6 +808,14 @@ const de_ValidationExceptionRes = async (parsedOutput: any, context: __SerdeCont // se_MSKAccessCredentials omitted. +// se_MultiMeasureAttributeMapping omitted. + +// se_MultiMeasureAttributeMappings omitted. + +// se_MultiMeasureMapping omitted. + +// se_MultiMeasureMappings omitted. + // se_NetworkConfiguration omitted. // se_PathParameterList omitted. @@ -881,6 +898,8 @@ const se_PipeSourceParameters = (input: PipeSourceParameters, context: __SerdeCo // se_PipeTargetStateMachineParameters omitted. +// se_PipeTargetTimestreamParameters omitted. + // se_PlacementConstraint omitted. // se_PlacementConstraints omitted. @@ -905,6 +924,10 @@ const se_PipeSourceParameters = (input: PipeSourceParameters, context: __SerdeCo // se_SelfManagedKafkaAccessConfigurationVpc omitted. +// se_SingleMeasureMapping omitted. + +// se_SingleMeasureMappings omitted. + // se_Sqls omitted. // se_StringList omitted. @@ -965,6 +988,10 @@ const se_PipeSourceParameters = (input: PipeSourceParameters, context: __SerdeCo // de_DeadLetterConfig omitted. +// de_DimensionMapping omitted. + +// de_DimensionMappings omitted. + // de_EcsContainerOverride omitted. // de_EcsContainerOverrideList omitted. @@ -1009,6 +1036,14 @@ const se_PipeSourceParameters = (input: PipeSourceParameters, context: __SerdeCo // de_MSKAccessCredentials omitted. +// de_MultiMeasureAttributeMapping omitted. + +// de_MultiMeasureAttributeMappings omitted. + +// de_MultiMeasureMapping omitted. + +// de_MultiMeasureMappings omitted. + // de_NetworkConfiguration omitted. // de_PathParameterList omitted. @@ -1121,6 +1156,8 @@ const de_PipeSourceParameters = (output: any, context: __SerdeContext): PipeSour // de_PipeTargetStateMachineParameters omitted. +// de_PipeTargetTimestreamParameters omitted. + // de_PlacementConstraint omitted. // de_PlacementConstraints omitted. @@ -1145,6 +1182,10 @@ const de_PipeSourceParameters = (output: any, context: __SerdeContext): PipeSour // de_SelfManagedKafkaAccessConfigurationVpc omitted. 
+// de_SingleMeasureMapping omitted. + +// de_SingleMeasureMappings omitted. + // de_Sqls omitted. // de_StringList omitted. diff --git a/codegen/sdk-codegen/aws-models/pipes.json b/codegen/sdk-codegen/aws-models/pipes.json index 44dc0466ac236..c8f548554a1bd 100644 --- a/codegen/sdk-codegen/aws-models/pipes.json +++ b/codegen/sdk-codegen/aws-models/pipes.json @@ -52,25 +52,25 @@ "Subnets": { "target": "com.amazonaws.pipes#Subnets", "traits": { - "smithy.api#documentation": "Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
", + "smithy.api#documentation": "Specifies the subnets associated with the task. These subnets must all be in the same\n VPC. You can specify as many as 16 subnets.
", "smithy.api#required": {} } }, "SecurityGroups": { "target": "com.amazonaws.pipes#SecurityGroups", "traits": { - "smithy.api#documentation": "Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many \n as five security groups. If you do not specify a security group, the default security group for the VPC is used.
" + "smithy.api#documentation": "Specifies the security groups associated with the task. These security groups must all\n be in the same VPC. You can specify as many as five security groups. If you do not specify\n a security group, the default security group for the VPC is used.
" } }, "AssignPublicIp": { "target": "com.amazonaws.pipes#AssignPublicIp", "traits": { - "smithy.api#documentation": "Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED
only when \n LaunchType
in EcsParameters
is set to FARGATE
.
Specifies whether the task's elastic network interface receives a public IP address. You\n can specify ENABLED
only when LaunchType
in\n EcsParameters
is set to FARGATE
.
This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used. \n This structure is relevant only for ECS tasks that use the awsvpc
network mode.
This structure specifies the VPC subnets and security groups for the task, and whether a\n public IP address is to be used. This structure is relevant only for ECS tasks that use the\n awsvpc
network mode.
The command to send to the container that overrides the default command from the Docker image or the task definition.
" + "smithy.api#documentation": "The command to send to the container that overrides the default command from the Docker\n image or the task definition.
" } }, "Environment": { "target": "com.amazonaws.pipes#BatchEnvironmentVariableList", "traits": { - "smithy.api#documentation": "The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing \n environment variables from the Docker image or the task definition.
\nEnvironment variables cannot start with \"Batch
\". This naming convention is reserved for variables that Batch sets.
The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition.
\nEnvironment variables cannot start with \"Batch
\". This\n naming convention is reserved for variables that Batch sets.
The instance type to use for a multi-node parallel job.
\nThis parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.
\nThe instance type to use for a multi-node parallel job.
\nThis parameter isn't applicable to single-node container jobs or jobs that run on\n Fargate resources, and shouldn't be provided.
\nThe type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU
, MEMORY
, \n and VCPU
.
The type and amount of resources to assign to a container. This overrides the settings\n in the job definition. The supported resources include GPU
,\n MEMORY
, and VCPU
.
The name of the key-value pair. For environment variables, this is the name of the environment variable.
" + "smithy.api#documentation": "The name of the key-value pair. For environment variables, this is the name of the\n environment variable.
" } }, "Value": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The value of the key-value pair. For environment variables, this is the value of the environment variable.
" + "smithy.api#documentation": "The value of the key-value pair. For environment variables, this is the value of the\n environment variable.
" } } }, "traits": { - "smithy.api#documentation": "The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing \n environment variables from the Docker image or the task definition.
\nEnvironment variables cannot start with \"Batch
\". This naming convention is reserved for variables that Batch sets.
The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition.
\nEnvironment variables cannot start with \"Batch
\". This\n naming convention is reserved for variables that Batch sets.
The job ID of the Batch job that's associated with this dependency.
" + "smithy.api#documentation": "The job ID of the Batch job that's associated with this\n dependency.
" } }, "Type": { @@ -216,20 +216,20 @@ "Type": { "target": "com.amazonaws.pipes#BatchResourceRequirementType", "traits": { - "smithy.api#documentation": "The type of resource to assign to a container. The supported resources include GPU
, MEMORY
, and VCPU
.
The type of resource to assign to a container. The supported resources include\n GPU
, MEMORY
, and VCPU
.
The quantity of the specified resource to reserve for the container. The values vary based on the\n type
specified.
The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all\n containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched\n on.
\nGPUs aren't available for jobs that are running on Fargate resources.
\nThe memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are\n running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated.\n This parameter maps to Memory
in the \n Create a container section of the Docker Remote API \n and the --memory
option to docker run.\n You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for\n multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to\n Memory
in the \n Create a container section of the Docker Remote API and the\n --memory
option to docker run.
If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for\n a particular instance type, see Memory\n management in the Batch User Guide.
\nFor jobs that are running on Fargate resources, then value
is the hard limit (in MiB), and\n must match one of the supported values and the VCPU
values must be one of the values supported for\n that memory value.
\n VCPU
= 0.25
\n VCPU
= 0.25 or 0.5
\n VCPU
= 0.25, 0.5, or 1
\n VCPU
= 0.5, or 1
\n VCPU
= 0.5, 1, or 2
\n VCPU
= 1 or 2
\n VCPU
= 1, 2, 4, or 8
\n VCPU
= 2 or 4
\n VCPU
= 2, 4, or 8
\n VCPU
= 4
\n VCPU
= 4 or 8
\n VCPU
= 8
\n VCPU
= 8 or 16
\n VCPU
= 16
The number of vCPUs reserved for the container. This parameter maps to CpuShares
in the\n \n Create a container section of the Docker Remote API \n and the --cpu-shares
option to\n docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2\n resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be\n specified for each node at least once.
The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about\n Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference.
\nFor jobs that are running on Fargate resources, then value
must match one of the supported\n values and the MEMORY
values must be one of the values supported for that VCPU
value.\n The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16
\n MEMORY
= 512, 1024, or 2048
\n MEMORY
= 1024, 2048, 3072, or 4096
\n MEMORY
= 2048, 3072, 4096, 5120, 6144, 7168, or 8192
\n MEMORY
= 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384
\n MEMORY
= 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456,\n 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720
\n MEMORY
= 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n
\n MEMORY
= 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880\n
The quantity of the specified resource to reserve for the container. The values vary\n based on the type
specified.
The number of physical GPUs to reserve for the container. Make sure that the\n number of GPUs reserved for all containers in a job doesn't exceed the number of\n available GPUs on the compute resource that the job is launched on.
\nGPUs aren't available for jobs that are running on Fargate\n resources.
\nThe memory hard limit (in MiB) present to the container. This parameter is\n supported for jobs that are running on EC2 resources. If your container attempts\n to exceed the memory specified, the container is terminated. This parameter maps\n to Memory
in the Create a\n container section of the Docker Remote API and\n the --memory
option to docker run. You\n must specify at least 4 MiB of memory for a job. This is required but can be\n specified in several places for multi-node parallel (MNP) jobs. It must be\n specified for each node at least once. This parameter maps to Memory
\n in the \n Create a container section of the Docker Remote API and\n the --memory
option to docker run.
If you're trying to maximize your resource utilization by providing your\n jobs as much memory as possible for a particular instance type, see Memory management in the Batch User\n Guide.
\nFor jobs that are running on Fargate resources, then\n value
is the hard limit (in MiB), and must match one of the\n supported values and the VCPU
values must be one of the values\n supported for that memory value.
\n VCPU
= 0.25
\n VCPU
= 0.25 or 0.5
\n VCPU
= 0.25, 0.5, or 1
\n VCPU
= 0.5, or 1
\n VCPU
= 0.5, 1, or 2
\n VCPU
= 1 or 2
\n VCPU
= 1, 2, 4, or 8
\n VCPU
= 2 or 4
\n VCPU
= 2, 4, or 8
\n VCPU
= 4
\n VCPU
= 4 or 8
\n VCPU
= 8
\n VCPU
= 8 or 16
\n VCPU
= 16
The number of vCPUs reserved for the container. This parameter maps to\n CpuShares
in the Create a\n container section of the Docker Remote API and\n the --cpu-shares
option to docker run. Each\n vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at\n least one vCPU. This is required but can be specified in several places; it must\n be specified for each node at least once.
The default for the Fargate On-Demand vCPU resource count quota\n is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services\n General Reference.
\nFor jobs that are running on Fargate resources, then\n value
must match one of the supported values and the\n MEMORY
values must be one of the values supported for that\n VCPU
value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and\n 16
\n MEMORY
= 512, 1024, or 2048
\n MEMORY
= 1024, 2048, 3072, or 4096
\n MEMORY
= 2048, 3072, 4096, 5120, 6144, 7168, or\n 8192
\n MEMORY
= 4096, 5120, 6144, 7168, 8192, 9216, 10240,\n 11264, 12288, 13312, 14336, 15360, or 16384
\n MEMORY
= 8192, 9216, 10240, 11264, 12288, 13312, 14336,\n 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576,\n 25600, 26624, 27648, 28672, 29696, or 30720
\n MEMORY
= 16384, 20480, 24576, 28672, 32768, 36864, 40960,\n 45056, 49152, 53248, 57344, or 61440
\n MEMORY
= 32768, 40960, 49152, 57344, 65536, 73728, 81920,\n 90112, 98304, 106496, 114688, or 122880
The type and amount of a resource to assign to a container. The supported resources include GPU
, MEMORY
, and VCPU
.
The type and amount of a resource to assign to a container. The supported resources\n include GPU
, MEMORY
, and VCPU
.
The number of times to move a job to the RUNNABLE
status. If the value of attempts
is greater than one, the job is retried on \n failure the same number of attempts as the value.
The number of times to move a job to the RUNNABLE
status. If the value of\n attempts
is greater than one, the job is retried on failure the same number\n of attempts as the value.
The retry strategy that's associated with a job. For more information, see \n Automated job retries in the Batch User Guide.
" + "smithy.api#documentation": "The retry strategy that's associated with a job. For more information, see Automated job\n retries in the Batch User Guide.
" } }, "com.amazonaws.pipes#Boolean": { @@ -322,14 +322,14 @@ "target": "com.amazonaws.pipes#CapacityProviderStrategyItemWeight", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The weight value designates the relative percentage of the total number of tasks launched\n that should use the specified capacity provider. The weight value is taken into consideration\n after the base value, if defined, is satisfied.
" + "smithy.api#documentation": "The weight value designates the relative percentage of the total number of tasks\n launched that should use the specified capacity provider. The weight value is taken into\n consideration after the base value, if defined, is satisfied.
" } }, "base": { "target": "com.amazonaws.pipes#CapacityProviderStrategyItemBase", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The base value designates how many tasks, at a minimum, to run on the specified capacity\n provider. Only one capacity provider in a capacity provider strategy can have a base defined.\n If no value is specified, the default value of 0 is used.
" + "smithy.api#documentation": "The base value designates how many tasks, at a minimum, to run on the specified capacity\n provider. Only one capacity provider in a capacity provider strategy can have a base\n defined. If no value is specified, the default value of 0 is used.
" } } }, @@ -457,7 +457,7 @@ "traits": { "aws.iam#actionPermissionDescription": "Grants permission to create a pipe", "aws.iam#conditionKeys": ["aws:ResourceTag/${TagKey}", "aws:RequestTag/${TagKey}", "aws:TagKeys"], - "smithy.api#documentation": "Create a pipe. Amazon EventBridge Pipes connect event sources to targets and reduces the need for specialized knowledge and integration code.
", + "smithy.api#documentation": "Create a pipe. Amazon EventBridge Pipes connect event sources to targets and reduces\n the need for specialized knowledge and integration code.
", "smithy.api#http": { "method": "POST", "uri": "/v1/pipes/{Name}", @@ -630,7 +630,7 @@ } }, "traits": { - "smithy.api#documentation": "A DeadLetterConfig
object that contains information about a dead-letter queue configuration.
A DeadLetterConfig
object that contains information about a dead-letter\n queue configuration.
Dynamic path to the dimension value in the source event.
", + "smithy.api#required": {} + } + }, + "DimensionValueType": { + "target": "com.amazonaws.pipes#DimensionValueType", + "traits": { + "smithy.api#documentation": "The data type of the dimension for the time-series data.
", + "smithy.api#required": {} + } + }, + "DimensionName": { + "target": "com.amazonaws.pipes#DimensionName", + "traits": { + "smithy.api#documentation": "The metadata attributes of the time series. For example, the name and Availability Zone\n of an Amazon EC2 instance or the name of the manufacturer of a wind turbine are\n dimensions.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Maps source data to a dimension in the target Timestream for LiveAnalytics\n table.
\nFor more information, see Amazon Timestream for LiveAnalytics concepts\n
" + } + }, + "com.amazonaws.pipes#DimensionMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#DimensionMapping" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.pipes#DimensionName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.pipes#DimensionValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.pipes#DimensionValueType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "VARCHAR", + "name": "VARCHAR" + } + ] + } + }, "com.amazonaws.pipes#DynamoDBStreamStartPosition": { "type": "string", "traits": { @@ -914,54 +984,54 @@ "Command": { "target": "com.amazonaws.pipes#StringList", "traits": { - "smithy.api#documentation": "The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.
" + "smithy.api#documentation": "The command to send to the container that overrides the default command from the Docker\n image or the task definition. You must also specify a container name.
" } }, "Cpu": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "The number of cpu
units reserved for the container, instead of the default value from the task definition. You must also specify a container name.
The number of cpu
units reserved for the container, instead of the default\n value from the task definition. You must also specify a container name.
The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can \n override the existing environment variables from the Docker image or the task definition. You must also specify a container name.
" + "smithy.api#documentation": "The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition. You must also specify a\n container name.
" } }, "EnvironmentFiles": { "target": "com.amazonaws.pipes#EcsEnvironmentFileList", "traits": { - "smithy.api#documentation": "A list of files containing the environment variables to pass to a container, instead of the value from the container definition.
" + "smithy.api#documentation": "A list of files containing the environment variables to pass to a container, instead of\n the value from the container definition.
" } }, "Memory": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. \n If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.
" + "smithy.api#documentation": "The hard limit (in MiB) of memory to present to the container, instead of the default\n value from the task definition. If your container attempts to exceed the memory specified\n here, the container is killed. You must also specify a container name.
" } }, "MemoryReservation": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. \n You must also specify a container name.
" + "smithy.api#documentation": "The soft limit (in MiB) of memory to reserve for the container, instead of the default\n value from the task definition. You must also specify a container name.
" } }, "Name": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "The name of the container that receives the override. This parameter is required if any override is specified.
" + "smithy.api#documentation": "The name of the container that receives the override. This parameter is required if any\n override is specified.
" } }, "ResourceRequirements": { "target": "com.amazonaws.pipes#EcsResourceRequirementsList", "traits": { - "smithy.api#documentation": "The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.
" + "smithy.api#documentation": "The type and amount of a resource to assign to a container, instead of the default value\n from the task definition. The only supported resource is a GPU.
" } } }, "traits": { - "smithy.api#documentation": "The overrides that are sent to a container. An empty container override can be passed in. An example of an empty \n container override is {\"containerOverrides\": [ ] }
. If a non-empty container override is specified, the name
parameter must be included.
The overrides that are sent to a container. An empty container override can be passed\n in. An example of an empty container override is {\"containerOverrides\": [ ] }
.\n If a non-empty container override is specified, the name
parameter must be\n included.
The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon S3 object containing the\n environment variable file.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A list of files containing the environment variables to pass to a container. You can\n specify up to ten environment files. The file must have a .env
file\n extension. Each line in an environment file should contain an environment variable in\n VARIABLE=VALUE
format. Lines beginning with #
are treated\n as comments and are ignored. For more information about the environment variable file\n syntax, see Declare default\n environment variables in file.
If there are environment variables specified using the environment
\n parameter in a container definition, they take precedence over the variables contained\n within an environment file. If multiple environment files are specified that contain the\n same variable, they're processed from the top down. We recommend that you use unique\n variable names. For more information, see Specifying environment\n variables in the Amazon Elastic Container Service Developer Guide.
This parameter is only supported for tasks hosted on Fargate using the\n following platform versions:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
A list of files containing the environment variables to pass to a container. You can\n specify up to ten environment files. The file must have a .env
file extension.\n Each line in an environment file should contain an environment variable in\n VARIABLE=VALUE
format. Lines beginning with #
are treated as\n comments and are ignored. For more information about the environment variable file syntax,\n see Declare default environment\n variables in file.
If there are environment variables specified using the environment
\n parameter in a container definition, they take precedence over the variables contained\n within an environment file. If multiple environment files are specified that contain the\n same variable, they're processed from the top down. We recommend that you use unique\n variable names. For more information, see Specifying environment\n variables in the Amazon Elastic Container Service Developer\n Guide.
This parameter is only supported for tasks hosted on Fargate using the\n following platform versions:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The name of the key-value pair. For environment variables, this is the name of the environment variable.
" + "smithy.api#documentation": "The name of the key-value pair. For environment variables, this is the name of the\n environment variable.
" } }, "value": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "The value of the key-value pair. For environment variables, this is the value of the environment variable.
" + "smithy.api#documentation": "The value of the key-value pair. For environment variables, this is the value of the\n environment variable.
" } } }, "traits": { - "smithy.api#documentation": "The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can \n override the existing environment variables from the Docker image or the task definition. You must also specify a container name.
" + "smithy.api#documentation": "The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition. You must also specify a\n container name.
" } }, "com.amazonaws.pipes#EcsEnvironmentVariableList": { @@ -1041,13 +1111,13 @@ "sizeInGiB": { "target": "com.amazonaws.pipes#EphemeralStorageSize", "traits": { - "smithy.api#documentation": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n supported value is 21
GiB and the maximum supported value is\n 200
GiB.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n supported value is 21
GiB and the maximum supported value is 200
\n GiB.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n expand the total amount of ephemeral storage available, beyond the default amount, for\n tasks hosted on Fargate. For more information, see Fargate task\n storage in the Amazon ECS User Guide for Fargate.
\nThis parameter is only supported for tasks hosted on Fargate using\n Linux platform version 1.4.0
or later. This parameter is not supported\n for Windows containers on Fargate.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n expand the total amount of ephemeral storage available, beyond the default amount, for\n tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide\n for Fargate.
\nThis parameter is only supported for tasks hosted on Fargate using\n Linux platform version 1.4.0
or later. This parameter is not supported for\n Windows containers on Fargate.
The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName
specified in the task definition.
The Elastic Inference accelerator device name to override for the task. This parameter\n must match a deviceName
specified in the task definition.
Details on an Elastic Inference accelerator task override. This parameter is used to\n override the Elastic Inference accelerator specified in the task definition. For more\n information, see Working with Amazon\n Elastic Inference on Amazon ECS in the\n Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Details on an Elastic Inference accelerator task override. This parameter is used to\n override the Elastic Inference accelerator specified in the task definition. For more\n information, see Working with Amazon Elastic\n Inference on Amazon ECS in the Amazon Elastic Container Service\n Developer Guide.
" } }, "com.amazonaws.pipes#EcsInferenceAcceleratorOverrideList": { @@ -1082,20 +1152,20 @@ "type": { "target": "com.amazonaws.pipes#EcsResourceRequirementType", "traits": { - "smithy.api#documentation": "The type of resource to assign to a container. The supported values are\n GPU
or InferenceAccelerator
.
The type of resource to assign to a container. The supported values are GPU
\n or InferenceAccelerator
.
The value for the specified resource type.
\nIf the GPU
type is used, the value is the number of physical\n GPUs
the Amazon ECS container agent reserves for the container. The number\n of GPUs that's reserved for all containers in a task can't exceed the number of\n available GPUs on the container instance that the task is launched on.
If the InferenceAccelerator
type is used, the value
matches\n the deviceName
for an InferenceAccelerator specified in a\n task definition.
The value for the specified resource type.
\nIf the GPU
type is used, the value is the number of physical\n GPUs
the Amazon ECS container agent reserves for the container. The\n number of GPUs that's reserved for all containers in a task can't exceed the number of\n available GPUs on the container instance that the task is launched on.
If the InferenceAccelerator
type is used, the value
matches\n the deviceName
for an InferenceAccelerator specified in a task\n definition.
The type and amount of a resource to assign to a container. The supported resource\n types are GPUs and Elastic Inference accelerators. For more information, see Working with\n GPUs on Amazon ECS or Working with\n Amazon Elastic Inference on Amazon ECS in the\n Amazon Elastic Container Service Developer Guide\n
" + "smithy.api#documentation": "The type and amount of a resource to assign to a container. The supported resource types\n are GPUs and Elastic Inference accelerators. For more information, see Working with\n GPUs on Amazon ECS or Working with Amazon Elastic\n Inference on Amazon ECS in the Amazon Elastic Container Service\n Developer Guide\n
" } }, "com.amazonaws.pipes#EcsResourceRequirementType": { @@ -1137,13 +1207,13 @@ "EphemeralStorage": { "target": "com.amazonaws.pipes#EcsEphemeralStorage", "traits": { - "smithy.api#documentation": "The ephemeral storage setting override for the task.
\nThis parameter is only supported for tasks hosted on Fargate that\n use the following platform versions:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The ephemeral storage setting override for the task.
\nThis parameter is only supported for tasks hosted on Fargate that use\n the following platform versions:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For more\n information, see Amazon ECS task\n execution IAM role in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For\n more information, see Amazon ECS\n task execution IAM role in the Amazon Elastic Container Service Developer\n Guide.
" } }, "InferenceAcceleratorOverrides": { @@ -1161,7 +1231,7 @@ "TaskRoleArn": { "target": "com.amazonaws.pipes#ArnOrJsonPath", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers\n in this task are granted the permissions that are specified in this role. For more\n information, see IAM Role for Tasks\n in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume.\n All containers in this task are granted the permissions that are specified in this role.\n For more information, see IAM Role for Tasks in\n the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -1189,6 +1259,29 @@ } } }, + "com.amazonaws.pipes#EpochTimeUnit": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "MILLISECONDS", + "name": "MILLISECONDS" + }, + { + "value": "SECONDS", + "name": "SECONDS" + }, + { + "value": "MICROSECONDS", + "name": "MICROSECONDS" + }, + { + "value": "NANOSECONDS", + "name": "NANOSECONDS" + } + ] + } + }, "com.amazonaws.pipes#ErrorMessage": { "type": "string" }, @@ -1257,7 +1350,7 @@ } }, "traits": { - "smithy.api#documentation": "Filter events using an event pattern. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.
" + "smithy.api#documentation": "Filter events using an event pattern. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.
" } }, "com.amazonaws.pipes#FilterCriteria": { @@ -1302,12 +1395,12 @@ "DeliveryStreamArn": { "target": "com.amazonaws.pipes#FirehoseArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.
" } } }, "traits": { - "smithy.api#documentation": "The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
" + "smithy.api#documentation": "The Amazon Data Firehose logging configuration settings for the pipe.
" } }, "com.amazonaws.pipes#FirehoseLogDestinationParameters": { @@ -1316,13 +1409,13 @@ "DeliveryStreamArn": { "target": "com.amazonaws.pipes#FirehoseArn", "traits": { - "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.
", + "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the Firehose delivery stream to\n which EventBridge delivers the pipe log records.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
" + "smithy.api#documentation": "The Amazon Data Firehose logging configuration settings for the pipe.
" } }, "com.amazonaws.pipes#HeaderKey": { @@ -1397,7 +1490,7 @@ "retryAfterSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "The number of seconds to wait before retrying the action that caused the exception.
", + "smithy.api#documentation": "The number of seconds to wait before retrying the action that caused the\n exception.
", "smithy.api#httpHeader": "Retry-After" } } @@ -1566,7 +1659,7 @@ "NamePrefix": { "target": "com.amazonaws.pipes#PipeName", "traits": { - "smithy.api#documentation": "A value that will return a subset of the pipes associated with this account. For example, \"NamePrefix\": \"ABC\"
will return \n all endpoints with \"ABC\" in the name.
A value that will return a subset of the pipes associated with this account. For\n example, \"NamePrefix\": \"ABC\"
will return all endpoints with \"ABC\" in the\n name.
Dynamic path to the measurement attribute in the source event.
", + "smithy.api#required": {} + } + }, + "MeasureValueType": { + "target": "com.amazonaws.pipes#MeasureValueType", + "traits": { + "smithy.api#documentation": "Data type of the measurement attribute in the source event.
", + "smithy.api#required": {} + } + }, + "MultiMeasureAttributeName": { + "target": "com.amazonaws.pipes#MultiMeasureAttributeName", + "traits": { + "smithy.api#documentation": "Target measure name to be used.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "A mapping of a source event data field to a measure in a Timestream for\n LiveAnalytics record.
" + } + }, + "com.amazonaws.pipes#MultiMeasureAttributeMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#MultiMeasureAttributeMapping" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.pipes#MultiMeasureAttributeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.pipes#MultiMeasureMapping": { + "type": "structure", + "members": { + "MultiMeasureName": { + "target": "com.amazonaws.pipes#MultiMeasureName", + "traits": { + "smithy.api#documentation": "The name of the multiple measurements per record (multi-measure).
", + "smithy.api#required": {} + } + }, + "MultiMeasureAttributeMappings": { + "target": "com.amazonaws.pipes#MultiMeasureAttributeMappings", + "traits": { + "smithy.api#documentation": "Mappings that represent multiple source event fields mapped to measures in the same\n Timestream for LiveAnalytics record.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Maps multiple measures from the source event to the same Timestream for\n LiveAnalytics record.
\nFor more information, see Amazon Timestream for LiveAnalytics concepts\n
" + } + }, + "com.amazonaws.pipes#MultiMeasureMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#MultiMeasureMapping" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.pipes#MultiMeasureName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.pipes#NetworkConfiguration": { "type": "structure", "members": { "awsvpcConfiguration": { "target": "com.amazonaws.pipes#AwsVpcConfiguration", "traits": { - "smithy.api#documentation": "Use this structure to specify the VPC subnets and security groups for the task, and\n whether a public IP address is to be used. This structure is relevant only for ECS tasks that\n use the awsvpc
   "com.amazonaws.pipes#NetworkConfiguration": {
     "type": "structure",
     "members": {
       "awsvpcConfiguration": {
         "target": "com.amazonaws.pipes#AwsVpcConfiguration",
         "traits": {
-          "smithy.api#documentation": "Use this structure to specify the VPC subnets and security groups for the task, and\n whether a public IP address is to be used. This structure is relevant only for ECS tasks that\n use the awsvpc network mode."
+          "smithy.api#documentation": "Use this structure to specify the VPC subnets and security groups for the task, and\n whether a public IP address is to be used. This structure is relevant only for ECS tasks\n that use the awsvpc network mode."
         }
       }
     },
     "traits": {
-      "smithy.api#documentation": "This structure specifies the network configuration for an Amazon ECS task."
+      "smithy.api#documentation": "This structure specifies the network configuration for an Amazon ECS\n task."
     }
   },
   "com.amazonaws.pipes#NextToken": {
@@ -1977,7 +2208,7 @@
     },
     "traits": {
-      "smithy.api#documentation": "An object that represents a pipe. Amazon EventBridgePipes connect event sources to targets and reduces the need for specialized knowledge and integration code."
+      "smithy.api#documentation": "An object that represents a pipe. Amazon EventBridge Pipes connects event sources to\n targets and reduces the need for specialized knowledge and integration code."
     }
   },
   "com.amazonaws.pipes#PipeArn": {
@@ -2007,24 +2238,24 @@
       "PathParameterValues": {
         "target": "com.amazonaws.pipes#PathParameterList",
         "traits": {
-          "smithy.api#documentation": "The path parameter values to be used to populate API Gateway REST API or EventBridge\n ApiDestination path wildcards (\"*\")."
+          "smithy.api#documentation": "The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\")."
         }
       },
       "HeaderParameters": {
         "target": "com.amazonaws.pipes#HeaderParametersMap",
         "traits": {
-          "smithy.api#documentation": "The headers that need to be sent as part of request invoking the API Gateway REST API or\n EventBridge ApiDestination."
+          "smithy.api#documentation": "The headers that need to be sent as part of the request invoking the API Gateway REST\n API or EventBridge ApiDestination."
         }
       },
       "QueryStringParameters": {
         "target": "com.amazonaws.pipes#QueryStringParametersMap",
         "traits": {
-          "smithy.api#documentation": "The query string keys/values that need to be sent as part of request invoking the API Gateway \n REST API or EventBridge ApiDestination."
+          "smithy.api#documentation": "The query string keys/values that need to be sent as part of the request invoking the\n API Gateway REST API or EventBridge ApiDestination."
         }
       }
     },
     "traits": {
-      "smithy.api#documentation": "These are custom parameter to be used when the target is an API Gateway REST APIs or\n EventBridge ApiDestinations. In the latter case, these are merged with any\n InvocationParameters specified on the Connection, with any values from the Connection taking\n precedence."
+      "smithy.api#documentation": "These are custom parameters to be used when the target is an API Gateway REST API\n or an EventBridge ApiDestination. In the latter case, these are merged with any\n InvocationParameters specified on the Connection, with any values from the Connection\n taking precedence."
     }
   },
   "com.amazonaws.pipes#PipeEnrichmentParameters": {
@@ -2033,13 +2264,13 @@
       "InputTemplate": {
         "target": "com.amazonaws.pipes#InputTemplate",
         "traits": {
-          "smithy.api#documentation": "Valid JSON text passed to the enrichment. In this case, nothing from the event itself is\n passed to the enrichment. For more information, see The JavaScript Object Notation (JSON) Data\n Interchange Format.\nTo remove an input template, specify an empty string."
+          "smithy.api#documentation": "Valid JSON text passed to the enrichment. In this case, nothing from the event itself is\n passed to the enrichment. For more information, see The JavaScript Object Notation (JSON)\n Data Interchange Format.\nTo remove an input template, specify an empty string."
         }
       },
       "HttpParameters": {
         "target": "com.amazonaws.pipes#PipeEnrichmentHttpParameters",
         "traits": {
-          "smithy.api#documentation": "Contains the HTTP parameters to use when the target is a API Gateway REST endpoint or\n EventBridge ApiDestination.\nIf you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can\n use this parameter to specify headers, path parameters, and query string keys/values as part\n of your target invoking request. If you're using ApiDestinations, the corresponding Connection\n can also have these values configured. In case of any conflicting keys, values from the\n Connection take precedence."
+          "smithy.api#documentation": "Contains the HTTP parameters to use when the target is an API Gateway REST\n endpoint or EventBridge ApiDestination.\nIf you specify an API Gateway REST API or EventBridge ApiDestination as a\n target, you can use this parameter to specify headers, path parameters, and query string\n keys/values as part of your target invoking request. If you're using ApiDestinations, the\n corresponding Connection can also have these values configured. In case of any conflicting\n keys, values from the Connection take precedence."
         }
       }
     },
" + "smithy.api#documentation": "The Amazon Data Firehose logging configuration settings for the pipe.
" } }, "CloudwatchLogsLogDestination": { @@ -2083,7 +2314,7 @@ "IncludeExecutionData": { "target": "com.amazonaws.pipes#IncludeExecutionData", "traits": { - "smithy.api#documentation": "Whether the execution data (specifically, the payload
, awsRequest
, and awsResponse
fields) is included in the log messages for this pipe.
This applies to all log destinations for the pipe.
\nFor more information, see Including execution data in logs in the Amazon EventBridge User Guide.
" + "smithy.api#documentation": "Whether the execution data (specifically, the payload
,\n awsRequest
, and awsResponse
fields) is included in the log\n messages for this pipe.
This applies to all log destinations for the pipe.
\nFor more information, see Including execution data in logs in the Amazon EventBridge User\n Guide.
" } } }, @@ -2103,7 +2334,7 @@ "FirehoseLogDestination": { "target": "com.amazonaws.pipes#FirehoseLogDestinationParameters", "traits": { - "smithy.api#documentation": "The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
" + "smithy.api#documentation": "The Amazon Data Firehose logging configuration settings for the pipe.
" } }, "CloudwatchLogsLogDestination": { @@ -2115,19 +2346,19 @@ "Level": { "target": "com.amazonaws.pipes#LogLevel", "traits": { - "smithy.api#documentation": "The level of logging detail to include. This applies to all log destinations for the pipe.
\nFor more information, see Specifying EventBridge Pipes log level in the Amazon EventBridge User Guide.
", + "smithy.api#documentation": "The level of logging detail to include. This applies to all log destinations for the pipe.
\nFor more information, see Specifying\n EventBridge Pipes log level in the Amazon EventBridge User\n Guide.
", "smithy.api#required": {} } }, "IncludeExecutionData": { "target": "com.amazonaws.pipes#IncludeExecutionData", "traits": { - "smithy.api#documentation": "Specify ON
to include the execution data (specifically, the payload
and awsRequest
fields) in the log messages for this pipe.
This applies to all log destinations for the pipe.
\nFor more information, see Including execution data in logs in the Amazon EventBridge User Guide.
\nThe default is OFF
.
Specify ALL
to include the execution data (specifically, the\n payload
, awsRequest
, and awsResponse
fields) in\n the log messages for this pipe.
This applies to all log destinations for the pipe.
\nFor more information, see Including execution data in logs in the Amazon EventBridge User\n Guide.
\nBy default, execution data is not included.
" } } }, "traits": { - "smithy.api#documentation": "Specifies the logging configuration settings for the pipe.
\nWhen you call UpdatePipe
, EventBridge updates the fields in the\n PipeLogConfigurationParameters
object atomically as one and overrides\n existing values. This is by design. If you don't specify an optional field in any of the\n Amazon Web Services service parameters objects\n (CloudwatchLogsLogDestinationParameters
,\n FirehoseLogDestinationParameters
, or\n S3LogDestinationParameters
), EventBridge sets that field to its\n system-default value during the update.
For example, suppose when you created the pipe you\n specified a Kinesis Data Firehose stream log destination. You then update the pipe to add an\n Amazon S3 log destination. In addition to specifying the\n S3LogDestinationParameters
for the new log destination, you must also\n specify the fields in the FirehoseLogDestinationParameters
object in order to\n retain the Kinesis Data Firehose stream log destination.
For more information on generating pipe log records, see Log EventBridge Pipes in the Amazon EventBridge User Guide.
" + "smithy.api#documentation": "Specifies the logging configuration settings for the pipe.
\nWhen you call UpdatePipe
, EventBridge updates the fields in the\n PipeLogConfigurationParameters
object atomically as one and overrides\n existing values. This is by design. If you don't specify an optional field in any of the\n Amazon Web Services service parameters objects\n (CloudwatchLogsLogDestinationParameters
,\n FirehoseLogDestinationParameters
, or\n S3LogDestinationParameters
), EventBridge sets that field to its\n system-default value during the update.
For example, suppose when you created the pipe you specified a Firehose stream\n log destination. You then update the pipe to add an Amazon S3 log destination. In\n addition to specifying the S3LogDestinationParameters
for the new log\n destination, you must also specify the fields in the\n FirehoseLogDestinationParameters
object in order to retain the Firehose stream log destination.
For more information on generating pipe log records, see Log EventBridge\n Pipes in the Amazon EventBridge User Guide.
" } }, "com.amazonaws.pipes#PipeName": { @@ -2331,7 +2562,7 @@ "StartingPositionTimestamp": { "target": "com.amazonaws.pipes#Timestamp", "traits": { - "smithy.api#documentation": "With StartingPosition
   "com.amazonaws.pipes#PipeName": {
@@ -2331,7 +2562,7 @@
       "StartingPositionTimestamp": {
         "target": "com.amazonaws.pipes#Timestamp",
         "traits": {
-          "smithy.api#documentation": "With StartingPosition set to AT_TIMESTAMP, the time from which to start reading, in Unix time seconds."
+          "smithy.api#documentation": "With StartingPosition set to AT_TIMESTAMP, the time from which\n to start reading, in Unix time seconds."
         }
       },
-          "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source."
+          "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source.\nA self-managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself, as well as those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide."
         }
       }
     },
@@ -2540,7 +2771,7 @@
     },
     "traits": {
-      "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source."
+      "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source.\nA self-managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself, as well as those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide."
     }
   },
   "com.amazonaws.pipes#PipeSourceSqsQueueParameters": {
@@ -2646,14 +2877,14 @@
       "JobDefinition": {
         "target": "smithy.api#String",
         "traits": {
-          "smithy.api#documentation": "The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. \n If name is specified without a revision then the latest active revision is used."
+          "smithy.api#documentation": "The job definition used by this job. This value can be one of name,\n name:revision, or the Amazon Resource Name (ARN) for the job definition. If\n name is specified without a revision then the latest active revision is used."
         }
       },
-          "smithy.api#documentation": "The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), \n and underscores (_).",
+          "smithy.api#documentation": "The name of the job. It can be up to 128 letters long. The first character must be\n alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and\n underscores (_).",
           "smithy.api#required": {}
         }
       },
@@ -2666,7 +2897,7 @@
       "RetryStrategy": {
         "target": "com.amazonaws.pipes#BatchRetryStrategy",
         "traits": {
-          "smithy.api#documentation": "The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition."
+          "smithy.api#documentation": "The retry strategy to use for failed jobs. When a retry strategy is specified here, it\n overrides the retry strategy defined in the job definition."
         }
       },
       "ContainerOverrides": {
@@ -2678,13 +2909,13 @@
       "DependsOn": {
         "target": "com.amazonaws.pipes#BatchDependsOn",
         "traits": {
-          "smithy.api#documentation": "A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without \n specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N \n type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each \n dependency to complete before it can begin."
+          "smithy.api#documentation": "A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can\n specify a SEQUENTIAL type dependency without specifying a job ID for array\n jobs so that each child array job completes sequentially, starting at index 0. You can also\n specify an N_TO_N type dependency with a job ID for array jobs. In that case,\n each index child of this job must wait for the corresponding index child of each dependency\n to complete before it can begin."
         }
       },
-          "smithy.api#documentation": "Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and \n value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition."
+          "smithy.api#documentation": "Additional parameters passed to the job that replace parameter substitution placeholders\n that are set in the job definition. Parameters are specified as a key and value pair\n mapping. Parameters included here override any corresponding parameter defaults from the\n job definition."
" + "smithy.api#documentation": "The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970\n 00:00:00 UTC.
" } } }, @@ -2718,51 +2949,51 @@ "TaskDefinitionArn": { "target": "com.amazonaws.pipes#ArnOrJsonPath", "traits": { - "smithy.api#documentation": "The ARN of the task definition to use if the event target is an Amazon ECS task.
", + "smithy.api#documentation": "The ARN of the task definition to use if the event target is an Amazon ECS task.\n
", "smithy.api#required": {} } }, "TaskCount": { "target": "com.amazonaws.pipes#LimitMin1", "traits": { - "smithy.api#documentation": "The number of tasks to create based on TaskDefinition
. The default is 1.
The number of tasks to create based on TaskDefinition
. The default is\n 1.
Specifies the launch type on which your task is running. The launch type that you specify\n here must match one of the launch type (compatibilities) of the target task. The\n FARGATE
value is supported only in the Regions where Fargate with Amazon ECS\n is supported. For more information, see Fargate on Amazon ECS in\n the Amazon Elastic Container Service Developer Guide.
Specifies the launch type on which your task is running. The launch type that you\n specify here must match one of the launch type (compatibilities) of the target task. The\n FARGATE
value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see\n Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.
Use this structure if the Amazon ECS task uses the awsvpc
network mode. This\n structure specifies the VPC subnets and security groups associated with the task, and whether\n a public IP address is to be used. This structure is required if LaunchType
is\n FARGATE
because the awsvpc
mode is required for Fargate\n tasks.
If you specify NetworkConfiguration
when the target ECS task does not use the\n awsvpc
network mode, the task fails.
Use this structure if the Amazon ECS task uses the awsvpc
network\n mode. This structure specifies the VPC subnets and security groups associated with the\n task, and whether a public IP address is to be used. This structure is required if\n LaunchType
is FARGATE
because the awsvpc
mode is\n required for Fargate tasks.
If you specify NetworkConfiguration
when the target ECS task does not use\n the awsvpc
network mode, the task fails.
Specifies the platform version for the task. Specify only the numeric portion of the\n platform version, such as 1.1.0
.
This structure is used only if LaunchType
is FARGATE
. For more\n information about valid platform versions, see Fargate Platform\n Versions in the Amazon Elastic Container Service Developer\n Guide.
Specifies the platform version for the task. Specify only the numeric portion of the\n platform version, such as 1.1.0
.
This structure is used only if LaunchType
is FARGATE
. For more\n information about valid platform versions, see Fargate\n Platform Versions in the Amazon Elastic Container Service Developer\n Guide.
Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.
" + "smithy.api#documentation": "Specifies an Amazon ECS task group for the task. The maximum length is 255\n characters.
" } }, "CapacityProviderStrategy": { "target": "com.amazonaws.pipes#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "The capacity provider strategy to use for the task.
\nIf a capacityProviderStrategy
is specified, the launchType
\n parameter must be omitted. If no capacityProviderStrategy
or launchType is\n specified, the defaultCapacityProviderStrategy
for the cluster is used.
The capacity provider strategy to use for the task.
\nIf a capacityProviderStrategy
is specified, the launchType
\n parameter must be omitted. If no capacityProviderStrategy
or launchType is\n specified, the defaultCapacityProviderStrategy
for the cluster is used.\n
Specifies whether to enable Amazon ECS managed tags for the task. For more information,\n see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer\n Guide.
" + "smithy.api#documentation": "Specifies whether to enable Amazon ECS managed tags for the task. For more\n information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.
" } }, "EnableExecuteCommand": { @@ -2787,7 +3018,7 @@ "PropagateTags": { "target": "com.amazonaws.pipes#PropagateTags", "traits": { - "smithy.api#documentation": "Specifies whether to propagate the tags from the task definition to the task. If no value\n is specified, the tags are not propagated. Tags can only be propagated to the task during task\n creation. To add tags to a task after task creation, use the TagResource
API action.
Specifies whether to propagate the tags from the task definition to the task. If no\n value is specified, the tags are not propagated. Tags can only be propagated to the task\n during task creation. To add tags to a task after task creation, use the\n TagResource
API action.
The metadata that you apply to the task to help you categorize and organize them. Each tag\n consists of a key and an optional value, both of which you define. To learn more, see RunTask in the Amazon ECS API Reference.
" + "smithy.api#documentation": "The metadata that you apply to the task to help you categorize and organize them. Each\n tag consists of a key and an optional value, both of which you define. To learn more, see\n RunTask in the Amazon ECS API Reference.
" } } }, @@ -2819,13 +3050,13 @@ "EndpointId": { "target": "com.amazonaws.pipes#EventBridgeEndpointId", "traits": { - "smithy.api#documentation": "The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo
@@ -2819,13 +3050,13 @@
       "EndpointId": {
         "target": "com.amazonaws.pipes#EventBridgeEndpointId",
         "traits": {
-          "smithy.api#documentation": "The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo."
+          "smithy.api#documentation": "The URL subdomain of the endpoint. For example, if the URL for Endpoint is\n https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is\n abcde.veo."
         }
       },
-          "smithy.api#documentation": "A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail."
+          "smithy.api#documentation": "A free-form string, with a maximum of 128 characters, used to decide what fields to\n expect in the event detail."
         }
       },
       "Source": {
@@ -2837,7 +3068,7 @@
       "Resources": {
         "target": "com.amazonaws.pipes#EventBridgeEventResourceList",
         "traits": {
-          "smithy.api#documentation": "Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily\n concerns. Any number, including zero, may be present."
+          "smithy.api#documentation": "Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event\n primarily concerns. Any number, including zero, may be present."
" } }, "Time": { @@ -2857,19 +3088,19 @@ "PathParameterValues": { "target": "com.amazonaws.pipes#PathParameterList", "traits": { - "smithy.api#documentation": "The path parameter values to be used to populate API Gateway REST API or EventBridge\n ApiDestination path wildcards (\"*\").
" + "smithy.api#documentation": "The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\").
" } }, "HeaderParameters": { "target": "com.amazonaws.pipes#HeaderParametersMap", "traits": { - "smithy.api#documentation": "The headers that need to be sent as part of request invoking the API Gateway REST API or\n EventBridge ApiDestination.
" + "smithy.api#documentation": "The headers that need to be sent as part of request invoking the API Gateway REST\n API or EventBridge ApiDestination.
" } }, "QueryStringParameters": { "target": "com.amazonaws.pipes#QueryStringParametersMap", "traits": { - "smithy.api#documentation": "The query string keys/values that need to be sent as part of request invoking the API Gateway \n REST API or EventBridge ApiDestination.
" + "smithy.api#documentation": "The query string keys/values that need to be sent as part of request invoking the\n API Gateway REST API or EventBridge ApiDestination.
" } } }, @@ -2898,7 +3129,7 @@ "PartitionKey": { "target": "com.amazonaws.pipes#KinesisPartitionKey", "traits": { - "smithy.api#documentation": "Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters \n for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. \n Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this \n hashing mechanism, all data records with the same partition key map to the same shard within the stream.
", + "smithy.api#documentation": "Determines which shard in the stream the data record is assigned to. Partition keys are\n Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the\n partition key and associated data to a specific shard. Specifically, an MD5 hash function\n is used to map partition keys to 128-bit integer values and to map associated data records\n to shards. As a result of this hashing mechanism, all data records with the same partition\n key map to the same shard within the stream.
", "smithy.api#required": {} } } @@ -2913,7 +3144,7 @@ "InvocationType": { "target": "com.amazonaws.pipes#PipeTargetInvocationType", "traits": { - "smithy.api#documentation": "Specify whether to invoke the function synchronously or asynchronously.
\n\n REQUEST_RESPONSE
(default) - Invoke synchronously. This corresponds to the RequestResponse
option in the InvocationType
parameter for the Lambda Invoke API.
\n FIRE_AND_FORGET
- Invoke asynchronously. This corresponds to the Event
option in the InvocationType
parameter for the Lambda Invoke API.
For more information, see Invocation types in the Amazon EventBridge User Guide.
" + "smithy.api#documentation": "Specify whether to invoke the function synchronously or asynchronously.
\n\n REQUEST_RESPONSE
(default) - Invoke synchronously. This corresponds\n to the RequestResponse
option in the InvocationType
\n parameter for the Lambda\n Invoke\n API.
\n FIRE_AND_FORGET
- Invoke asynchronously. This corresponds to the\n Event
option in the InvocationType
parameter for the\n Lambda\n Invoke\n API.
For more information, see Invocation\n types in the Amazon EventBridge User Guide.
" } } }, @@ -2927,7 +3158,7 @@ "InputTemplate": { "target": "com.amazonaws.pipes#InputTemplate", "traits": { - "smithy.api#documentation": "Valid JSON text passed to the target. In this case, nothing from the event itself is\n passed to the target. For more information, see The JavaScript Object Notation (JSON) Data\n Interchange Format.
         }
       }
     },
@@ -2927,7 +3158,7 @@
       "InputTemplate": {
         "target": "com.amazonaws.pipes#InputTemplate",
         "traits": {
-          "smithy.api#documentation": "Valid JSON text passed to the target. In this case, nothing from the event itself is\n passed to the target. For more information, see The JavaScript Object Notation (JSON) Data\n Interchange Format.\nTo remove an input template, specify an empty string."
+          "smithy.api#documentation": "Valid JSON text passed to the target. In this case, nothing from the event itself is\n passed to the target. For more information, see The JavaScript Object Notation (JSON)\n Data Interchange Format.\nTo remove an input template, specify an empty string."
         }
       },
       "LambdaFunctionParameters": {
@@ -2995,6 +3226,12 @@
         "traits": {
           "smithy.api#documentation": "The parameters for using a CloudWatch Logs log stream as a target."
         }
+      },
+      "TimestreamParameters": {
+        "target": "com.amazonaws.pipes#PipeTargetTimestreamParameters",
+        "traits": {
+          "smithy.api#documentation": "The parameters for using a Timestream for LiveAnalytics table as a\n target."
+        }
+      }
     },
     "traits": {
@@ -3013,7 +3250,7 @@
       "Database": {
         "target": "com.amazonaws.pipes#Database",
         "traits": {
-          "smithy.api#documentation": "The name of the database. Required when authenticating using temporary credentials.",
+          "smithy.api#documentation": "The name of the database. Required when authenticating using temporary\n credentials.",
           "smithy.api#required": {}
         }
       },
@@ -3054,7 +3291,7 @@
       "PipelineParameterList": {
         "target": "com.amazonaws.pipes#SageMakerPipelineParameterList",
         "traits": {
-          "smithy.api#documentation": "List of Parameter names and values for SageMaker Model Building Pipeline execution."
+          "smithy.api#documentation": "List of Parameter names and values for SageMaker Model Building Pipeline\n execution."
         }
       }
     },
@@ -3088,7 +3325,7 @@
       "InvocationType": {
         "target": "com.amazonaws.pipes#PipeTargetInvocationType",
         "traits": {
-          "smithy.api#documentation": "Specify whether to invoke the Step Functions state machine synchronously or asynchronously.\n\n REQUEST_RESPONSE (default) - Invoke synchronously. For more information, see StartSyncExecution in the Step Functions API Reference.\n REQUEST_RESPONSE is not supported for STANDARD state machine workflows.\n FIRE_AND_FORGET - Invoke asynchronously. For more information, see StartExecution in the Step Functions API Reference.\nFor more information, see Invocation types in the Amazon EventBridge User Guide."
+          "smithy.api#documentation": "Specify whether to invoke the Step Functions state machine synchronously or\n asynchronously.\n\n REQUEST_RESPONSE (default) - Invoke synchronously. For more\n information, see StartSyncExecution in the Step Functions API\n Reference.\n REQUEST_RESPONSE is not supported for STANDARD state\n machine workflows.\n FIRE_AND_FORGET - Invoke asynchronously. For more information, see\n StartExecution in the Step Functions API\n Reference.\nFor more information, see Invocation\n types in the Amazon EventBridge User Guide."
         }
       }
     },
     "traits": {
       "smithy.api#documentation": "The parameters for using a Step Functions state machine as a target."
     }
   },
", + "smithy.api#required": {} + } + }, + "EpochTimeUnit": { + "target": "com.amazonaws.pipes#EpochTimeUnit", + "traits": { + "smithy.api#documentation": "The granularity of the time units used. Default is MILLISECONDS
.
Required if TimeFieldType
is specified as EPOCH
.
The type of time value used.
\nThe default is EPOCH
.
How to format the timestamps. For example,\n YYYY-MM-DDThh:mm:ss.sssTZD
.
Required if TimeFieldType
is specified as\n TIMESTAMP_FORMAT
.
64 bit version value or source data field that represents the version value for your data.
\nWrite requests with a higher version number will update the existing measure values of the record and version. \n In cases where the measure value is the same, the version will still be updated.
\nDefault value is 1.
\nTimestream for LiveAnalytics does not support updating partial measure values in a record.
\nWrite requests for duplicate data with a\n higher version number will update the existing measure value and version. In cases where\n the measure value is the same, Version
will still be updated. Default value is\n 1
.
\n Version
must be 1
or greater, or you will receive a\n ValidationException
error.
Map source data to dimensions in the target Timestream for LiveAnalytics\n table.
\nFor more information, see Amazon Timestream for LiveAnalytics concepts\n
", + "smithy.api#required": {} + } + }, + "SingleMeasureMappings": { + "target": "com.amazonaws.pipes#SingleMeasureMappings", + "traits": { + "smithy.api#documentation": "Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table.
" + } + }, + "MultiMeasureMappings": { + "target": "com.amazonaws.pipes#MultiMeasureMappings", + "traits": { + "smithy.api#documentation": "Maps multiple measures from the source event to the same record in the specified Timestream for LiveAnalytics table.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The parameters for using a Timestream for LiveAnalytics table as a\n target.
" + } + }, "com.amazonaws.pipes#Pipes": { "type": "service", "version": "2015-10-07", @@ -3170,7 +3466,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing \n event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. \n To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.
", + "smithy.api#documentation": "Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need\n for specialized knowledge and integration code when developing event driven architectures.\n This helps ensures consistency across your company’s applications. With Pipes, the target\n can be any available EventBridge target. To set up a pipe, you select the event\n source, add optional event filtering, define optional enrichment, and select the target for\n the event data.
", "smithy.api#title": "Amazon EventBridge Pipes", "smithy.api#xmlNamespace": { "uri": "http://events.amazonaws.com/doc/2015-10-07" @@ -3847,18 +4143,18 @@ "type": { "target": "com.amazonaws.pipes#PlacementConstraintType", "traits": { - "smithy.api#documentation": "The type of constraint. Use distinctInstance to ensure that each task in a particular\n group is running on a different container instance. Use memberOf to restrict the selection to\n a group of valid candidates.
" + "smithy.api#documentation": "The type of constraint. Use distinctInstance to ensure that each task in a particular\n group is running on a different container instance. Use memberOf to restrict the selection\n to a group of valid candidates.
" } }, "expression": { "target": "com.amazonaws.pipes#PlacementConstraintExpression", "traits": { - "smithy.api#documentation": "A cluster query language expression to apply to the constraint. You cannot specify an\n expression if the constraint type is distinctInstance
. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.\n
A cluster query language expression to apply to the constraint. You cannot specify an\n expression if the constraint type is distinctInstance
. To learn more, see\n Cluster Query\n Language in the Amazon Elastic Container Service Developer Guide.
An object representing a constraint on task placement. To learn more, see Task Placement Constraints in the Amazon Elastic Container Service Developer\n Guide.
" + "smithy.api#documentation": "An object representing a constraint on task placement. To learn more, see Task Placement\n Constraints in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.pipes#PlacementConstraintExpression": { @@ -3916,18 +4212,18 @@ "type": { "target": "com.amazonaws.pipes#PlacementStrategyType", "traits": { - "smithy.api#documentation": "The type of placement strategy. The random placement strategy randomly places tasks on\n available candidates. The spread placement strategy spreads placement across available\n candidates evenly based on the field parameter. The binpack strategy places tasks on available\n candidates that have the least available amount of the resource that is specified with the\n field parameter. For example, if you binpack on memory, a task is placed on the instance with\n the least amount of remaining memory (but still enough to run the task).
" + "smithy.api#documentation": "The type of placement strategy. The random placement strategy randomly places tasks on\n available candidates. The spread placement strategy spreads placement across available\n candidates evenly based on the field parameter. The binpack strategy places tasks on\n available candidates that have the least available amount of the resource that is specified\n with the field parameter. For example, if you binpack on memory, a task is placed on the\n instance with the least amount of remaining memory (but still enough to run the task).\n
" } }, "field": { "target": "com.amazonaws.pipes#PlacementStrategyField", "traits": { - "smithy.api#documentation": "The field to apply the placement strategy against. For the spread placement strategy,\n valid values are instanceId (or host, which has the same effect), or any platform or custom\n attribute that is applied to a container instance, such as attribute:ecs.availability-zone.\n For the binpack placement strategy, valid values are cpu and memory. For the random placement\n strategy, this field is not used.
" + "smithy.api#documentation": "The field to apply the placement strategy against. For the spread placement strategy,\n valid values are instanceId (or host, which has the same effect), or any platform or custom\n attribute that is applied to a container instance, such as attribute:ecs.availability-zone.\n For the binpack placement strategy, valid values are cpu and memory. For the random\n placement strategy, this field is not used.
" } } }, "traits": { - "smithy.api#documentation": "The task placement strategy for a task or service. To learn more, see Task Placement Strategies in the Amazon Elastic Container Service Service Developer\n Guide.
" + "smithy.api#documentation": "The task placement strategy for a task or service. To learn more, see Task Placement\n Strategies in the Amazon Elastic Container Service Service Developer Guide.
" } }, "com.amazonaws.pipes#PlacementStrategyField": { @@ -4069,13 +4365,13 @@ "BucketName": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.
" + "smithy.api#documentation": "The name of the Amazon S3 bucket to which EventBridge delivers the log\n records for the pipe.
" } }, "Prefix": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "The prefix text with which to begin Amazon S3 log object names.
\nFor more information, see Organizing objects using prefixes\n in the Amazon Simple Storage Service User Guide.
" + "smithy.api#documentation": "The prefix text with which to begin Amazon S3 log object names.
\nFor more information, see Organizing objects using\n prefixes in the Amazon Simple Storage Service User Guide.
" } }, "BucketOwner": { @@ -4087,7 +4383,7 @@ "OutputFormat": { "target": "com.amazonaws.pipes#S3OutputFormat", "traits": { - "smithy.api#documentation": "The format EventBridge uses for the log records.
\n\n json
: JSON
\n plain
: Plain text
\n w3c
: W3C extended logging file format\n
The format EventBridge uses for the log records.
\n\n json
: JSON
\n plain
: Plain text
\n w3c
: W3C extended\n logging file format\n
Specifies the name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.
", + "smithy.api#documentation": "Specifies the name of the Amazon S3 bucket to which EventBridge delivers\n the log records for the pipe.
", "smithy.api#length": { "min": 3, "max": 63 @@ -4112,7 +4408,7 @@ "BucketOwner": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.
", + "smithy.api#documentation": "Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which\n EventBridge delivers the log records for the pipe.
", "smithy.api#pattern": "^\\d{12}$", "smithy.api#required": {} } @@ -4120,13 +4416,13 @@ "OutputFormat": { "target": "com.amazonaws.pipes#S3OutputFormat", "traits": { - "smithy.api#documentation": "How EventBridge should format the log records.
\n\n json
: JSON
\n plain
: Plain text
\n w3c
: W3C extended logging file format\n
How EventBridge should format the log records.
\n\n json
: JSON
\n plain
: Plain text
\n w3c
: W3C extended\n logging file format\n
Specifies any prefix text with which to begin Amazon S3 log object names.
\nYou can use prefixes to organize the data that you store in Amazon S3 buckets. \n A prefix is a string of characters at the beginning of the object key name. \n A prefix can be any length, subject to the maximum length of the object key name (1,024 bytes). \n For more information, see Organizing objects using prefixes\n in the Amazon Simple Storage Service User Guide.
", + "smithy.api#documentation": "Specifies any prefix text with which to begin Amazon S3 log object names.
\nYou can use prefixes to organize the data that you store in Amazon S3 buckets. A\n prefix is a string of characters at the beginning of the object key name. A prefix can be\n any length, subject to the maximum length of the object key name (1,024 bytes). For more\n information, see Organizing objects using\n prefixes in the Amazon Simple Storage Service User Guide.
", "smithy.api#length": { "max": 256 } @@ -4162,14 +4458,14 @@ "Name": { "target": "com.amazonaws.pipes#SageMakerPipelineParameterName", "traits": { - "smithy.api#documentation": "Name of parameter to start execution of a SageMaker Model Building Pipeline.
", + "smithy.api#documentation": "Name of parameter to start execution of a SageMaker Model Building\n Pipeline.
", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.pipes#SageMakerPipelineParameterValue", "traits": { - "smithy.api#documentation": "Value of parameter to start execution of a SageMaker Model Building Pipeline.
", + "smithy.api#documentation": "Value of parameter to start execution of a SageMaker Model Building\n Pipeline.
", "smithy.api#required": {} } } @@ -4318,18 +4614,18 @@ "Subnets": { "target": "com.amazonaws.pipes#SubnetIds", "traits": { - "smithy.api#documentation": "Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
" + "smithy.api#documentation": "Specifies the subnets associated with the stream. These subnets must all be in the same\n VPC. You can specify as many as 16 subnets.
" } }, "SecurityGroup": { "target": "com.amazonaws.pipes#SecurityGroupIds", "traits": { - "smithy.api#documentation": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many \n as five security groups. If you do not specify a security group, the default security group for the VPC is used.
" + "smithy.api#documentation": "Specifies the security groups associated with the stream. These security groups must all\n be in the same VPC. You can specify as many as five security groups. If you do not specify\n a security group, the default security group for the VPC is used.
" } } }, "traits": { - "smithy.api#documentation": "This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.
" + "smithy.api#documentation": "This structure specifies the VPC subnets and security groups for the stream, and whether\n a public IP address is to be used.
" } }, "com.amazonaws.pipes#SelfManagedKafkaStartPosition": { @@ -4391,6 +4687,47 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.pipes#SingleMeasureMapping": { + "type": "structure", + "members": { + "MeasureValue": { + "target": "com.amazonaws.pipes#MeasureValue", + "traits": { + "smithy.api#documentation": "Dynamic path of the source field to map to the measure in the record.
", + "smithy.api#required": {} + } + }, + "MeasureValueType": { + "target": "com.amazonaws.pipes#MeasureValueType", + "traits": { + "smithy.api#documentation": "Data type of the source field.
", + "smithy.api#required": {} + } + }, + "MeasureName": { + "target": "com.amazonaws.pipes#MeasureName", + "traits": { + "smithy.api#documentation": "Target measure name for the measurement attribute in the Timestream table.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Maps a single source data field to a single record in the specified Timestream\n for LiveAnalytics table.
\nFor more information, see Amazon Timestream for LiveAnalytics concepts\n
" + } + }, + "com.amazonaws.pipes#SingleMeasureMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#SingleMeasureMapping" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 8192 + } + } + }, "com.amazonaws.pipes#Sql": { "type": "string", "traits": { @@ -4693,7 +5030,7 @@ } }, "traits": { - "smithy.api#documentation": "A key-value pair associated with an Amazon Web Services resource. In EventBridge, rules and event buses\n support tagging.
" + "smithy.api#documentation": "A key-value pair associated with an Amazon Web Services resource. In EventBridge,\n rules and event buses support tagging.
" } }, "com.amazonaws.pipes#TagKey": { @@ -4760,7 +5097,7 @@ "traits": { "aws.iam#actionPermissionDescription": "Grants permission to add tags to a resource", "aws.iam#conditionKeys": ["aws:TagKeys", "aws:RequestTag/${TagKey}", "aws:ResourceTag/${TagKey}"], - "smithy.api#documentation": "Assigns one or more tags (key-value pairs) to the specified pipe. Tags can\n help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user permission to access or change only resources with certain tag\n values.
@@ -4760,7 +5097,7 @@
     "traits": {
       "aws.iam#actionPermissionDescription": "Grants permission to add tags to a resource",
       "aws.iam#conditionKeys": ["aws:TagKeys", "aws:RequestTag/${TagKey}", "aws:ResourceTag/${TagKey}"],
-      "smithy.api#documentation": "Assigns one or more tags (key-value pairs) to the specified pipe. Tags can\n help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user permission to access or change only resources with certain tag\n values.\nTags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of\n characters.\nYou can use the TagResource action with a pipe that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n pipe. If you specify a tag key that is already associated with the pipe, the new tag\n value that you specify replaces the previous value for that tag.\nYou can associate as many as 50 tags with a pipe.",
+      "smithy.api#documentation": "Assigns one or more tags (key-value pairs) to the specified pipe. Tags can help you\n organize and categorize your resources. You can also use them to scope user permissions by\n granting a user permission to access or change only resources with certain tag\n values.\nTags don't have any semantic meaning to Amazon Web Services and are interpreted strictly\n as strings of characters.\nYou can use the TagResource action with a pipe that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n pipe. If you specify a tag key that is already associated with the pipe, the new tag value\n that you specify replaces the previous value for that tag.\nYou can associate as many as 50 tags with a pipe.",
       "smithy.api#http": {
         "uri": "/tags/{resourceArn}",
         "method": "POST"
       }
", + "smithy.api#documentation": "The number of seconds to wait before retrying the action that caused the\n exception.
", "smithy.api#httpHeader": "Retry-After" } } @@ -4848,9 +5185,42 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.pipes#TimeFieldType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EPOCH", + "name": "EPOCH" + }, + { + "value": "TIMESTAMP_FORMAT", + "name": "TIMESTAMP_FORMAT" + } + ] + } + }, + "com.amazonaws.pipes#TimeValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.pipes#Timestamp": { "type": "timestamp" }, + "com.amazonaws.pipes#TimestampFormat": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.pipes#URI": { "type": "string", "traits": { @@ -4956,7 +5326,7 @@ "traits": { "aws.iam#actionPermissionDescription": "Grants permission to update a pipe", "aws.iam#conditionKeys": ["aws:ResourceTag/${TagKey}", "aws:RequestTag/${TagKey}", "aws:TagKeys"], - "smithy.api#documentation": "Update an existing pipe. When you call UpdatePipe
, EventBridge only the updates fields you have specified in the request; the rest remain unchanged.\n The exception to this is if you modify any Amazon Web Services-service specific fields in the SourceParameters
, EnrichmentParameters
, or \n TargetParameters
objects. For example, DynamoDBStreamParameters
or EventBridgeEventBusParameters
. \n EventBridge updates the fields in these objects atomically as one and overrides existing values. \n This is by design, and means that if you don't specify an optional field in one of these Parameters
objects, EventBridge sets that field to its system-default value during the update.
For more information about pipes, see \n Amazon EventBridge Pipes in the Amazon EventBridge User Guide.
", + "smithy.api#documentation": "Update an existing pipe. When you call UpdatePipe
, EventBridge only the\n updates fields you have specified in the request; the rest remain unchanged. The exception\n to this is if you modify any Amazon Web Services-service specific fields in the\n SourceParameters
, EnrichmentParameters
, or\n TargetParameters
objects. For example,\n DynamoDBStreamParameters
or EventBridgeEventBusParameters
.\n EventBridge updates the fields in these objects atomically as one and overrides existing\n values. This is by design, and means that if you don't specify an optional field in one of\n these Parameters
objects, EventBridge sets that field to its system-default\n value during the update.
For more information about pipes, see \n Amazon EventBridge Pipes in the Amazon EventBridge User Guide.
", "smithy.api#http": { "method": "PUT", "uri": "/v1/pipes/{Name}", @@ -5283,7 +5653,7 @@ "SelfManagedKafkaParameters": { "target": "com.amazonaws.pipes#UpdatePipeSourceSelfManagedKafkaParameters", "traits": { - "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source.
" + "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source.
\nA self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself, as well as those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.
" } } }, @@ -5353,7 +5723,7 @@ } }, "traits": { - "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source.
" + "smithy.api#documentation": "The parameters for using a self-managed Apache Kafka stream as a source.
\nA self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself, as well as those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.
" } }, "com.amazonaws.pipes#UpdatePipeSourceSqsQueueParameters": { @@ -5385,7 +5755,7 @@ "fieldList": { "target": "com.amazonaws.pipes#ValidationExceptionFieldList", "traits": { - "smithy.api#documentation": "The list of fields for which validation failed and the corresponding failure messages.
" + "smithy.api#documentation": "The list of fields for which validation failed and the corresponding failure\n messages.
" } } }, @@ -5422,6 +5792,15 @@ "member": { "target": "com.amazonaws.pipes#ValidationExceptionField" } + }, + "com.amazonaws.pipes#VersionValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } } } }