diff --git a/.changes/1.34.113.json b/.changes/1.34.113.json new file mode 100644 index 0000000000..cfa389e1f4 --- /dev/null +++ b/.changes/1.34.113.json @@ -0,0 +1,17 @@ +[ + { + "category": "``dynamodb``", + "description": "Documentation only updates for DynamoDB.", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "AWS IoT FleetWise now supports listing vehicles with attributes filter, ListVehicles API is updated to support additional attributes filter.", + "type": "api-change" + }, + { + "category": "``managedblockchain``", + "description": "This is a minor documentation update to address the impact of the shut down of the Goerli and Polygon networks.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 43c45a616e..9bdfd67f16 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,14 @@ CHANGELOG ========= +1.34.113 +======== + +* api-change:``dynamodb``: Documentation only updates for DynamoDB. +* api-change:``iotfleetwise``: AWS IoT FleetWise now supports listing vehicles with attributes filter, ListVehicles API is updated to support additional attributes filter. +* api-change:``managedblockchain``: This is a minor documentation update to address the impact of the shut down of the Goerli and Polygon networks. + + 1.34.112 ======== diff --git a/botocore/__init__.py b/botocore/__init__.py index 9d9e57385e..15fedef539 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.112' +__version__ = '1.34.113' class NullHandler(logging.Handler): diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index a8f3c338c9..9d0bdadbb6 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"dynamodb", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"DynamoDB", "serviceFullName":"Amazon DynamoDB", "serviceId":"DynamoDB", @@ -60,7 +61,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.

BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

", + "documentation":"

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16 MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400 KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.

BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear as though it was updated. To update items, we recommend that you use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.
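For illustration, a minimal boto3 sketch of this retry loop; the table name, key attribute, and backoff parameters are hypothetical, not part of the API model:

```python
import time

import boto3

dynamodb = boto3.client("dynamodb")

# Hypothetical initial batch: one put request against a "Music" table.
request_items = {
    "Music": [
        {"PutRequest": {"Item": {"Artist": {"S": "No One You Know"}}}},
    ]
}

delay = 0.05
while request_items:
    response = dynamodb.batch_write_item(RequestItems=request_items)
    # Resubmit only what DynamoDB could not process, with exponential backoff.
    request_items = response.get("UnprocessedItems", {})
    if request_items:
        time.sleep(delay)
        delay = min(delay * 2, 5.0)
```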

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

", "endpointdiscovery":{ } }, @@ -98,7 +99,7 @@ {"shape":"GlobalTableAlreadyExistsException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

If you want to add a new replica table to a global table, each of the following conditions must be true:

If global secondary indexes are specified, then the following conditions must also be met:

If local secondary indexes are specified, then the following conditions must also be met:

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

", + "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version), as it provides greater flexibility and higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

If you want to add a new replica table to a global table, each of the following conditions must be true:

If global secondary indexes are specified, then the following conditions must also be met:

If local secondary indexes are specified, then the following conditions must also be met:

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.
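One way to follow the auto scaling recommendation above is through the Application Auto Scaling API; a sketch with illustrative resource names and capacity limits, not a prescribed configuration:

```python
import boto3

autoscaling = boto3.client("application-autoscaling")

# Register the table's write capacity as a scalable target
# (repeat per replica Region and per secondary index as needed).
autoscaling.register_scalable_target(
    ServiceNamespace="dynamodb",
    ResourceId="table/Music",  # hypothetical table
    ScalableDimension="dynamodb:table:WriteCapacityUnits",
    MinCapacity=5,
    MaxCapacity=100,
)
```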

", "endpointdiscovery":{ } }, @@ -191,7 +192,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.

This operation only applies to Version 2019.11.21 (Current) of global tables.

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable action to check the status of the table.

", + "documentation":"

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in the CREATING or UPDATING state, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable action to check the status of the table.
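As a small boto3 sketch of that status check, using the built-in waiter that polls DescribeTable until the deletion resolves (table name is hypothetical):

```python
import boto3

dynamodb = boto3.client("dynamodb")
dynamodb.delete_table(TableName="Music")  # hypothetical table name

# The table_not_exists waiter polls DescribeTable until the
# DELETING state resolves and the table is gone.
dynamodb.get_waiter("table_not_exists").wait(TableName="Music")
```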

", "endpointdiscovery":{ } }, @@ -279,7 +280,7 @@ {"shape":"InternalServerError"}, {"shape":"GlobalTableNotFoundException"} ], - "documentation":"

Returns information about the specified global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Returns information about the specified global table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version), as it provides greater flexibility and higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", "endpointdiscovery":{ } }, @@ -295,7 +296,7 @@ {"shape":"GlobalTableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes Region-specific settings for a global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Describes Region-specific settings for a global table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version), as it provides greater flexibility and higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", "endpointdiscovery":{ } }, @@ -355,7 +356,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

", + "documentation":"

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.
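Rather than sleeping manually, a boto3 sketch using the built-in waiter (the table name is illustrative):

```python
import boto3

dynamodb = boto3.client("dynamodb")

# table_exists retries DescribeTable for you, absorbing the
# eventually consistent window right after CreateTable.
dynamodb.get_waiter("table_exists").wait(TableName="Music")
table = dynamodb.describe_table(TableName="Music")["Table"]
print(table["TableStatus"])  # "ACTIVE" once the waiter returns
```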

", "endpointdiscovery":{ } }, @@ -371,7 +372,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes auto scaling settings across replicas of the global table at once.

This operation only applies to Version 2019.11.21 (Current) of global tables.

" + "documentation":"

Describes auto scaling settings across replicas of the global table at once.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

" }, "DescribeTimeToLive":{ "name":"DescribeTimeToLive", @@ -586,7 +587,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists all global tables that have a replica in the specified Region.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Lists all global tables that have a replica in the specified Region.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version), as it provides greater flexibility and higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", "endpointdiscovery":{ } }, @@ -871,7 +872,7 @@ {"shape":"ReplicaNotFoundException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version 2019.11.21 you can use UpdateTable instead.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

", + "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version), as it provides greater flexibility and higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

If you are using global tables Version 2019.11.21 (Current version), you can use UpdateTable instead.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

", "endpointdiscovery":{ } }, @@ -891,7 +892,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates settings for a global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Updates settings for a global table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version), as it provides greater flexibility and higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", "endpointdiscovery":{ } }, @@ -948,7 +949,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", + "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.
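A boto3 sketch of waiting out that UPDATING window; the table name and throughput values are illustrative assumptions:

```python
import time

import boto3

dynamodb = boto3.client("dynamodb")
dynamodb.update_table(
    TableName="Music",  # hypothetical table and throughput values
    ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)

# UpdateTable returns immediately; poll until the table leaves UPDATING.
while dynamodb.describe_table(TableName="Music")["Table"]["TableStatus"] != "ACTIVE":
    time.sleep(2)
```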

", "endpointdiscovery":{ } }, @@ -966,7 +967,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates auto scaling settings on your global tables at once.

This operation only applies to Version 2019.11.21 (Current) of global tables.

" + "documentation":"

Updates auto scaling settings on your global tables at once.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

" }, "UpdateTimeToLive":{ "name":"UpdateTimeToLive", @@ -1838,7 +1839,7 @@ "documentation":"

The amount of throughput consumed on each global index affected by the operation.

" } }, - "documentation":"

The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned capacity mode in the Amazon DynamoDB Developer Guide.
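Since ConsumedCapacity appears only when requested, a minimal boto3 sketch of asking for it on a GetItem call (table and key are hypothetical):

```python
import boto3

dynamodb = boto3.client("dynamodb")
response = dynamodb.get_item(
    TableName="Music",
    Key={"Artist": {"S": "No One You Know"}},
    ReturnConsumedCapacity="TOTAL",  # ConsumedCapacity is omitted otherwise
)
print(response["ConsumedCapacity"]["CapacityUnits"])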

" }, "ConsumedCapacityMultiple":{ "type":"list", @@ -2077,7 +2078,7 @@ }, "BillingMode":{ "shape":"BillingMode", - "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.

" + "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.

" }, "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", @@ -2281,7 +2282,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned capacity mode in the Amazon DynamoDB Developer Guide.

" }, "ItemCollectionMetrics":{ "shape":"ItemCollectionMetrics", @@ -3157,7 +3158,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } }, "documentation":"

Represents the output of a GetItem operation.

" @@ -4659,7 +4660,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer Guide.

" }, "ItemCollectionMetrics":{ "shape":"ItemCollectionMetrics", @@ -4806,7 +4807,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } }, "documentation":"

Represents the output of a Query operation.

" @@ -5605,7 +5606,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } }, "documentation":"

Represents the output of a Scan operation.

" @@ -6388,7 +6389,7 @@ }, "GlobalTableBillingMode":{ "shape":"BillingMode", - "documentation":"

The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode.

" + "documentation":"

The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode.

" }, "GlobalTableProvisionedWriteCapacityUnits":{ "shape":"PositiveLongObject", @@ -6489,7 +6490,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the UpdateItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the UpdateItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer Guide.

" }, "ItemCollectionMetrics":{ "shape":"ItemCollectionMetrics", @@ -6595,7 +6596,7 @@ }, "BillingMode":{ "shape":"BillingMode", - "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes.

" + "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes.
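A sketch of that switch in boto3; the table name and initial throughput numbers are illustrative assumptions:

```python
import boto3

dynamodb = boto3.client("dynamodb")
dynamodb.update_table(
    TableName="Music",  # hypothetical table
    BillingMode="PROVISIONED",
    # Required when switching off PAY_PER_REQUEST:
    ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
```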

" }, "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", @@ -6615,7 +6616,7 @@ }, "ReplicaUpdates":{ "shape":"ReplicationGroupUpdateList", - "documentation":"

A list of replica update actions (create, delete, or update) for the table.

This property only applies to Version 2019.11.21 (Current) of global tables.

" + "documentation":"

A list of replica update actions (create, delete, or update) for the table.

For global tables, this property only applies to global tables using Version 2019.11.21 (Current version).
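A boto3 sketch of adding a replica through this property, per the 2019.11.21 (Current version) workflow; the table name and Region are illustrative:

```python
import boto3

dynamodb = boto3.client("dynamodb")

# Add a replica in another Region via a Create replica update action.
dynamodb.update_table(
    TableName="Music",  # hypothetical table
    ReplicaUpdates=[{"Create": {"RegionName": "us-west-2"}}],
)
```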

" }, "TableClass":{ "shape":"TableClass", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 998cfec67f..f3318630c6 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -8953,11 +8953,6 @@ "us-west-2" : { } } }, - "honeycode" : { - "endpoints" : { - "us-west-2" : { } - } - }, "iam" : { "endpoints" : { "aws-global" : { @@ -28943,4 +28938,4 @@ "services" : { } } ], "version" : 3 -} \ No newline at end of file +} diff --git a/botocore/data/iotfleetwise/2021-06-17/service-2.json b/botocore/data/iotfleetwise/2021-06-17/service-2.json index 26960e3e92..655b9f2727 100644 --- a/botocore/data/iotfleetwise/2021-06-17/service-2.json +++ b/botocore/data/iotfleetwise/2021-06-17/service-2.json @@ -1419,7 +1419,7 @@ }, "signalCatalogArn":{ "shape":"arn", - "documentation":"

(Optional) The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign.

" + "documentation":"

The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign.

" }, "targetArn":{ "shape":"arn", @@ -3251,6 +3251,14 @@ "shape":"arn", "documentation":"

The Amazon Resource Name (ARN) of a vehicle model (model manifest). You can use this optional parameter to list only the vehicles created from a certain vehicle model.

" }, + "attributeNames":{ + "shape":"attributeNamesList", + "documentation":"

The fully qualified names of the attributes. For example, the fully qualified name of an attribute might be Vehicle.Body.Engine.Type.

" + }, + "attributeValues":{ + "shape":"attributeValuesList", + "documentation":"

Static information about a vehicle attribute value in string format. For example:

\"1.3 L R2\"

" + }, "nextToken":{ "shape":"nextToken", "documentation":"

A pagination token for the next set of results.

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next set of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
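To illustrate the new attribute filters together with this pagination contract, a boto3 sketch; the attribute name/value pair is taken from the examples above, and everything else is an assumption:

```python
import boto3

fleetwise = boto3.client("iotfleetwise")

vehicles = []
token = None
while True:
    kwargs = {
        "attributeNames": ["Vehicle.Body.Engine.Type"],  # new filter params
        "attributeValues": ["1.3 L R2"],
    }
    if token:
        kwargs["nextToken"] = token
    page = fleetwise.list_vehicles(**kwargs)
    vehicles.extend(page.get("vehicleSummaries", []))
    token = page.get("nextToken")
    if not token:  # no token means all results have been returned
        break
```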

" @@ -4884,7 +4892,19 @@ "min":1, "pattern":"[a-zA-Z0-9_.-]+" }, + "attributeNamesList":{ + "type":"list", + "member":{"shape":"attributeName"}, + "max":5, + "min":1 + }, "attributeValue":{"type":"string"}, + "attributeValuesList":{ + "type":"list", + "member":{"shape":"attributeValue"}, + "max":5, + "min":1 + }, "attributesMap":{ "type":"map", "key":{"shape":"attributeName"}, diff --git a/botocore/data/managedblockchain/2018-09-24/service-2.json b/botocore/data/managedblockchain/2018-09-24/service-2.json index 8f7a755fd9..e179748dbd 100644 --- a/botocore/data/managedblockchain/2018-09-24/service-2.json +++ b/botocore/data/managedblockchain/2018-09-24/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"managedblockchain", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"ManagedBlockchain", "serviceFullName":"Amazon Managed Blockchain", "serviceId":"ManagedBlockchain", @@ -652,7 +653,7 @@ }, "NetworkType":{ "shape":"AccessorNetworkType", - "documentation":"

The blockchain network that the Accessor token is created for.

We recommend using the appropriate networkType value for the blockchain network that you are creating the Accessor token for. You cannnot use the value ETHEREUM_MAINNET_AND_GOERLI to specify a networkType for your Accessor token.

The default value of ETHEREUM_MAINNET_AND_GOERLI is only applied:

" + "documentation":"

The blockchain network that the Accessor token is created for.

" } } }, @@ -790,7 +791,7 @@ }, "NetworkId":{ "shape":"ResourceIdString", - "documentation":"

The unique identifier of the network for the node.

Ethereum public networks have the following NetworkIds:

", + "documentation":"

The unique identifier of the network for the node.

Ethereum public networks have the following NetworkIds:

", "location":"uri", "locationName":"networkId" }, @@ -916,7 +917,7 @@ "members":{ "NetworkId":{ "shape":"ResourceIdString", - "documentation":"

The unique identifier of the network that the node is on.

Ethereum public networks have the following NetworkIds:

", + "documentation":"

The unique identifier of the network that the node is on.

Ethereum public networks have the following NetworkIds:

", "location":"uri", "locationName":"networkId" }, @@ -1818,7 +1819,7 @@ "members":{ "ChainId":{ "shape":"String", - "documentation":"

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

" + "documentation":"

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

" } }, "documentation":"

Attributes of Ethereum for a network.

" diff --git a/docs/source/conf.py b/docs/source/conf.py index 0daa18dec2..11f963180e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.34.1' # The full version, including alpha/beta/rc tags. -release = '1.34.112' +release = '1.34.113' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.