+ * Retrieves the results for a given media analysis job.
+ * Takes a JobId returned by StartMediaAnalysisJob.
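+ *
+ * @example
+ * Use a bare-bones client and the command you need to make an API call. This block is a
+ * sketch reconstructed from the GetMediaAnalysisJobRequest and GetMediaAnalysisJobResponse
+ * shapes in this change; all field values are placeholders.
+ * ```javascript
+ * import { RekognitionClient, GetMediaAnalysisJobCommand } from "@aws-sdk/client-rekognition"; // ES Modules import
+ * // const { RekognitionClient, GetMediaAnalysisJobCommand } = require("@aws-sdk/client-rekognition"); // CommonJS import
+ * const client = new RekognitionClient(config);
+ * const input = { // GetMediaAnalysisJobRequest
+ *   JobId: "STRING_VALUE", // required
+ * };
+ * const command = new GetMediaAnalysisJobCommand(input);
+ * const response = await client.send(command);
+ * // response includes JobId, Status, OperationsConfig, CreationTimestamp, Input, and
+ * // OutputConfig, plus optional Results and ManifestSummary (see GetMediaAnalysisJobResponse).
+ * ```
+ *
+ * @param GetMediaAnalysisJobCommandInput - {@link GetMediaAnalysisJobCommandInput}
+ * @returns {@link GetMediaAnalysisJobCommandOutput}
+ * @see {@link GetMediaAnalysisJobCommandInput} for command's `input` shape.
+ * @see {@link GetMediaAnalysisJobCommandOutput} for command's `response` shape.
+ * @see {@link RekognitionClientResolvedConfig | config} for RekognitionClient's `config` shape.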
+ *
+ * @throws {@link AccessDeniedException} (client fault)
+ * You are not authorized to perform the action.
+ *
+ * @throws {@link InternalServerError} (server fault)
+ * Amazon Rekognition experienced a service issue. Try your call again.
+ *
+ * @throws {@link InvalidParameterException} (client fault)
+ * Input parameter violated a constraint. Validate your parameter before calling the API
+ * operation again.
+ *
+ * @throws {@link ProvisionedThroughputExceededException} (client fault)
+ * The number of requests exceeded your throughput limit. If you want to increase this
+ * limit, contact Amazon Rekognition.
+ *
+ * @throws {@link ResourceNotFoundException} (client fault)
+ * The resource specified in the request cannot be found.
+ *
+ * @throws {@link ThrottlingException} (server fault)
+ * Amazon Rekognition is temporarily unable to process the request. Try your call again.
+ *
+ * @throws {@link RekognitionServiceException}
+ * Base exception class for all service exceptions from Rekognition service.
+ *
+ */
+export class GetMediaAnalysisJobCommand extends $Command<
+  GetMediaAnalysisJobCommandInput,
+  GetMediaAnalysisJobCommandOutput,
+  RekognitionClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  public static getEndpointParameterInstructions(): EndpointParameterInstructions {
+    return {
+      UseFIPS: { type: "builtInParams", name: "useFipsEndpoint" },
+      Endpoint: { type: "builtInParams", name: "endpoint" },
+      Region: { type: "builtInParams", name: "region" },
+      UseDualStack: { type: "builtInParams", name: "useDualstackEndpoint" },
+    };
+  }
+
+  /**
+   * @public
+   */
+  constructor(readonly input: GetMediaAnalysisJobCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
+
+/**
+ * @public
+ * Returns a list of media analysis jobs. Results are sorted by CreationTimestamp
+ * in descending order.
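+ *
+ * @example
+ * Use a bare-bones client and the command you need to make an API call. This block is a
+ * sketch reconstructed from the ListMediaAnalysisJobsRequest and ListMediaAnalysisJobsResponse
+ * shapes in this change; all field values are placeholders.
+ * ```javascript
+ * import { RekognitionClient, ListMediaAnalysisJobsCommand } from "@aws-sdk/client-rekognition"; // ES Modules import
+ * // const { RekognitionClient, ListMediaAnalysisJobsCommand } = require("@aws-sdk/client-rekognition"); // CommonJS import
+ * const client = new RekognitionClient(config);
+ * const input = { // ListMediaAnalysisJobsRequest
+ *   NextToken: "STRING_VALUE",
+ *   MaxResults: Number("int"),
+ * };
+ * const command = new ListMediaAnalysisJobsCommand(input);
+ * const response = await client.send(command);
+ * // response.MediaAnalysisJobs is a list of MediaAnalysisJobDescription objects;
+ * // response.NextToken is returned when more results are available.
+ * ```
+ *
+ * @param ListMediaAnalysisJobsCommandInput - {@link ListMediaAnalysisJobsCommandInput}
+ * @returns {@link ListMediaAnalysisJobsCommandOutput}
+ * @see {@link ListMediaAnalysisJobsCommandInput} for command's `input` shape.
+ * @see {@link ListMediaAnalysisJobsCommandOutput} for command's `response` shape.
+ * @see {@link RekognitionClientResolvedConfig | config} for RekognitionClient's `config` shape.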
+ *
+ * @throws {@link AccessDeniedException} (client fault)
+ * You are not authorized to perform the action.
+ *
+ * @throws {@link InternalServerError} (server fault)
+ * Amazon Rekognition experienced a service issue. Try your call again.
+ *
+ * @throws {@link InvalidPaginationTokenException} (client fault)
+ * Pagination token in the request is not valid.
+ *
+ * @throws {@link InvalidParameterException} (client fault)
+ * Input parameter violated a constraint. Validate your parameter before calling the API
+ * operation again.
+ *
+ * @throws {@link ProvisionedThroughputExceededException} (client fault)
+ * The number of requests exceeded your throughput limit. If you want to increase this
+ * limit, contact Amazon Rekognition.
+ *
+ * @throws {@link ThrottlingException} (server fault)
+ * Amazon Rekognition is temporarily unable to process the request. Try your call again.
+ *
+ * @throws {@link RekognitionServiceException}
+ * Base exception class for all service exceptions from Rekognition service.
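+ *
+ * @example
+ * A hedged sketch of iterating all jobs with the paginateListMediaAnalysisJobs helper
+ * added in src/pagination/ListMediaAnalysisJobsPaginator.ts; the pageSize of 50 is an
+ * illustrative assumption, not a service requirement.
+ * ```javascript
+ * import { paginateListMediaAnalysisJobs } from "@aws-sdk/client-rekognition";
+ * const paginator = paginateListMediaAnalysisJobs({ client, pageSize: 50 }, {});
+ * for await (const page of paginator) {
+ *   for (const job of page.MediaAnalysisJobs ?? []) {
+ *     console.log(job.JobId, job.Status);
+ *   }
+ * }
+ * ```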
+ *
+ */
+export class ListMediaAnalysisJobsCommand extends $Command<
+  ListMediaAnalysisJobsCommandInput,
+  ListMediaAnalysisJobsCommandOutput,
+  RekognitionClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  public static getEndpointParameterInstructions(): EndpointParameterInstructions {
+    return {
+      UseFIPS: { type: "builtInParams", name: "useFipsEndpoint" },
+      Endpoint: { type: "builtInParams", name: "endpoint" },
+      Region: { type: "builtInParams", name: "region" },
+      UseDualStack: { type: "builtInParams", name: "useDualstackEndpoint" },
+    };
+  }
+
+  /**
+   * @public
+   */
+  constructor(readonly input: ListMediaAnalysisJobsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
+
+/**
+ * @public
+ * Initiates a new media analysis job. Accepts a manifest file in an Amazon S3 bucket. The
+ * output is a manifest file and a summary of the manifest stored in the Amazon S3 bucket.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { RekognitionClient, StartMediaAnalysisJobCommand } from "@aws-sdk/client-rekognition"; // ES Modules import
+ * // const { RekognitionClient, StartMediaAnalysisJobCommand } = require("@aws-sdk/client-rekognition"); // CommonJS import
+ * const client = new RekognitionClient(config);
+ * const input = { // StartMediaAnalysisJobRequest
+ *   ClientRequestToken: "STRING_VALUE",
+ *   JobName: "STRING_VALUE",
+ *   OperationsConfig: { // MediaAnalysisOperationsConfig
+ *     DetectModerationLabels: { // MediaAnalysisDetectModerationLabelsConfig
+ *       MinConfidence: Number("float"),
+ *       ProjectVersion: "STRING_VALUE",
+ *     },
+ *   },
+ *   Input: { // MediaAnalysisInput
+ *     S3Object: { // S3Object
+ *       Bucket: "STRING_VALUE",
+ *       Name: "STRING_VALUE",
+ *       Version: "STRING_VALUE",
+ *     },
+ *   },
+ *   OutputConfig: { // MediaAnalysisOutputConfig
+ *     S3Bucket: "STRING_VALUE", // required
+ *     S3KeyPrefix: "STRING_VALUE",
+ *   },
+ *   KmsKeyId: "STRING_VALUE",
+ * };
+ * const command = new StartMediaAnalysisJobCommand(input);
+ * const response = await client.send(command);
+ * // { // StartMediaAnalysisJobResponse
+ * //   JobId: "STRING_VALUE", // required
+ * // };
+ *
+ * ```
+ *
+ * @param StartMediaAnalysisJobCommandInput - {@link StartMediaAnalysisJobCommandInput}
+ * @returns {@link StartMediaAnalysisJobCommandOutput}
+ * @see {@link StartMediaAnalysisJobCommandInput} for command's `input` shape.
+ * @see {@link StartMediaAnalysisJobCommandOutput} for command's `response` shape.
+ * @see {@link RekognitionClientResolvedConfig | config} for RekognitionClient's `config` shape.
+ *
+ * @throws {@link AccessDeniedException} (client fault)
+ * You are not authorized to perform the action.
+ *
+ * @throws {@link IdempotentParameterMismatchException} (client fault)
+ * A ClientRequestToken input parameter was reused with an operation, but at least one of the other input
+ * parameters is different from the previous call to the operation.
+ *
+ * @throws {@link InternalServerError} (server fault)
+ * Amazon Rekognition experienced a service issue. Try your call again.
+ *
+ * @throws {@link InvalidManifestException} (client fault)
+ * Indicates that a provided manifest file is empty or larger than the allowed limit.
+ *
+ * @throws {@link InvalidParameterException} (client fault)
+ * Input parameter violated a constraint. Validate your parameter before calling the API
+ * operation again.
+ *
+ * @throws {@link InvalidS3ObjectException} (client fault)
+ * Amazon Rekognition is unable to access the S3 object specified in the request.
+ *
+ * @throws {@link LimitExceededException} (client fault)
+ * An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs
+ * concurrently, subsequent calls to start operations (ex:
+ * StartLabelDetection) will raise a LimitExceededException
+ * exception (HTTP status code: 400) until the number of concurrently running jobs is below
+ * the Amazon Rekognition service limit.
+ *
+ * @throws {@link ProvisionedThroughputExceededException} (client fault)
+ * The number of requests exceeded your throughput limit. If you want to increase this
+ * limit, contact Amazon Rekognition.
+ *
+ * @throws {@link ResourceNotFoundException} (client fault)
+ * The resource specified in the request cannot be found.
+ *
+ * @throws {@link ResourceNotReadyException} (client fault)
+ * The requested resource isn't ready. For example,
+ * this exception occurs when you call DetectCustomLabels with a
+ * model version that isn't deployed.
+ *
+ * @throws {@link ThrottlingException} (server fault)
+ * Amazon Rekognition is temporarily unable to process the request. Try your call again.
+ *
+ * @throws {@link RekognitionServiceException}
+ * Base exception class for all service exceptions from Rekognition service.
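+ *
+ * @example
+ * A hypothetical polling sketch (an assumption, not part of the generated docs): start a
+ * job, then poll GetMediaAnalysisJobCommand until Status leaves the CREATED, QUEUED, and
+ * IN_PROGRESS states from MediaAnalysisJobStatus; the 10-second interval is illustrative.
+ * ```javascript
+ * const { JobId } = await client.send(new StartMediaAnalysisJobCommand(input));
+ * let job = await client.send(new GetMediaAnalysisJobCommand({ JobId }));
+ * while (["CREATED", "QUEUED", "IN_PROGRESS"].includes(job.Status)) {
+ *   await new Promise((resolve) => setTimeout(resolve, 10_000)); // assumed 10s poll interval
+ *   job = await client.send(new GetMediaAnalysisJobCommand({ JobId }));
+ * }
+ * // job.Status is now SUCCEEDED or FAILED; on success, job.Results points to the output manifest.
+ * ```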
+ *
+ */
+export class StartMediaAnalysisJobCommand extends $Command<
+  StartMediaAnalysisJobCommandInput,
+  StartMediaAnalysisJobCommandOutput,
+  RekognitionClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  public static getEndpointParameterInstructions(): EndpointParameterInstructions {
+    return {
+      UseFIPS: { type: "builtInParams", name: "useFipsEndpoint" },
+      Endpoint: { type: "builtInParams", name: "endpoint" },
+      Region: { type: "builtInParams", name: "region" },
+      UseDualStack: { type: "builtInParams", name: "useDualstackEndpoint" },
+    };
+  }
+
+  /**
+   * @public
+   */
+  constructor(readonly input: StartMediaAnalysisJobCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
+
+export interface GetMediaAnalysisJobRequest {
+  /**
+   * @public
+   * Unique identifier for the media analysis job for which you want to retrieve results.
+   */
+  JobId: string | undefined;
+}
+
+/**
+ * @public
+ * @enum
+ */
+export const MediaAnalysisJobFailureCode = {
+  ACCESS_DENIED: "ACCESS_DENIED",
+  INTERNAL_ERROR: "INTERNAL_ERROR",
+  INVALID_KMS_KEY: "INVALID_KMS_KEY",
+  INVALID_MANIFEST: "INVALID_MANIFEST",
+  INVALID_OUTPUT_CONFIG: "INVALID_OUTPUT_CONFIG",
+  INVALID_S3_OBJECT: "INVALID_S3_OBJECT",
+  RESOURCE_NOT_FOUND: "RESOURCE_NOT_FOUND",
+  RESOURCE_NOT_READY: "RESOURCE_NOT_READY",
+  THROTTLED: "THROTTLED",
+} as const;
+
+/**
+ * @public
+ */
+export type MediaAnalysisJobFailureCode =
+  (typeof MediaAnalysisJobFailureCode)[keyof typeof MediaAnalysisJobFailureCode];
+
+/**
+ * @public
+ * Details about the error that resulted in failure of the job.
+ */
+export interface MediaAnalysisJobFailureDetails {
+  /**
+   * @public
+   * Error code for the failed job.
+   */
+  Code?: MediaAnalysisJobFailureCode;
+
+  /**
+   * @public
+   * Human readable error message.
+   */
+  Message?: string;
+}
+
+/**
+ * @public
+ * Contains input information for a media analysis job.
+ */
+export interface MediaAnalysisInput {
+  /**
+   * @public
+   * Provides the S3 bucket name and object name.
+   * The region for the S3 bucket containing the S3 object must match the region you use for
+   * Amazon Rekognition operations.
+   * For Amazon Rekognition to process an S3 object, the user must have permission to
+   * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the
+   * Amazon Rekognition Developer Guide.
+   */
+  S3Object: S3Object | undefined;
+}
+
+/**
+ * @public
+ * Summary that provides statistics on the input manifest and errors identified in the input manifest.
+ */
+export interface MediaAnalysisManifestSummary {
+  /**
+   * @public
+   * Provides the S3 bucket name and object name.
+   * The region for the S3 bucket containing the S3 object must match the region you use for
+   * Amazon Rekognition operations.
+   * For Amazon Rekognition to process an S3 object, the user must have permission to
+   * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the
+   * Amazon Rekognition Developer Guide.
+   */
+  S3Object?: S3Object;
+}
+
+/**
+ * @public
+ * Configuration for Moderation Labels Detection.
+ */
+export interface MediaAnalysisDetectModerationLabelsConfig {
+  /**
+   * @public
+   * Specifies the minimum confidence level for the moderation labels to return. Amazon Rekognition
+   * doesn't return any labels with a confidence level lower than this specified value.
+   */
+  MinConfidence?: number;
+
+  /**
+   * @public
+   * Specifies the custom moderation model to be used during the label detection job.
+   * If not provided, the pre-trained model is used.
+   */
+  ProjectVersion?: string;
+}
+
+/**
+ * @public
+ * Configuration options for a media analysis job. Configuration is operation-specific.
+ */
+export interface MediaAnalysisOperationsConfig {
+  /**
+   * @public
+   * Contains configuration options for a DetectModerationLabels job.
+   */
+  DetectModerationLabels?: MediaAnalysisDetectModerationLabelsConfig;
+}
+
+/**
+ * @public
+ * Output configuration provided in the job creation request.
+ */
+export interface MediaAnalysisOutputConfig {
+  /**
+   * @public
+   * Specifies the Amazon S3 bucket to contain the output of the media analysis job.
+   */
+  S3Bucket: string | undefined;
+
+  /**
+   * @public
+   * Specifies the Amazon S3 key prefix that comes after the name of the bucket you have
+   * designated for storage.
+   */
+  S3KeyPrefix?: string;
+}
+
+/**
+ * @public
+ * Contains the results for a media analysis job created with StartMediaAnalysisJob.
+ */
+export interface MediaAnalysisResults {
+  /**
+   * @public
+   * Provides the S3 bucket name and object name.
+   * The region for the S3 bucket containing the S3 object must match the region you use for
+   * Amazon Rekognition operations.
+   * For Amazon Rekognition to process an S3 object, the user must have permission to
+   * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the
+   * Amazon Rekognition Developer Guide.
+   */
+  S3Object?: S3Object;
+}
+
+/**
+ * @public
+ * @enum
+ */
+export const MediaAnalysisJobStatus = {
+  CREATED: "CREATED",
+  FAILED: "FAILED",
+  IN_PROGRESS: "IN_PROGRESS",
+  QUEUED: "QUEUED",
+  SUCCEEDED: "SUCCEEDED",
+} as const;
+
+/**
+ * @public
+ */
+export type MediaAnalysisJobStatus = (typeof MediaAnalysisJobStatus)[keyof typeof MediaAnalysisJobStatus];
+
+/**
+ * @public
+ */
+export interface GetMediaAnalysisJobResponse {
+  /**
+   * @public
+   * The identifier for the media analysis job.
+   */
+  JobId: string | undefined;
+
+  /**
+   * @public
+   * The name of the media analysis job.
+   */
+  JobName?: string;
+
+  /**
+   * @public
+   * Operation configurations that were provided during job creation.
+   */
+  OperationsConfig: MediaAnalysisOperationsConfig | undefined;
+
+  /**
+   * @public
+   * The current status of the media analysis job.
+   */
+  Status: MediaAnalysisJobStatus | undefined;
+
+  /**
+   * @public
+   * Details about the error that resulted in failure of the job.
+   */
+  FailureDetails?: MediaAnalysisJobFailureDetails;
+
+  /**
+   * @public
+   * The Unix date and time when the job was started.
+   */
+  CreationTimestamp: Date | undefined;
+
+  /**
+   * @public
+   * The Unix date and time when the job finished.
+   */
+  CompletionTimestamp?: Date;
+
+  /**
+   * @public
+   * Reference to the input manifest that was provided in the job creation request.
+   */
+  Input: MediaAnalysisInput | undefined;
+
+  /**
+   * @public
+   * Output configuration that was provided in the creation request.
+   */
+  OutputConfig: MediaAnalysisOutputConfig | undefined;
+
+  /**
+   * @public
+   * KMS Key that was provided in the creation request.
+   */
+  KmsKeyId?: string;
+
+  /**
+   * @public
+   * Output manifest that contains prediction results.
+   */
+  Results?: MediaAnalysisResults;
+
+  /**
+   * @public
+   * The summary manifest provides statistics on the input manifest and errors identified in the input manifest.
+   */
+  ManifestSummary?: MediaAnalysisManifestSummary;
+}
+
 /**
  * @public
  * @enum
@@ -7241,6 +7489,36 @@ export interface IndexFacesResponse {
   UnindexedFaces?: UnindexedFace[];
 }
 
+/**
+ * @public
+ * Indicates that a provided manifest file is empty or larger than the allowed limit.
+ */
+export class InvalidManifestException extends __BaseException {
+  readonly name: "InvalidManifestException" = "InvalidManifestException";
+  readonly $fault: "client" = "client";
+  Message?: string;
+  Code?: string;
+  /**
+   * @public
+   * A universally unique identifier (UUID) for the request.
+   */
+  Logref?: string;
+  /**
+   * @internal
+   */
+  constructor(opts: __ExceptionOptionType<InvalidManifestException, __BaseException>) {
+    super({
+      name: "InvalidManifestException",
+      $fault: "client",
+      ...opts,
+    });
+    Object.setPrototypeOf(this, InvalidManifestException.prototype);
+    this.Message = opts.Message;
+    this.Code = opts.Code;
+    this.Logref = opts.Logref;
+  }
+}
+
 /**
  * @public
  * Specifies the starting point in a Kinesis stream to start processing. You can use the
@@ -7534,748 +7812,411 @@ export interface ListFacesResponse {
 /**
  * @public
  */
-export interface ListProjectPoliciesRequest {
-  /**
-   * @public
-   *
The ARN of the project for which you want to list the project policies.
- */ - ProjectArn: string | undefined; - +export interface ListMediaAnalysisJobsRequest { /** * @public - *If the previous response was incomplete (because there is more results to retrieve), - * Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token - * to retrieve the next set of results.
+ *Pagination token, if the previous response was incomplete.
*/ NextToken?: string; /** * @public - *The maximum number of results to return per paginated call. The largest value you can - * specify is 5. If you specify a value greater than 5, a ValidationException error - * occurs. The default value is 5.
+ *The maximum number of results to return per paginated call. The largest value user can specify is 100.
+ * If user specifies a value greater than 100, an InvalidParameterException
error occurs. The default value is 100.
Describes a project policy in the response from ListProjectPolicies.
- *+ *
Description for a media analysis job.
*/ -export interface ProjectPolicy { +export interface MediaAnalysisJobDescription { /** * @public - *The Amazon Resource Name (ARN) of the project to which the project policy is attached.
+ *The identifier for a media analysis job.
*/ - ProjectArn?: string; + JobId: string | undefined; /** * @public - *The name of the project policy.
+ *The name of a media analysis job.
*/ - PolicyName?: string; + JobName?: string; /** * @public - *The revision ID of the project policy.
+ *Operation configurations that were provided during job creation.
*/ - PolicyRevisionId?: string; + OperationsConfig: MediaAnalysisOperationsConfig | undefined; /** * @public - *The JSON document for the project policy.
+ *The status of the media analysis job being retrieved.
*/ - PolicyDocument?: string; + Status: MediaAnalysisJobStatus | undefined; /** * @public - *The Unix datetime for the creation of the project policy.
+ *Details about the error that resulted in failure of the job.
*/ - CreationTimestamp?: Date; + FailureDetails?: MediaAnalysisJobFailureDetails; /** * @public - *The Unix datetime for when the project policy was last updated.
+ *The Unix date and time when the job was started.
*/ - LastUpdatedTimestamp?: Date; -} + CreationTimestamp: Date | undefined; -/** - * @public - */ -export interface ListProjectPoliciesResponse { /** * @public - *A list of project policies attached to the project.
+ *The Unix date and time when the job finished.
*/ - ProjectPolicies?: ProjectPolicy[]; + CompletionTimestamp?: Date; /** * @public - *If the response is truncated, Amazon Rekognition returns this token that you can use in the - * subsequent request to retrieve the next set of project policies.
+ *Reference to the input manifest that was provided in the job creation request.
*/ - NextToken?: string; -} + Input: MediaAnalysisInput | undefined; -/** - * @public - */ -export interface ListStreamProcessorsRequest { /** * @public - *If the previous response was incomplete (because there are more stream processors to retrieve), Amazon Rekognition Video - * returns a pagination token in the response. You can use this pagination token to retrieve the next set of stream processors.
+ *Output configuration that was provided in the creation request.
*/ - NextToken?: string; + OutputConfig: MediaAnalysisOutputConfig | undefined; /** * @public - *Maximum number of stream processors you want Amazon Rekognition Video to return in the response. The default is 1000.
+ *KMS Key that was provided in the creation request.
*/ - MaxResults?: number; -} + KmsKeyId?: string; -/** - * @public - *An object that recognizes faces or labels in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request
- * parameters for CreateStreamProcessor
describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis resullts.
- *
- *
Name of the Amazon Rekognition stream processor.
+ *Output manifest that contains prediction results.
*/ - Name?: string; + Results?: MediaAnalysisResults; /** * @public - *Current status of the Amazon Rekognition stream processor.
+ *Provides statistics on input manifest and errors identified in the input manifest.
*/ - Status?: StreamProcessorStatus; + ManifestSummary?: MediaAnalysisManifestSummary; } /** * @public */ -export interface ListStreamProcessorsResponse { +export interface ListMediaAnalysisJobsResponse { /** * @public - *If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent - * request to retrieve the next set of stream processors.
+ *Pagination token, if the previous response was incomplete.
*/ NextToken?: string; /** * @public - *List of stream processors that you have created.
+ *Contains a list of all media analysis jobs.
*/ - StreamProcessors?: StreamProcessor[]; + MediaAnalysisJobs: MediaAnalysisJobDescription[] | undefined; } /** * @public */ -export interface ListTagsForResourceRequest { - /** - * @public - *Amazon Resource Name (ARN) of the model, collection, or stream processor that contains - * the tags that you want a list of.
- */ - ResourceArn: string | undefined; -} - -/** - * @public - */ -export interface ListTagsForResourceResponse { +export interface ListProjectPoliciesRequest { /** * @public - *A list of key-value tags assigned to the resource.
+ *The ARN of the project for which you want to list the project policies.
*/ - Tags?: RecordThe ID of an existing collection.
+ *If the previous response was incomplete (because there is more results to retrieve), + * Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token + * to retrieve the next set of results.
*/ - CollectionId: string | undefined; + NextToken?: string; /** * @public - *Maximum number of UsersID to return.
+ *The maximum number of results to return per paginated call. The largest value you can + * specify is 5. If you specify a value greater than 5, a ValidationException error + * occurs. The default value is 5.
*/ MaxResults?: number; - - /** - * @public - *Pagingation token to receive the next set of UsersID.
- */ - NextToken?: string; } /** * @public - *Metadata of the user stored in a collection.
+ *Describes a project policy in the response from ListProjectPolicies.
+ **/ -export interface User { - /** - * @public - *
A provided ID for the User. Unique within the collection.
- */ - UserId?: string; - +export interface ProjectPolicy { /** * @public - *Communicates if the UserID has been updated with latest set of faces to be associated - * with the UserID.
+ *The Amazon Resource Name (ARN) of the project to which the project policy is attached.
*/ - UserStatus?: UserStatus; -} + ProjectArn?: string; -/** - * @public - */ -export interface ListUsersResponse { /** * @public - *List of UsersID associated with the specified collection.
+ *The name of the project policy.
*/ - Users?: User[]; + PolicyName?: string; /** * @public - *A pagination token to be used with the subsequent request if the response is - * truncated.
+ *The revision ID of the project policy.
*/ - NextToken?: string; -} + PolicyRevisionId?: string; -/** - * @public - *The format of the project policy document that you supplied to
- * PutProjectPolicy
is incorrect.
A universally unique identifier (UUID) for the request.
- */ - Logref?: string; - /** - * @internal + *The JSON document for the project policy.
*/ - constructor(opts: __ExceptionOptionTypeContains metadata for a UserID matched with a given face.
- */ -export interface MatchedUser { /** * @public - *A provided ID for the UserID. Unique within the collection.
+ *The Unix datetime for the creation of the project policy.
*/ - UserId?: string; + CreationTimestamp?: Date; /** * @public - *The status of the user matched to a provided FaceID.
+ *The Unix datetime for when the project policy was last updated.
*/ - UserStatus?: UserStatus; + LastUpdatedTimestamp?: Date; } /** * @public - *The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see - * Calling Amazon Rekognition Video operations. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. - * For more information, see Giving access to multiple Amazon SNS topics.
*/ -export interface NotificationChannel { +export interface ListProjectPoliciesResponse { /** * @public - *The Amazon SNS topic to which Amazon Rekognition posts the completion status.
+ *A list of project policies attached to the project.
*/ - SNSTopicArn: string | undefined; + ProjectPolicies?: ProjectPolicy[]; /** * @public - *The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.
+ *If the response is truncated, Amazon Rekognition returns this token that you can use in the + * subsequent request to retrieve the next set of project policies.
*/ - RoleArn: string | undefined; + NextToken?: string; } /** * @public */ -export interface PutProjectPolicyRequest { - /** - * @public - *The Amazon Resource Name (ARN) of the project that the project policy is attached to.
- */ - ProjectArn: string | undefined; - - /** - * @public - *A name for the policy.
- */ - PolicyName: string | undefined; - +export interface ListStreamProcessorsRequest { /** * @public - *The revision ID for the Project Policy. Each time you modify a policy, Amazon Rekognition Custom Labels
- * generates and assigns a new PolicyRevisionId
and then deletes the previous version of the
- * policy.
If the previous response was incomplete (because there are more stream processors to retrieve), Amazon Rekognition Video + * returns a pagination token in the response. You can use this pagination token to retrieve the next set of stream processors.
*/ - PolicyRevisionId?: string; + NextToken?: string; /** * @public - *A resource policy to add to the model. The policy is a JSON structure that contains - * one or more statements that define the policy. - * The policy must follow the IAM syntax. For - * more information about the contents of a JSON policy document, see - * IAM JSON policy reference.
+ *Maximum number of stream processors you want Amazon Rekognition Video to return in the response. The default is 1000.
*/ - PolicyDocument: string | undefined; + MaxResults?: number; } /** * @public + *An object that recognizes faces or labels in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request
+ * parameters for CreateStreamProcessor
describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis resullts.
+ *
+ *
The ID of the project policy.
+ *Name of the Amazon Rekognition stream processor.
*/ - PolicyRevisionId?: string; -} + Name?: string; -/** - * @public - */ -export interface RecognizeCelebritiesRequest { /** * @public - *The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to - * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.
- *If you are using an AWS SDK to call Amazon Rekognition, you might not need to
- * base64-encode image bytes passed using the Bytes
field. For more information, see
- * Images in the Amazon Rekognition developer guide.
Current status of the Amazon Rekognition stream processor.
*/ - Image: Image | undefined; + Status?: StreamProcessorStatus; } /** * @public */ -export interface RecognizeCelebritiesResponse { - /** - * @public - *Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64
- * celebrities in an image. Each celebrity object includes the following attributes:
- * Face
, Confidence
, Emotions
, Landmarks
,
- * Pose
, Quality
, Smile
, Id
,
- * KnownGender
, MatchConfidence
, Name
,
- * Urls
.
Details about each unrecognized face in the image.
+ *If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent + * request to retrieve the next set of stream processors.
*/ - UnrecognizedFaces?: ComparedFace[]; + NextToken?: string; /** * @public - *Support for estimating image orientation using the the OrientationCorrection field - * has ceased as of August 2021. Any returned values for this field included in an API response - * will always be NULL.
- *The orientation of the input image (counterclockwise direction). If your application
- * displays the image, you can use this value to correct the orientation. The bounding box
- * coordinates returned in CelebrityFaces
and UnrecognizedFaces
- * represent face locations before the image orientation is corrected.
If the input image is in .jpeg format, it might contain exchangeable image (Exif)
- * metadata that includes the image's orientation. If so, and the Exif metadata for the input
- * image populates the orientation field, the value of OrientationCorrection
is
- * null. The CelebrityFaces
and UnrecognizedFaces
bounding box
- * coordinates represent face locations after Exif metadata is used to correct the image
- * orientation. Images in .png format don't contain Exif metadata.
List of stream processors that you have created.
*/ - OrientationCorrection?: OrientationCorrection; + StreamProcessors?: StreamProcessor[]; } /** * @public */ -export interface SearchFacesRequest { - /** - * @public - *ID of the collection the face belongs to.
- */ - CollectionId: string | undefined; - - /** - * @public - *ID of a face to find matches for in the collection.
- */ - FaceId: string | undefined; - - /** - * @public - *Maximum number of faces to return. The operation returns the maximum number of faces - * with the highest confidence in the match.
- */ - MaxFaces?: number; - +export interface ListTagsForResourceRequest { /** * @public - *Optional value specifying the minimum confidence in the face match to return. For - * example, don't return any matches where confidence in matches is less than 70%. The default - * value is 80%.
+ *Amazon Resource Name (ARN) of the model, collection, or stream processor that contains + * the tags that you want a list of.
*/ - FaceMatchThreshold?: number; + ResourceArn: string | undefined; } /** * @public */ -export interface SearchFacesResponse { - /** - * @public - *ID of the face that was searched for matches in a collection.
- */ - SearchedFaceId?: string; - - /** - * @public - *An array of faces that matched the input face, along with the confidence in the - * match.
- */ - FaceMatches?: FaceMatch[]; - +export interface ListTagsForResourceResponse { /** * @public - *Version number of the face detection model associated with the input collection
- * (CollectionId
).
A list of key-value tags assigned to the resource.
*/ - FaceModelVersion?: string; + Tags?: RecordID of the collection to search.
+ *The ID of an existing collection.
*/ CollectionId: string | undefined; /** * @public - *The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to - * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.
- *If you are using an AWS SDK to call Amazon Rekognition, you might not need to
- * base64-encode image bytes passed using the Bytes
field. For more information, see
- * Images in the Amazon Rekognition developer guide.
Maximum number of faces to return. The operation returns the maximum number of faces - * with the highest confidence in the match.
- */ - MaxFaces?: number; - - /** - * @public - *(Optional) Specifies the minimum confidence in the face match to return. For example, - * don't return any matches where confidence in matches is less than 70%. The default value is - * 80%.
- */ - FaceMatchThreshold?: number; - - /** - * @public - *A filter that specifies a quality bar for how much filtering is done to identify faces.
- * Filtered faces aren't searched for in the collection. If you specify AUTO
,
- * Amazon Rekognition chooses the quality bar. If you specify LOW
, MEDIUM
, or
- * HIGH
, filtering removes all faces that don’t meet the chosen quality bar.
- * The quality bar is
- * based on a variety of common use cases. Low-quality detections can occur for a number of
- * reasons. Some examples are an object that's misidentified as a face, a face that's too blurry,
- * or a face with a pose that's too extreme to use. If you specify NONE
, no
- * filtering is performed. The default value is NONE
.
To use quality filtering, the collection you are using must be associated with version 3 - * of the face model or higher.
- */ - QualityFilter?: QualityFilter; -} - -/** - * @public - */ -export interface SearchFacesByImageResponse { - /** - * @public - *The bounding box around the face in the input image that Amazon Rekognition used for the - * search.
- */ - SearchedFaceBoundingBox?: BoundingBox; - - /** - * @public - *The level of confidence that the searchedFaceBoundingBox
, contains a
- * face.
An array of faces that match the input face, along with the confidence in the - * match.
+ *Maximum number of UsersID to return.
*/ - FaceMatches?: FaceMatch[]; + MaxResults?: number; /** * @public - *Version number of the face detection model associated with the input collection
- * (CollectionId
).
Pagingation token to receive the next set of UsersID.
*/ - FaceModelVersion?: string; + NextToken?: string; } /** * @public + *Metadata of the user stored in a collection.
*/ -export interface SearchUsersRequest { - /** - * @public - *The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a - * FaceId is provided, UserId isn’t required to be present in the Collection.
- */ - CollectionId: string | undefined; - +export interface User { /** * @public - *ID for the existing User.
+ *A provided ID for the User. Unique within the collection.
*/ UserId?: string; /** * @public - *ID for the existing face.
- */ - FaceId?: string; - - /** - * @public - *Optional value that specifies the minimum confidence in the matched UserID to return. - * Default value of 80.
- */ - UserMatchThreshold?: number; - - /** - * @public - *Maximum number of identities to return.
+ *Communicates if the UserID has been updated with latest set of faces to be associated + * with the UserID.
*/ - MaxUsers?: number; + UserStatus?: UserStatus; } /** * @public - *Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for - * search.
*/ -export interface SearchedFace { +export interface ListUsersResponse { /** * @public - *Unique identifier assigned to the face.
+ *List of UsersID associated with the specified collection.
*/ - FaceId?: string; -} + Users?: User[]; -/** - * @public - *Contains metadata about a User searched for within a collection.
- */ -export interface SearchedUser { /** * @public - *A provided ID for the UserID. Unique within the collection.
+ *A pagination token to be used with the subsequent request if the response is + * truncated.
*/ - UserId?: string; + NextToken?: string; } /** * @public - *Provides UserID metadata along with the confidence in the match of this UserID with the - * input face.
+ *The format of the project policy document that you supplied to
+ * PutProjectPolicy
is incorrect.
Describes the UserID metadata.
+ *A universally unique identifier (UUID) for the request.
*/ - Similarity?: number; - + Logref?: string; /** - * @public - *Confidence in the match of this UserID with the input face.
+ * @internal */ - User?: MatchedUser; + constructor(opts: __ExceptionOptionTypeContains metadata for a UserID matched with a given face.
*/ -export interface SearchUsersResponse { - /** - * @public - *An array of UserMatch objects that matched the input face along with the confidence in the - * match. Array will be empty if there are no matches.
- */ - UserMatches?: UserMatch[]; - - /** - * @public - *Version number of the face detection model associated with the input CollectionId.
- */ - FaceModelVersion?: string; - +export interface MatchedUser { /** * @public - *Contains the ID of a face that was used to search for matches in a collection.
+ *A provided ID for the UserID. Unique within the collection.
*/ - SearchedFace?: SearchedFace; + UserId?: string; /** * @public - *Contains the ID of the UserID that was used to search for matches in a collection.
+ *The status of the user matched to a provided FaceID.
*/ - SearchedUser?: SearchedUser; + UserStatus?: UserStatus; } /** * @public + *The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see + * Calling Amazon Rekognition Video operations. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. + * For more information, see Giving access to multiple Amazon SNS topics.
*/ -export interface SearchUsersByImageRequest { - /** - * @public - *The ID of an existing collection containing the UserID.
- */ - CollectionId: string | undefined; - - /** - * @public - *Provides the input image either as bytes or an S3 object.
- *You pass image bytes to an Amazon Rekognition API operation by using the Bytes
- * property. For example, you would use the Bytes
property to pass an image loaded
- * from a local file system. Image bytes passed by using the Bytes
property must be
- * base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to
- * call Amazon Rekognition API operations.
For more information, see Analyzing an Image Loaded from a Local File System - * in the Amazon Rekognition Developer Guide.
- * You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the
- * S3Object
property. Images stored in an S3 bucket do not need to be
- * base64-encoded.
The region for the S3 bucket containing the S3 object must match the region you use for - * Amazon Rekognition operations.
- *If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the - * Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and - * then call the operation using the S3Object property.
- *For Amazon Rekognition to process an S3 object, the user must have permission to - * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the - * Amazon Rekognition Developer Guide.
- */ - Image: Image | undefined; - - /** - * @public - *Specifies the minimum confidence in the UserID match to return. Default value is - * 80.
- */ - UserMatchThreshold?: number; - - /** - * @public - *Maximum number of UserIDs to return.
- */ - MaxUsers?: number; - +export interface NotificationChannel { /** * @public - *A filter that specifies a quality bar for how much filtering is done to identify faces. - * Filtered faces aren't searched for in the collection. The default value is NONE.
+ *The Amazon SNS topic to which Amazon Rekognition posts the completion status.
*/ - QualityFilter?: QualityFilter; -} + SNSTopicArn: string | undefined; -/** - * @public - *Contains data regarding the input face used for a search.
- */ -export interface SearchedFaceDetails { /** * @public - *Structure containing attributes of the face that the algorithm detected.
- *A FaceDetail
object contains either the default facial attributes or all
- * facial attributes. The default attributes are BoundingBox
,
- * Confidence
, Landmarks
, Pose
, and
- * Quality
.
- * GetFaceDetection is the only Amazon Rekognition Video stored video operation that can
- * return a FaceDetail
object with all attributes. To specify which attributes to
- * return, use the FaceAttributes
input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default
- * attributes. The corresponding Start operations don't have a FaceAttributes
input
- * parameter:
GetCelebrityRecognition
- *GetPersonTracking
- *GetFaceSearch
- *The Amazon Rekognition Image DetectFaces and IndexFaces operations
- * can return all facial attributes. To specify which attributes to return, use the
- * Attributes
input parameter for DetectFaces
. For
- * IndexFaces
, use the DetectAttributes
input parameter.
The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.
*/ - FaceDetail?: FaceDetail; + RoleArn: string | undefined; } /** diff --git a/clients/client-rekognition/src/models/models_1.ts b/clients/client-rekognition/src/models/models_1.ts index e09e2e64ffaf..98de87d64322 100644 --- a/clients/client-rekognition/src/models/models_1.ts +++ b/clients/client-rekognition/src/models/models_1.ts @@ -3,25 +3,484 @@ import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-cli import { BlackFrame, + BoundingBox, + Celebrity, + ComparedFace, ConnectedHomeSettingsForUpdate, DatasetChanges, DetectionFilter, FaceAttributes, FaceDetail, + FaceMatch, + Image, KinesisVideoStreamStartSelector, LabelDetectionFeatureName, LabelDetectionSettings, + MatchedUser, + MediaAnalysisInput, + MediaAnalysisOperationsConfig, + MediaAnalysisOutputConfig, NotificationChannel, + OrientationCorrection, ProjectVersionStatus, + QualityFilter, RegionOfInterest, - SearchedFaceDetails, SegmentType, StreamProcessorDataSharingPreference, - UserMatch, Video, } from "./models_0"; import { RekognitionServiceException as __BaseException } from "./RekognitionServiceException"; +/** + * @public + */ +export interface PutProjectPolicyRequest { + /** + * @public + *The Amazon Resource Name (ARN) of the project that the project policy is attached to.
+ */ + ProjectArn: string | undefined; + + /** + * @public + *A name for the policy.
+ */ + PolicyName: string | undefined; + + /** + * @public + *The revision ID for the Project Policy. Each time you modify a policy, Amazon Rekognition Custom Labels
+ * generates and assigns a new PolicyRevisionId
and then deletes the previous version of the
+ * policy.
A resource policy to add to the model. The policy is a JSON structure that contains + * one or more statements that define the policy. + * The policy must follow the IAM syntax. For + * more information about the contents of a JSON policy document, see + * IAM JSON policy reference.
+ */ + PolicyDocument: string | undefined; +} + +/** + * @public + */ +export interface PutProjectPolicyResponse { + /** + * @public + *The ID of the project policy.
+ */ + PolicyRevisionId?: string; +} + +/** + * @public + */ +export interface RecognizeCelebritiesRequest { + /** + * @public + *The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to + * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.
+ *If you are using an AWS SDK to call Amazon Rekognition, you might not need to
+ * base64-encode image bytes passed using the Bytes
field. For more information, see
+ * Images in the Amazon Rekognition developer guide.
Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64
+ * celebrities in an image. Each celebrity object includes the following attributes:
+ * Face
, Confidence
, Emotions
, Landmarks
,
+ * Pose
, Quality
, Smile
, Id
,
+ * KnownGender
, MatchConfidence
, Name
,
+ * Urls
.
Details about each unrecognized face in the image.
+ */ + UnrecognizedFaces?: ComparedFace[]; + + /** + * @public + *Support for estimating image orientation using the the OrientationCorrection field + * has ceased as of August 2021. Any returned values for this field included in an API response + * will always be NULL.
+ *The orientation of the input image (counterclockwise direction). If your application
+ * displays the image, you can use this value to correct the orientation. The bounding box
+ * coordinates returned in CelebrityFaces
and UnrecognizedFaces
+ * represent face locations before the image orientation is corrected.
If the input image is in .jpeg format, it might contain exchangeable image (Exif)
+ * metadata that includes the image's orientation. If so, and the Exif metadata for the input
+ * image populates the orientation field, the value of OrientationCorrection
is
+ * null. The CelebrityFaces
and UnrecognizedFaces
bounding box
+ * coordinates represent face locations after Exif metadata is used to correct the image
+ * orientation. Images in .png format don't contain Exif metadata.
ID of the collection the face belongs to.
+ */ + CollectionId: string | undefined; + + /** + * @public + *ID of a face to find matches for in the collection.
+ */ + FaceId: string | undefined; + + /** + * @public + *Maximum number of faces to return. The operation returns the maximum number of faces + * with the highest confidence in the match.
+ */ + MaxFaces?: number; + + /** + * @public + *Optional value specifying the minimum confidence in the face match to return. For + * example, don't return any matches where confidence in matches is less than 70%. The default + * value is 80%.
+ */ + FaceMatchThreshold?: number; +} + +/** + * @public + */ +export interface SearchFacesResponse { + /** + * @public + *ID of the face that was searched for matches in a collection.
+ */ + SearchedFaceId?: string; + + /** + * @public + *An array of faces that matched the input face, along with the confidence in the + * match.
+ */ + FaceMatches?: FaceMatch[]; + + /** + * @public + *Version number of the face detection model associated with the input collection
+ * (CollectionId
).
ID of the collection to search.
+ */ + CollectionId: string | undefined; + + /** + * @public + *The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to + * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.
+ *If you are using an AWS SDK to call Amazon Rekognition, you might not need to
+ * base64-encode image bytes passed using the Bytes
field. For more information, see
+ * Images in the Amazon Rekognition developer guide.
Maximum number of faces to return. The operation returns the maximum number of faces + * with the highest confidence in the match.
+ */ + MaxFaces?: number; + + /** + * @public + *(Optional) Specifies the minimum confidence in the face match to return. For example, + * don't return any matches where confidence in matches is less than 70%. The default value is + * 80%.
+ */ + FaceMatchThreshold?: number; + + /** + * @public + *A filter that specifies a quality bar for how much filtering is done to identify faces.
+ * Filtered faces aren't searched for in the collection. If you specify AUTO
,
+ * Amazon Rekognition chooses the quality bar. If you specify LOW
, MEDIUM
, or
+ * HIGH
, filtering removes all faces that don’t meet the chosen quality bar.
+ * The quality bar is
+ * based on a variety of common use cases. Low-quality detections can occur for a number of
+ * reasons. Some examples are an object that's misidentified as a face, a face that's too blurry,
+ * or a face with a pose that's too extreme to use. If you specify NONE
, no
+ * filtering is performed. The default value is NONE
.
To use quality filtering, the collection you are using must be associated with version 3 + * of the face model or higher.
+ */ + QualityFilter?: QualityFilter; +} + +/** + * @public + */ +export interface SearchFacesByImageResponse { + /** + * @public + *The bounding box around the face in the input image that Amazon Rekognition used for the + * search.
+ */ + SearchedFaceBoundingBox?: BoundingBox; + + /** + * @public + *The level of confidence that the searchedFaceBoundingBox
, contains a
+ * face.
An array of faces that match the input face, along with the confidence in the + * match.
+ */ + FaceMatches?: FaceMatch[]; + + /** + * @public + *Version number of the face detection model associated with the input collection
+ * (CollectionId
).
The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a + * FaceId is provided, UserId isn’t required to be present in the Collection.
+ */ + CollectionId: string | undefined; + + /** + * @public + *ID for the existing User.
+ */ + UserId?: string; + + /** + * @public + *ID for the existing face.
+ */ + FaceId?: string; + + /** + * @public + *Optional value that specifies the minimum confidence in the matched UserID to return. + * Default value of 80.
+ */ + UserMatchThreshold?: number; + + /** + * @public + *Maximum number of identities to return.
+ */ + MaxUsers?: number; +} + +/** + * @public + *Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for + * search.
+ */ +export interface SearchedFace { + /** + * @public + *Unique identifier assigned to the face.
+ */ + FaceId?: string; +} + +/** + * @public + *Contains metadata about a User searched for within a collection.
+ */ +export interface SearchedUser { + /** + * @public + *A provided ID for the UserID. Unique within the collection.
+ */ + UserId?: string; +} + +/** + * @public + *Provides UserID metadata along with the confidence in the match of this UserID with the + * input face.
+ */ +export interface UserMatch { + /** + * @public + *Describes the UserID metadata.
+ */ + Similarity?: number; + + /** + * @public + *Confidence in the match of this UserID with the input face.
+ */ + User?: MatchedUser; +} + +/** + * @public + */ +export interface SearchUsersResponse { + /** + * @public + *An array of UserMatch objects that matched the input face along with the confidence in the + * match. Array will be empty if there are no matches.
+ */ + UserMatches?: UserMatch[]; + + /** + * @public + *Version number of the face detection model associated with the input CollectionId.
+ */ + FaceModelVersion?: string; + + /** + * @public + *Contains the ID of a face that was used to search for matches in a collection.
+ */ + SearchedFace?: SearchedFace; + + /** + * @public + *Contains the ID of the UserID that was used to search for matches in a collection.
+ */ + SearchedUser?: SearchedUser; +} + +/** + * @public + */ +export interface SearchUsersByImageRequest { + /** + * @public + *The ID of an existing collection containing the UserID.
+ */ + CollectionId: string | undefined; + + /** + * @public + *Provides the input image either as bytes or an S3 object.
+ *You pass image bytes to an Amazon Rekognition API operation by using the Bytes
+ * property. For example, you would use the Bytes
property to pass an image loaded
+ * from a local file system. Image bytes passed by using the Bytes
property must be
+ * base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to
+ * call Amazon Rekognition API operations.
For more information, see Analyzing an Image Loaded from a Local File System + * in the Amazon Rekognition Developer Guide.
+ * You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the
+ * S3Object
property. Images stored in an S3 bucket do not need to be
+ * base64-encoded.
The region for the S3 bucket containing the S3 object must match the region you use for + * Amazon Rekognition operations.
+ *If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the + * Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and + * then call the operation using the S3Object property.
+ *For Amazon Rekognition to process an S3 object, the user must have permission to + * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the + * Amazon Rekognition Developer Guide.
+ */ + Image: Image | undefined; + + /** + * @public + *Specifies the minimum confidence in the UserID match to return. Default value is + * 80.
+ */ + UserMatchThreshold?: number; + + /** + * @public + *Maximum number of UserIDs to return.
+ */ + MaxUsers?: number; + + /** + * @public + *A filter that specifies a quality bar for how much filtering is done to identify faces. + * Filtered faces aren't searched for in the collection. The default value is NONE.
+ */ + QualityFilter?: QualityFilter; +} + +/** + * @public + *Contains data regarding the input face used for a search.
+ */ +export interface SearchedFaceDetails { + /** + * @public + *Structure containing attributes of the face that the algorithm detected.
+ *A FaceDetail
object contains either the default facial attributes or all
+ * facial attributes. The default attributes are BoundingBox
,
+ * Confidence
, Landmarks
, Pose
, and
+ * Quality
.
+ * GetFaceDetection is the only Amazon Rekognition Video stored video operation that can
+ * return a FaceDetail
object with all attributes. To specify which attributes to
+ * return, use the FaceAttributes
input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default
+ * attributes. The corresponding Start operations don't have a FaceAttributes
input
+ * parameter:
GetCelebrityRecognition
+ *GetPersonTracking
+ *GetFaceSearch
+ *The Amazon Rekognition Image DetectFaces and IndexFaces operations
+ * can return all facial attributes. To specify which attributes to return, use the
+ * Attributes
input parameter for DetectFaces
. For
+ * IndexFaces
, use the DetectAttributes
input parameter.
Idempotency token used to prevent the accidental creation of duplicate versions. If
+ * you use the same token with multiple StartMediaAnalysisJobRequest
requests, the same
+ * response is returned. Use ClientRequestToken
to prevent the same request from being
+ * processed more than once.
The name of the job. Does not have to be unique.
+ */ + JobName?: string; + + /** + * @public + *Configuration options for the media analysis job to be created.
+ */ + OperationsConfig: MediaAnalysisOperationsConfig | undefined; + + /** + * @public + *Input data to be analyzed by the job.
+ */ + Input: MediaAnalysisInput | undefined; + + /** + * @public + *The Amazon S3 bucket location to store the results.
+ */ + OutputConfig: MediaAnalysisOutputConfig | undefined; + + /** + * @public + *The identifier of customer managed AWS KMS key (name or ARN). The key + * is used to encrypt images copied into the service. The key is also used + * to encrypt results and manifest files written to the output Amazon S3 bucket.
+ */ + KmsKeyId?: string; +} + +/** + * @public + */ +export interface StartMediaAnalysisJobResponse { + /** + * @public + *Identifier for the created job.
+ */ + JobId: string | undefined; +} + /** * @public */ diff --git a/clients/client-rekognition/src/pagination/ListMediaAnalysisJobsPaginator.ts b/clients/client-rekognition/src/pagination/ListMediaAnalysisJobsPaginator.ts new file mode 100644 index 000000000000..2400c3d680f8 --- /dev/null +++ b/clients/client-rekognition/src/pagination/ListMediaAnalysisJobsPaginator.ts @@ -0,0 +1,50 @@ +// smithy-typescript generated code +import { Paginator } from "@smithy/types"; + +import { + ListMediaAnalysisJobsCommand, + ListMediaAnalysisJobsCommandInput, + ListMediaAnalysisJobsCommandOutput, +} from "../commands/ListMediaAnalysisJobsCommand"; +import { RekognitionClient } from "../RekognitionClient"; +import { RekognitionPaginationConfiguration } from "./Interfaces"; + +/** + * @internal + */ +const makePagedClientRequest = async ( + client: RekognitionClient, + input: ListMediaAnalysisJobsCommandInput, + ...args: any +): PromiseRetrieves the results for a given media analysis job. \n Takes a JobId
returned by StartMediaAnalysisJob.
Unique identifier for the media analysis job for which you want to retrieve results.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.rekognition#GetMediaAnalysisJobResponse": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "The identifier for the media analysis job.
", + "smithy.api#required": {} + } + }, + "JobName": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobName", + "traits": { + "smithy.api#documentation": "The name of the media analysis job.
" + } + }, + "OperationsConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOperationsConfig", + "traits": { + "smithy.api#documentation": "Operation configurations that were provided during job creation.
", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobStatus", + "traits": { + "smithy.api#documentation": "The current status of the media analysis job.
", + "smithy.api#required": {} + } + }, + "FailureDetails": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobFailureDetails", + "traits": { + "smithy.api#documentation": "Details about the error that resulted in failure of the job.
" + } + }, + "CreationTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "The Unix date and time when the job was started.
", + "smithy.api#required": {} + } + }, + "CompletionTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "The Unix date and time when the job finished.
" + } + }, + "Input": { + "target": "com.amazonaws.rekognition#MediaAnalysisInput", + "traits": { + "smithy.api#documentation": "Reference to the input manifest that was provided in the job creation request.
", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOutputConfig", + "traits": { + "smithy.api#documentation": "Output configuration that was provided in the creation request.
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.rekognition#KmsKeyId", + "traits": { + "smithy.api#documentation": "KMS Key that was provided in the creation request.
" + } + }, + "Results": { + "target": "com.amazonaws.rekognition#MediaAnalysisResults", + "traits": { + "smithy.api#documentation": "Output manifest that contains prediction results.
" + } + }, + "ManifestSummary": { + "target": "com.amazonaws.rekognition#MediaAnalysisManifestSummary", + "traits": { + "smithy.api#documentation": "The summary manifest provides statistics on input manifest and errors identified in the input manifest.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.rekognition#GetPersonTracking": { "type": "operation", "input": { @@ -7376,6 +7509,27 @@ "smithy.api#error": "client" } }, + "com.amazonaws.rekognition#InvalidManifestException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rekognition#String" + }, + "Code": { + "target": "com.amazonaws.rekognition#String" + }, + "Logref": { + "target": "com.amazonaws.rekognition#String", + "traits": { + "smithy.api#documentation": "A universally unique identifier (UUID) for the request.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Indicates that a provided manifest file is empty or larger than the allowed limit.
", + "smithy.api#error": "client" + } + }, "com.amazonaws.rekognition#InvalidPaginationTokenException": { "type": "structure", "members": { @@ -8588,6 +8742,93 @@ "smithy.api#output": {} } }, + "com.amazonaws.rekognition#ListMediaAnalysisJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobsRequest" + }, + "output": { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rekognition#AccessDeniedException" + }, + { + "target": "com.amazonaws.rekognition#InternalServerError" + }, + { + "target": "com.amazonaws.rekognition#InvalidPaginationTokenException" + }, + { + "target": "com.amazonaws.rekognition#InvalidParameterException" + }, + { + "target": "com.amazonaws.rekognition#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.rekognition#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of media analysis jobs. Results are sorted by CreationTimestamp
in descending order.
Pagination token, if the previous response was incomplete.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobsPageSize", + "traits": { + "smithy.api#documentation": "The maximum number of results to return per paginated call. The largest value user can specify is 100. \n If user specifies a value greater than 100, an InvalidParameterException
error occurs. The default value is 100.
Pagination token, if the previous response was incomplete.
" + } + }, + "MediaAnalysisJobs": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobDescriptions", + "traits": { + "smithy.api#documentation": "Contains a list of all media analysis jobs.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.rekognition#ListProjectPolicies": { "type": "operation", "input": { @@ -8943,48 +9184,401 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.rekognition#LivenessOutputConfig": { + "com.amazonaws.rekognition#LivenessOutputConfig": { + "type": "structure", + "members": { + "S3Bucket": { + "target": "com.amazonaws.rekognition#S3Bucket", + "traits": { + "smithy.api#documentation": "The path to an AWS Amazon S3 bucket used to store Face Liveness session results.
", + "smithy.api#required": {} + } + }, + "S3KeyPrefix": { + "target": "com.amazonaws.rekognition#LivenessS3KeyPrefix", + "traits": { + "smithy.api#documentation": "The prefix prepended to the output files for the Face Liveness session results.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains settings that specify the location of an Amazon S3 bucket used to store the output of\n a Face Liveness session. Note that the S3 bucket must be located in the caller's AWS account\n and in the same region as the Face Liveness end-point. Additionally, the Amazon S3 object keys are\n auto-generated by the Face Liveness system.
" + } + }, + "com.amazonaws.rekognition#LivenessS3KeyPrefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 950 + }, + "smithy.api#pattern": "^\\S*$" + } + }, + "com.amazonaws.rekognition#LivenessSessionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.rekognition#LivenessSessionStatus": { + "type": "enum", + "members": { + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } + } + } + }, + "com.amazonaws.rekognition#MalformedPolicyDocumentException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rekognition#String" + }, + "Code": { + "target": "com.amazonaws.rekognition#String" + }, + "Logref": { + "target": "com.amazonaws.rekognition#String", + "traits": { + "smithy.api#documentation": "A universally unique identifier (UUID) for the request.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The format of the project policy document that you supplied to \n PutProjectPolicy
is incorrect.
A provided ID for the UserID. Unique within the collection.
" + } + }, + "UserStatus": { + "target": "com.amazonaws.rekognition#UserStatus", + "traits": { + "smithy.api#documentation": "The status of the user matched to a provided FaceID.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains metadata for a UserID matched with a given face.
" + } + }, + "com.amazonaws.rekognition#MaxDurationInSecondsULong": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 120 + } + } + }, + "com.amazonaws.rekognition#MaxFaces": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 4096 + } + } + }, + "com.amazonaws.rekognition#MaxFacesToIndex": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.rekognition#MaxPixelThreshold": { + "type": "float", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.rekognition#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.rekognition#MaxUserResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.rekognition#MediaAnalysisDetectModerationLabelsConfig": { + "type": "structure", + "members": { + "MinConfidence": { + "target": "com.amazonaws.rekognition#Percent", + "traits": { + "smithy.api#documentation": "Specifies the minimum confidence level for the moderation labels to return. Amazon Rekognition\n doesn't return any labels with a confidence level lower than this specified value.\n
" + } + }, + "ProjectVersion": { + "target": "com.amazonaws.rekognition#ProjectVersionId", + "traits": { + "smithy.api#documentation": "Specifies the custom moderation model to be used during the label detection job. \n If not provided the pre-trained model is used.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Configuration for Moderation Labels Detection.
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisInput": { + "type": "structure", + "members": { + "S3Object": { + "target": "com.amazonaws.rekognition#S3Object", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains input information for a media analysis job.
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobDescription": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "The identifier for a media analysis job.
", + "smithy.api#required": {} + } + }, + "JobName": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobName", + "traits": { + "smithy.api#documentation": "The name of a media analysis job.
" + } + }, + "OperationsConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOperationsConfig", + "traits": { + "smithy.api#documentation": "Operation configurations that were provided during job creation.
", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobStatus", + "traits": { + "smithy.api#documentation": "The status of the media analysis job being retrieved.
", + "smithy.api#required": {} + } + }, + "FailureDetails": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobFailureDetails", + "traits": { + "smithy.api#documentation": "Details about the error that resulted in failure of the job.
" + } + }, + "CreationTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "The Unix date and time when the job was started.
", + "smithy.api#required": {} + } + }, + "CompletionTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "The Unix date and time when the job finished.
" + } + }, + "Input": { + "target": "com.amazonaws.rekognition#MediaAnalysisInput", + "traits": { + "smithy.api#documentation": "Reference to the input manifest that was provided in the job creation request.
", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOutputConfig", + "traits": { + "smithy.api#documentation": "Output configuration that was provided in the creation request.
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.rekognition#KmsKeyId", + "traits": { + "smithy.api#documentation": "KMS Key that was provided in the creation request.
" + } + }, + "Results": { + "target": "com.amazonaws.rekognition#MediaAnalysisResults", + "traits": { + "smithy.api#documentation": "Output manifest that contains prediction results.
" + } + }, + "ManifestSummary": { + "target": "com.amazonaws.rekognition#MediaAnalysisManifestSummary", + "traits": { + "smithy.api#documentation": "Provides statistics on input manifest and errors identified in the input manifest.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Description for a media analysis job.
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobDescriptions": { + "type": "list", + "member": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobDescription" + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobFailureCode": { + "type": "enum", + "members": { + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + }, + "INVALID_S3_OBJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_S3_OBJECT" + } + }, + "INVALID_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_MANIFEST" + } + }, + "INVALID_OUTPUT_CONFIG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_OUTPUT_CONFIG" + } + }, + "INVALID_KMS_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_KMS_KEY" + } + }, + "ACCESS_DENIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCESS_DENIED" + } + }, + "RESOURCE_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESOURCE_NOT_FOUND" + } + }, + "RESOURCE_NOT_READY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESOURCE_NOT_READY" + } + }, + "THROTTLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THROTTLED" + } + } + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobFailureDetails": { "type": "structure", "members": { - "S3Bucket": { - "target": "com.amazonaws.rekognition#S3Bucket", + "Code": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobFailureCode", "traits": { - "smithy.api#documentation": "The path to an AWS Amazon S3 bucket used to store Face Liveness session results.
", - "smithy.api#required": {} + "smithy.api#documentation": "Error code for the failed job.
" } }, - "S3KeyPrefix": { - "target": "com.amazonaws.rekognition#LivenessS3KeyPrefix", + "Message": { + "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "The prefix prepended to the output files for the Face Liveness session results.
" + "smithy.api#documentation": "Human readable error message.
" } } }, "traits": { - "smithy.api#documentation": "Contains settings that specify the location of an Amazon S3 bucket used to store the output of\n a Face Liveness session. Note that the S3 bucket must be located in the caller's AWS account\n and in the same region as the Face Liveness end-point. Additionally, the Amazon S3 object keys are\n auto-generated by the Face Liveness system.
" + "smithy.api#documentation": "Details about the error that resulted in failure of the job.
" } }, - "com.amazonaws.rekognition#LivenessS3KeyPrefix": { + "com.amazonaws.rekognition#MediaAnalysisJobId": { "type": "string", "traits": { "smithy.api#length": { - "min": 0, - "max": 950 + "min": 1, + "max": 64 }, - "smithy.api#pattern": "^\\S*$" + "smithy.api#pattern": "^[a-zA-Z0-9-_]+$" } }, - "com.amazonaws.rekognition#LivenessSessionId": { + "com.amazonaws.rekognition#MediaAnalysisJobName": { "type": "string", "traits": { "smithy.api#length": { - "min": 36, - "max": 36 + "min": 1, + "max": 64 }, - "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + "smithy.api#pattern": "^[a-zA-Z0-9_.\\-]+$" } }, - "com.amazonaws.rekognition#LivenessSessionStatus": { + "com.amazonaws.rekognition#MediaAnalysisJobStatus": { "type": "enum", "members": { "CREATED": { @@ -8993,6 +9587,12 @@ "smithy.api#enumValue": "CREATED" } }, + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, "IN_PROGRESS": { "target": "smithy.api#Unit", "traits": { @@ -9010,106 +9610,74 @@ "traits": { "smithy.api#enumValue": "FAILED" } - }, - "EXPIRED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EXPIRED" - } } } }, - "com.amazonaws.rekognition#MalformedPolicyDocumentException": { + "com.amazonaws.rekognition#MediaAnalysisManifestSummary": { "type": "structure", "members": { - "Message": { - "target": "com.amazonaws.rekognition#String" - }, - "Code": { - "target": "com.amazonaws.rekognition#String" - }, - "Logref": { - "target": "com.amazonaws.rekognition#String", + "S3Object": { + "target": "com.amazonaws.rekognition#S3Object" + } + }, + "traits": { + "smithy.api#documentation": "Summary that provides statistics on input manifest and errors identified in the input manifest.
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisOperationsConfig": { + "type": "structure", + "members": { + "DetectModerationLabels": { + "target": "com.amazonaws.rekognition#MediaAnalysisDetectModerationLabelsConfig", "traits": { - "smithy.api#documentation": "A universally unique identifier (UUID) for the request.
" + "smithy.api#documentation": "Contains configuration options for a DetectModerationLabels job.
" } } }, "traits": { - "smithy.api#documentation": "The format of the project policy document that you supplied to \n PutProjectPolicy
is incorrect.
Configuration options for a media analysis job. Configuration is operation-specific.
" } }, - "com.amazonaws.rekognition#MatchedUser": { + "com.amazonaws.rekognition#MediaAnalysisOutputConfig": { "type": "structure", "members": { - "UserId": { - "target": "com.amazonaws.rekognition#UserId", + "S3Bucket": { + "target": "com.amazonaws.rekognition#S3Bucket", "traits": { - "smithy.api#documentation": "A provided ID for the UserID. Unique within the collection.
" + "smithy.api#documentation": "Specifies the Amazon S3 bucket to contain the output of the media analysis job.
", + "smithy.api#required": {} } }, - "UserStatus": { - "target": "com.amazonaws.rekognition#UserStatus", + "S3KeyPrefix": { + "target": "com.amazonaws.rekognition#MediaAnalysisS3KeyPrefix", "traits": { - "smithy.api#documentation": "The status of the user matched to a provided FaceID.
" + "smithy.api#documentation": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have\n designated for storage.
" } } }, "traits": { - "smithy.api#documentation": "Contains metadata for a UserID matched with a given face.
" - } - }, - "com.amazonaws.rekognition#MaxDurationInSecondsULong": { - "type": "long", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 120 - } + "smithy.api#documentation": "Output configuration provided in the job creation request.
" } }, - "com.amazonaws.rekognition#MaxFaces": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 4096 + "com.amazonaws.rekognition#MediaAnalysisResults": { + "type": "structure", + "members": { + "S3Object": { + "target": "com.amazonaws.rekognition#S3Object" } - } - }, - "com.amazonaws.rekognition#MaxFacesToIndex": { - "type": "integer", + }, "traits": { - "smithy.api#range": { - "min": 1 - } + "smithy.api#documentation": "Contains the results for a media analysis job created with StartMediaAnalysisJob.
" } }, - "com.amazonaws.rekognition#MaxPixelThreshold": { - "type": "float", + "com.amazonaws.rekognition#MediaAnalysisS3KeyPrefix": { + "type": "string", "traits": { - "smithy.api#range": { + "smithy.api#length": { "min": 0, - "max": 1 - } - } - }, - "com.amazonaws.rekognition#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1 - } - } - }, - "com.amazonaws.rekognition#MaxUserResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 500 - } + "max": 800 + }, + "smithy.api#pattern": "^\\S*$" } }, "com.amazonaws.rekognition#MinCoveragePercentage": { @@ -10538,6 +11106,9 @@ { "target": "com.amazonaws.rekognition#GetLabelDetection" }, + { + "target": "com.amazonaws.rekognition#GetMediaAnalysisJob" + }, { "target": "com.amazonaws.rekognition#GetPersonTracking" }, @@ -10562,6 +11133,9 @@ { "target": "com.amazonaws.rekognition#ListFaces" }, + { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobs" + }, { "target": "com.amazonaws.rekognition#ListProjectPolicies" }, @@ -10607,6 +11181,9 @@ { "target": "com.amazonaws.rekognition#StartLabelDetection" }, + { + "target": "com.amazonaws.rekognition#StartMediaAnalysisJob" + }, { "target": "com.amazonaws.rekognition#StartPersonTracking" }, @@ -10697,7 +11274,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10740,7 +11316,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -10753,7 +11330,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10767,7 +11343,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10790,7 +11365,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10825,7 +11399,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -10836,14 +11409,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -10857,14 +11432,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -10873,11 +11446,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -10888,14 +11461,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -10909,7 +11484,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10929,7 +11503,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -10940,14 +11513,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -10958,9 +11533,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -13089,6 +13666,145 @@ "smithy.api#output": {} } }, + "com.amazonaws.rekognition#StartMediaAnalysisJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.rekognition#StartMediaAnalysisJobRequest" + }, + "output": { + "target": "com.amazonaws.rekognition#StartMediaAnalysisJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rekognition#AccessDeniedException" + }, + { + "target": "com.amazonaws.rekognition#IdempotentParameterMismatchException" + }, + { 
+ "target": "com.amazonaws.rekognition#InternalServerError" + }, + { + "target": "com.amazonaws.rekognition#InvalidManifestException" + }, + { + "target": "com.amazonaws.rekognition#InvalidParameterException" + }, + { + "target": "com.amazonaws.rekognition#InvalidS3ObjectException" + }, + { + "target": "com.amazonaws.rekognition#LimitExceededException" + }, + { + "target": "com.amazonaws.rekognition#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.rekognition#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rekognition#ResourceNotReadyException" + }, + { + "target": "com.amazonaws.rekognition#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "Initiates a new media analysis job. Accepts a manifest file in an Amazon S3 bucket. The\n output is a manifest file and a summary of the manifest stored in the Amazon S3 bucket.
", + "smithy.api#examples": [ + { + "title": "StartMediaAnalysisJob", + "documentation": "Initiates a new media analysis job.", + "input": { + "JobName": "job-name", + "OperationsConfig": { + "DetectModerationLabels": { + "MinConfidence": 50, + "ProjectVersion": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/1/1690556751958" + } + }, + "Input": { + "S3Object": { + "Bucket": "input-bucket", + "Name": "input-manifest.json" + } + }, + "OutputConfig": { + "S3Bucket": "output-bucket", + "S3KeyPrefix": "output-location" + } + }, + "output": { + "JobId": "861a0645d98ef88efb75477628c011c04942d9d5f58faf2703c393c8cf8c1537" + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.rekognition#StartMediaAnalysisJobRequest": { + "type": "structure", + "members": { + "ClientRequestToken": { + "target": "com.amazonaws.rekognition#ClientRequestToken", + "traits": { + "smithy.api#documentation": "Idempotency token used to prevent the accidental creation of duplicate versions. If\n you use the same token with multiple StartMediaAnalysisJobRequest
requests, the same\n response is returned. Use ClientRequestToken
to prevent the same request from being\n processed more than once.
The name of the job. Does not have to be unique.
" + } + }, + "OperationsConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOperationsConfig", + "traits": { + "smithy.api#documentation": "Configuration options for the media analysis job to be created.
", + "smithy.api#required": {} + } + }, + "Input": { + "target": "com.amazonaws.rekognition#MediaAnalysisInput", + "traits": { + "smithy.api#documentation": "Input data to be analyzed by the job.
", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOutputConfig", + "traits": { + "smithy.api#documentation": "The Amazon S3 bucket location to store the results.
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.rekognition#KmsKeyId", + "traits": { + "smithy.api#documentation": "The identifier of customer managed AWS KMS key (name or ARN). The key \n is used to encrypt images copied into the service. The key is also used \n to encrypt results and manifest files written to the output Amazon S3 bucket.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.rekognition#StartMediaAnalysisJobResponse": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "Identifier for the created job.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.rekognition#StartPersonTracking": { "type": "operation", "input": {