Client library for Amazon Rekognition
npm install --save @datafire/amazonaws_rekognition
let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({
  accessKeyId: "",
  secretAccessKey: "",
  region: ""
});

amazonaws_rekognition.ListCollections({}).then(data => {
  console.log(data);
});
This is the Amazon Rekognition API reference.
amazonaws_rekognition.CompareFaces({
"SourceImage": null,
"TargetImage": null
}, context)
- input
object
- QualityFilter
- SimilarityThreshold
- SourceImage required
- Bytes
- S3Object
- Bucket
- Name
- Version
- TargetImage required
- Bytes
- S3Object
- Bucket
- Name
- Version
- output CompareFacesResponse
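For example, a minimal sketch of comparing a source face against faces in a target image; the bucket and object names are placeholders, not values from this package:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

// Compare the largest face in the source image against the faces in the target image.
amazonaws_rekognition.CompareFaces({
  SourceImage: { S3Object: { Bucket: "my-bucket", Name: "source.jpg" } },  // placeholder bucket/key
  TargetImage: { S3Object: { Bucket: "my-bucket", Name: "target.jpg" } },  // placeholder bucket/key
  SimilarityThreshold: 80
}).then(data => {
  // FaceMatches holds one CompareFacesMatch per matching face in the target image.
  (data.FaceMatches || []).forEach(match => {
    console.log(match.Similarity, match.Face.BoundingBox);
  });
});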
amazonaws_rekognition.CreateCollection({
"CollectionId": null
}, context)
- input
object
- CollectionId required
- output CreateCollectionResponse
amazonaws_rekognition.CreateProject({
"ProjectName": null
}, context)
- input
object
- ProjectName required
- output CreateProjectResponse
amazonaws_rekognition.CreateProjectVersion({
"ProjectArn": null,
"VersionName": null,
"OutputConfig": null,
"TrainingData": null,
"TestingData": null
}, context)
- input
object
- output CreateProjectVersionResponse
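A rough sketch of starting a Custom Labels training job; the ARN, bucket, and manifest key are placeholders, and the field shapes follow the TrainingData, Asset, GroundTruthManifest, OutputConfig, and TestingData definitions later in this document:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.CreateProjectVersion({
  ProjectArn: "arn:aws:rekognition:us-east-1:111111111111:project/example/1234567890",  // placeholder
  VersionName: "example-model.1",
  OutputConfig: { S3Bucket: "my-training-output", S3KeyPrefix: "output/" },              // placeholder bucket
  TrainingData: {
    Assets: [{ GroundTruthManifest: { S3Object: { Bucket: "my-training-data", Name: "train.manifest" } } }]
  },
  TestingData: { AutoCreate: true }  // let Rekognition split off a test set (see TestingData below)
}).then(data => {
  // Training runs asynchronously; poll DescribeProjectVersions with this ARN for status.
  console.log(data.ProjectVersionArn);
});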
amazonaws_rekognition.CreateStreamProcessor({
"Input": null,
"Output": null,
"Name": null,
"Settings": null,
"RoleArn": null
}, context)
- input
object
- Input required
- KinesisVideoStream
- Arn
- KinesisVideoStream
- Name required
- Output required
- KinesisDataStream
- Arn
- KinesisDataStream
- RoleArn required
- Settings required
- FaceSearch
- CollectionId
- FaceMatchThreshold
- FaceSearch
- Input required
- output CreateStreamProcessorResponse
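A hedged sketch of wiring up and starting a stream processor; the Kinesis ARNs, role ARN, and collection ID are placeholders:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.CreateStreamProcessor({
  Name: "example-processor",
  Input: { KinesisVideoStream: { Arn: "arn:aws:kinesisvideo:..." } },  // placeholder ARN
  Output: { KinesisDataStream: { Arn: "arn:aws:kinesis:..." } },       // placeholder ARN
  Settings: { FaceSearch: { CollectionId: "my-collection", FaceMatchThreshold: 85 } },
  RoleArn: "arn:aws:iam::111111111111:role/RekognitionStreamRole"      // placeholder role
}).then(() => {
  // Start the processor to begin analyzing the stream (see StreamProcessorStatus for the states).
  return amazonaws_rekognition.StartStreamProcessor({ Name: "example-processor" });
});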
amazonaws_rekognition.DeleteCollection({
"CollectionId": null
}, context)
- input
object
- CollectionId required
- output DeleteCollectionResponse
amazonaws_rekognition.DeleteFaces({
"CollectionId": null,
"FaceIds": null
}, context)
- input
object
- CollectionId required
- FaceIds required
- items FaceId
- output DeleteFacesResponse
amazonaws_rekognition.DeleteProject({
"ProjectArn": null
}, context)
- input
object
- ProjectArn required
- output DeleteProjectResponse
amazonaws_rekognition.DeleteProjectVersion({
"ProjectVersionArn": null
}, context)
- input
object
- ProjectVersionArn required
- output DeleteProjectVersionResponse
amazonaws_rekognition.DeleteStreamProcessor({
"Name": null
}, context)
- input
object
- Name required
- output DeleteStreamProcessorResponse
amazonaws_rekognition.DescribeCollection({
"CollectionId": null
}, context)
- input
object
- CollectionId required
- output DescribeCollectionResponse
amazonaws_rekognition.DescribeProjectVersions({
"ProjectArn": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- MaxResults
- NextToken
- ProjectArn required
- VersionNames
- items VersionName
- MaxResults
- output DescribeProjectVersionsResponse
amazonaws_rekognition.DescribeProjects({}, context)
- input
object
- MaxResults
string
- NextToken
string
- MaxResults
- NextToken
- MaxResults
- output DescribeProjectsResponse
amazonaws_rekognition.DescribeStreamProcessor({
"Name": null
}, context)
- input
object
- Name required
- output DescribeStreamProcessorResponse
amazonaws_rekognition.DetectCustomLabels({
"ProjectVersionArn": null,
"Image": {}
}, context)
- input
object
- Image required Image
- MaxResults
- MinConfidence
- ProjectVersionArn required
- output DetectCustomLabelsResponse
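A sketch of calling a trained Custom Labels model, assuming the model version has already been started with StartProjectVersion; the ARN and S3 names are placeholders:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.DetectCustomLabels({
  ProjectVersionArn: "arn:aws:rekognition:us-east-1:111111111111:project/example/version/example-model.1/1234567890",  // placeholder
  Image: { S3Object: { Bucket: "my-bucket", Name: "widget.jpg" } },  // placeholder
  MinConfidence: 70
}).then(data => {
  (data.CustomLabels || []).forEach(label => {
    console.log(label.Name, label.Confidence, label.Geometry && label.Geometry.BoundingBox);
  });
});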
amazonaws_rekognition.DetectFaces({
"Image": null
}, context)
- input
object
- Attributes
- items Attribute
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- Attributes
- output DetectFacesResponse
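For example, a minimal sketch requesting all facial attributes rather than the default set (see the Attribute and FaceDetail definitions below); the S3 names are placeholders:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.DetectFaces({
  Image: { S3Object: { Bucket: "my-bucket", Name: "group-photo.jpg" } },  // placeholder
  Attributes: ["ALL"]  // "DEFAULT" returns only BoundingBox, Confidence, Landmarks, Pose, and Quality
}).then(data => {
  (data.FaceDetails || []).forEach(face => {
    console.log(face.AgeRange, face.Gender && face.Gender.Value, face.Emotions);
  });
});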
amazonaws_rekognition.DetectLabels({
"Image": null
}, context)
- input
object
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MaxLabels
- MinConfidence
- Image required
- output DetectLabelsResponse
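A minimal sketch with a placeholder bucket and key:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.DetectLabels({
  Image: { S3Object: { Bucket: "my-bucket", Name: "photo.jpg" } },  // placeholder
  MaxLabels: 10,
  MinConfidence: 75
}).then(data => {
  (data.Labels || []).forEach(label => {
    // Each Label carries a Name, a Confidence score, Instances (bounding boxes), and Parents.
    console.log(label.Name, label.Confidence);
  });
});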
amazonaws_rekognition.DetectModerationLabels({
"Image": null
}, context)
- input
object
- HumanLoopConfig
- DataAttributes
- ContentClassifiers
- items ContentClassifier
- ContentClassifiers
- FlowDefinitionArn required
- HumanLoopName required
- DataAttributes
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MinConfidence
- HumanLoopConfig
- output DetectModerationLabelsResponse
amazonaws_rekognition.DetectProtectiveEquipment({
"Image": null
}, context)
- input
object
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- SummarizationAttributes
- MinConfidence required
- RequiredEquipmentTypes required
- items ProtectiveEquipmentType
- Image required
- output DetectProtectiveEquipmentResponse
amazonaws_rekognition.DetectText({
"Image": null
}, context)
- input
object
- Filters
- RegionsOfInterest
- items RegionOfInterest
- WordFilter DetectionFilter
- RegionsOfInterest
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- Filters
- output DetectTextResponse
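A sketch that restricts detection to a region of the image and to reasonably confident words; the bucket and key are placeholders, and the filter shapes follow the DetectTextFilters, DetectionFilter, and RegionOfInterest definitions below:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.DetectText({
  Image: { S3Object: { Bucket: "my-bucket", Name: "sign.jpg" } },  // placeholder
  Filters: {
    WordFilter: { MinConfidence: 80, MinBoundingBoxHeight: 0.05, MinBoundingBoxWidth: 0.02 },
    RegionsOfInterest: [
      { BoundingBox: { Left: 0.1, Top: 0.1, Width: 0.8, Height: 0.4 } }  // ratios of the image size
    ]
  }
}).then(data => {
  (data.TextDetections || []).forEach(t => console.log(t.Type, t.DetectedText, t.Confidence));
});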
amazonaws_rekognition.GetCelebrityInfo({
"Id": null
}, context)
- input
object
- Id required
- output GetCelebrityInfoResponse
amazonaws_rekognition.GetCelebrityRecognition({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- SortBy
- MaxResults
- output GetCelebrityRecognitionResponse
amazonaws_rekognition.GetContentModeration({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- SortBy
- MaxResults
- output GetContentModerationResponse
amazonaws_rekognition.GetFaceDetection({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- MaxResults
- output GetFaceDetectionResponse
amazonaws_rekognition.GetFaceSearch({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- SortBy
- MaxResults
- output GetFaceSearchResponse
amazonaws_rekognition.GetLabelDetection({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- SortBy
- MaxResults
- output GetLabelDetectionResponse
amazonaws_rekognition.GetPersonTracking({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- SortBy
- MaxResults
- output GetPersonTrackingResponse
amazonaws_rekognition.GetSegmentDetection({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- MaxResults
- output GetSegmentDetectionResponse
amazonaws_rekognition.GetTextDetection({
"JobId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- JobId required
- MaxResults
- NextToken
- MaxResults
- output GetTextDetectionResponse
amazonaws_rekognition.IndexFaces({
"CollectionId": null,
"Image": null
}, context)
- input
object
- CollectionId required
- DetectionAttributes
- items Attribute
- ExternalImageId
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MaxFaces
- QualityFilter
- output IndexFacesResponse
amazonaws_rekognition.ListCollections({}, context)
- input
object
- MaxResults
string
- NextToken
string
- MaxResults
- NextToken
- MaxResults
- output ListCollectionsResponse
amazonaws_rekognition.ListFaces({
"CollectionId": null
}, context)
- input
object
- MaxResults
string
- NextToken
string
- CollectionId required
- MaxResults
- NextToken
- MaxResults
- output ListFacesResponse
amazonaws_rekognition.ListStreamProcessors({}, context)
- input
object
- MaxResults
string
- NextToken
string
- MaxResults
- NextToken
- MaxResults
- output ListStreamProcessorsResponse
amazonaws_rekognition.RecognizeCelebrities({
"Image": null
}, context)
- input
object
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- Image required
- output RecognizeCelebritiesResponse
amazonaws_rekognition.SearchFaces({
"CollectionId": null,
"FaceId": null
}, context)
- input
object
- CollectionId required
- FaceId required
- FaceMatchThreshold
- MaxFaces
- output SearchFacesResponse
amazonaws_rekognition.SearchFacesByImage({
"CollectionId": null,
"Image": null
}, context)
- input
object
- CollectionId required
- FaceMatchThreshold
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MaxFaces
- QualityFilter
- output SearchFacesByImageResponse
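A rough end-to-end sketch of the collection workflow covered by CreateCollection, IndexFaces, and SearchFacesByImage; all IDs and S3 names are placeholders:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

async function indexAndSearch() {
  await amazonaws_rekognition.CreateCollection({ CollectionId: "my-collection" });

  // Index the faces in a reference image so they can be searched later.
  await amazonaws_rekognition.IndexFaces({
    CollectionId: "my-collection",
    Image: { S3Object: { Bucket: "my-bucket", Name: "employee-badge.jpg" } },  // placeholder
    ExternalImageId: "employee-42",
    MaxFaces: 1
  });

  // Search the collection using the largest face found in a new image.
  const result = await amazonaws_rekognition.SearchFacesByImage({
    CollectionId: "my-collection",
    Image: { S3Object: { Bucket: "my-bucket", Name: "door-camera.jpg" } },     // placeholder
    FaceMatchThreshold: 90,
    MaxFaces: 5
  });
  (result.FaceMatches || []).forEach(m => console.log(m.Similarity, m.Face.ExternalImageId));
}

indexAndSearch().catch(console.error);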
amazonaws_rekognition.StartCelebrityRecognition({
"Video": null
}, context)
- input
object
- ClientRequestToken
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- output StartCelebrityRecognitionResponse
amazonaws_rekognition.StartContentModeration({
"Video": null
}, context)
- input
object
- ClientRequestToken
- JobTag
- MinConfidence
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- output StartContentModerationResponse
amazonaws_rekognition.StartFaceDetection({
"Video": null
}, context)
- input
object
- ClientRequestToken
- FaceAttributes
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- output StartFaceDetectionResponse
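The Start*/Get* pairs follow the same asynchronous pattern: a Start call returns a JobId, and the matching Get call is polled (or invoked after an SNS notification via NotificationChannel) with that JobId, paging through results with NextToken. A rough polling sketch, with a placeholder bucket and key:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

async function detectFacesInVideo() {
  const { JobId } = await amazonaws_rekognition.StartFaceDetection({
    Video: { S3Object: { Bucket: "my-bucket", Name: "clip.mp4" } }  // placeholder
  });

  // Poll until the job leaves IN_PROGRESS (see VideoJobStatus); an SNS NotificationChannel avoids polling.
  let result;
  do {
    await new Promise(r => setTimeout(r, 10000));
    result = await amazonaws_rekognition.GetFaceDetection({ JobId: JobId });
  } while (result.JobStatus === "IN_PROGRESS");

  // Page through the remaining results with NextToken.
  let faces = result.Faces || [];
  while (result.NextToken) {
    result = await amazonaws_rekognition.GetFaceDetection({ JobId: JobId, NextToken: result.NextToken });
    faces = faces.concat(result.Faces || []);
  }
  faces.forEach(f => console.log(f.Timestamp, f.Face.BoundingBox));
}

detectFacesInVideo().catch(console.error);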
amazonaws_rekognition.StartFaceSearch({
"Video": null,
"CollectionId": null
}, context)
- input
object
- ClientRequestToken
- CollectionId required
- FaceMatchThreshold
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- output StartFaceSearchResponse
amazonaws_rekognition.StartLabelDetection({
"Video": null
}, context)
- input
object
- ClientRequestToken
- JobTag
- MinConfidence
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- output StartLabelDetectionResponse
amazonaws_rekognition.StartPersonTracking({
"Video": null
}, context)
- input
object
- ClientRequestToken
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- output StartPersonTrackingResponse
amazonaws_rekognition.StartProjectVersion({
"ProjectVersionArn": null,
"MinInferenceUnits": null
}, context)
- input
object
- MinInferenceUnits required
- ProjectVersionArn required
- output StartProjectVersionResponse
amazonaws_rekognition.StartSegmentDetection({
"Video": {},
"SegmentTypes": null
}, context)
- input
object
- ClientRequestToken
- Filters
- ShotFilter
- MinSegmentConfidence
- TechnicalCueFilter
- MinSegmentConfidence
- ShotFilter
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- SegmentTypes required
- items SegmentType
- Video required Video
- output StartSegmentDetectionResponse
amazonaws_rekognition.StartStreamProcessor({
"Name": null
}, context)
- input
object
- Name required
- output StartStreamProcessorResponse
amazonaws_rekognition.StartTextDetection({
"Video": {}
}, context)
- input
object
- ClientRequestToken
- Filters
- RegionsOfInterest
- items RegionOfInterest
- WordFilter
- MinBoundingBoxHeight
- MinBoundingBoxWidth
- MinConfidence
- RegionsOfInterest
- JobTag
- NotificationChannel NotificationChannel
- Video required Video
- output StartTextDetectionResponse
amazonaws_rekognition.StopProjectVersion({
"ProjectVersionArn": null
}, context)
- input
object
- ProjectVersionArn required
- output StopProjectVersionResponse
amazonaws_rekognition.StopStreamProcessor({
"Name": null
}, context)
- input
object
- Name required
- output StopStreamProcessorResponse
- AgeRange
object
:Structure containing the estimated age range, in years, for a face.
Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age ranges can overlap. A face of a 5-year-old might have an estimated range of 4-6, while the face of a 6-year-old might have an estimated range of 4-8.
- High
- Low
- Asset
object
: Assets are the images that you use to train and evaluate a model version. Assets can also contain validation information that you use to debug a failed model training.
- GroundTruthManifest GroundTruthManifest
- Assets
array
- items Asset
- Attribute
string
(values: DEFAULT, ALL)
- Attributes
array
- items Attribute
- AudioMetadata
object
: Metadata information about an audio stream. An array of AudioMetadata objects for the audio streams found in a stored video is returned by GetSegmentDetection.
- Codec
- DurationMillis
- NumberOfChannels
- SampleRate
- AudioMetadataList
array
- items AudioMetadata
- Beard
object
: Indicates whether or not the face has a beard, and the confidence level in the determination.- Confidence
- Value
- BodyPart
string
(values: FACE, HEAD, LEFT_HAND, RIGHT_HAND)
- BodyParts
array
- Boolean
boolean
- BoundingBox
object
: Identifies the bounding box around the label, face, text or personal protective equipment. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).
The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).
The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.
The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.
- Height
- Left
- Top
- Width
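Since these values are ratios, converting a returned bounding box back to pixel coordinates only requires the original image dimensions. A small helper sketch:

// Convert a ratio-based BoundingBox into pixel coordinates for an image of known size.
// Values can fall outside [0, 1] for faces that are partially off the edge of the image.
function boundingBoxToPixels(box, imageWidth, imageHeight) {
  return {
    left: Math.round(box.Left * imageWidth),
    top: Math.round(box.Top * imageHeight),
    width: Math.round(box.Width * imageWidth),
    height: Math.round(box.Height * imageHeight)
  };
}

// Using the example above: a 700x200 image with Left 0.5 and Top 0.25 maps to the pixel coordinate (350, 50).
console.log(boundingBoxToPixels({ Left: 0.5, Top: 0.25, Width: 0.1, Height: 0.3 }, 700, 200));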
- BoundingBoxHeight
number
- BoundingBoxWidth
number
- Celebrity
object
: Provides information about a celebrity recognized by the RecognizeCelebrities operation.
- CelebrityDetail
object
: Information about a recognized celebrity.- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Face
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Id
- Name
- Urls
- items Url
- BoundingBox
- CelebrityList
array
- items Celebrity
- CelebrityRecognition
object
: Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see GetCelebrityRecognition in the Amazon Rekognition Developer Guide.- Celebrity
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Face
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Id
- Name
- Urls
- items Url
- BoundingBox
- Timestamp
- Celebrity
- CelebrityRecognitionSortBy
string
(values: ID, TIMESTAMP)
- CelebrityRecognitions
array
- items CelebrityRecognition
- ClientRequestToken
string
- CollectionId
string
- CollectionIdList
array
- items CollectionId
- CompareFacesMatch
object
: Provides information about a face in a target image that matches the source image face analyzed by CompareFaces. The Face property contains the bounding box of the face in the target image. The Similarity property is the confidence that the source image face matches the face in the bounding box.
- Face
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Landmarks
- items Landmark
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- BoundingBox
- Similarity
- Face
- CompareFacesMatchList
array
- items CompareFacesMatch
- CompareFacesRequest
object
- QualityFilter
- SimilarityThreshold
- SourceImage required
- Bytes
- S3Object
- Bucket
- Name
- Version
- TargetImage required
- Bytes
- S3Object
- Bucket
- Name
- Version
- CompareFacesResponse
object
- FaceMatches
- items CompareFacesMatch
- SourceImageFace
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- BoundingBox
- SourceImageOrientationCorrection
- TargetImageOrientationCorrection
- UnmatchedFaces
- items ComparedFace
- FaceMatches
- CompareFacesUnmatchList
array
- items ComparedFace
- ComparedFace
object
: Provides face metadata for target image faces that are analyzed by CompareFaces and RecognizeCelebrities.
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Landmarks
- items Landmark
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- BoundingBox
- ComparedFaceList
array
- items ComparedFace
- ComparedSourceImageFace
object
: Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- BoundingBox
- ContentClassifier
string
(values: FreeOfPersonallyIdentifiableInformation, FreeOfAdultContent)
- ContentClassifiers
array
- items ContentClassifier
- ContentModerationDetection
object
: Information about an unsafe content label detection in a stored video.- ModerationLabel
- Confidence
- Name
- ParentName
- Timestamp
- ModerationLabel
- ContentModerationDetections
array
- ContentModerationSortBy
string
(values: NAME, TIMESTAMP)
- CoversBodyPart
object
: Information about an item of Personal Protective Equipment covering a corresponding body part. For more information, see DetectProtectiveEquipment.- Confidence
- Value
- CreateCollectionRequest
object
- CollectionId required
- CreateCollectionResponse
object
- CollectionArn
- FaceModelVersion
- StatusCode
- CreateProjectRequest
object
- ProjectName required
- CreateProjectResponse
object
- ProjectArn
- CreateProjectVersionRequest
object
- CreateProjectVersionResponse
object
- ProjectVersionArn
- CreateStreamProcessorRequest
object
- Input required
- KinesisVideoStream
- Arn
- KinesisVideoStream
- Name required
- Output required
- KinesisDataStream
- Arn
- KinesisDataStream
- RoleArn required
- Settings required
- FaceSearch
- CollectionId
- FaceMatchThreshold
- FaceSearch
- Input required
- CreateStreamProcessorResponse
object
- StreamProcessorArn
- CustomLabel
object
: A custom label detected in an image by a call to DetectCustomLabels.- Confidence
- Geometry
- BoundingBox
- Height
- Left
- Top
- Width
- Polygon
- items Point
- BoundingBox
- Name
- CustomLabels
array
- items CustomLabel
- DateTime
string
- Degree
number
- DeleteCollectionRequest
object
- CollectionId required
- DeleteCollectionResponse
object
- StatusCode
- DeleteFacesRequest
object
- CollectionId required
- FaceIds required
- items FaceId
- DeleteFacesResponse
object
- DeletedFaces
- items FaceId
- DeletedFaces
- DeleteProjectRequest
object
- ProjectArn required
- DeleteProjectResponse
object
- Status
- DeleteProjectVersionRequest
object
- ProjectVersionArn required
- DeleteProjectVersionResponse
object
- Status
- DeleteStreamProcessorRequest
object
- Name required
- DeleteStreamProcessorResponse
object
- DescribeCollectionRequest
object
- CollectionId required
- DescribeCollectionResponse
object
- CollectionARN
- CreationTimestamp
- FaceCount
- FaceModelVersion
- DescribeProjectVersionsRequest
object
- MaxResults
- NextToken
- ProjectArn required
- VersionNames
- items VersionName
- DescribeProjectVersionsResponse
object
- NextToken
- ProjectVersionDescriptions
- DescribeProjectsRequest
object
- MaxResults
- NextToken
- DescribeProjectsResponse
object
- NextToken
- ProjectDescriptions
- items ProjectDescription
- DescribeStreamProcessorRequest
object
- Name required
- DescribeStreamProcessorResponse
object
- CreationTimestamp
- Input
- KinesisVideoStream
- Arn
- KinesisVideoStream
- LastUpdateTimestamp
- Name
- Output
- KinesisDataStream
- Arn
- KinesisDataStream
- RoleArn
- Settings
- FaceSearch
- CollectionId
- FaceMatchThreshold
- FaceSearch
- Status
- StatusMessage
- StreamProcessorArn
- DetectCustomLabelsRequest
object
- Image required Image
- MaxResults
- MinConfidence
- ProjectVersionArn required
- DetectCustomLabelsResponse
object
- CustomLabels
- items CustomLabel
- CustomLabels
- DetectFacesRequest
object
- Attributes
- items Attribute
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- Attributes
- DetectFacesResponse
object
- FaceDetails
- items FaceDetail
- OrientationCorrection
- FaceDetails
- DetectLabelsRequest
object
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MaxLabels
- MinConfidence
- Image required
- DetectLabelsResponse
object
- LabelModelVersion
- Labels
- items Label
- OrientationCorrection
- DetectModerationLabelsRequest
object
- HumanLoopConfig
- DataAttributes
- ContentClassifiers
- items ContentClassifier
- ContentClassifiers
- FlowDefinitionArn required
- HumanLoopName required
- DataAttributes
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MinConfidence
- HumanLoopConfig
- DetectModerationLabelsResponse
object
- HumanLoopActivationOutput
- HumanLoopActivationConditionsEvaluationResults
- HumanLoopActivationReasons
- HumanLoopArn
- ModerationLabels
- items ModerationLabel
- ModerationModelVersion
- HumanLoopActivationOutput
- DetectProtectiveEquipmentRequest
object
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- SummarizationAttributes
- MinConfidence required
- RequiredEquipmentTypes required
- items ProtectiveEquipmentType
- Image required
- DetectProtectiveEquipmentResponse
object
- Persons
- ProtectiveEquipmentModelVersion
- Summary
- DetectTextFilters
object
: A set of optional parameters that you can use to set the criteria that the text must meet to be included in your response. WordFilter looks at a word's height, width, and minimum confidence. RegionOfInterest lets you set a specific region of the image to look for text in.
- RegionsOfInterest
- items RegionOfInterest
- WordFilter DetectionFilter
- RegionsOfInterest
- DetectTextRequest
object
- Filters
- RegionsOfInterest
- items RegionOfInterest
- WordFilter DetectionFilter
- RegionsOfInterest
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- Filters
- DetectTextResponse
object
- TextDetections
- items TextDetection
- TextModelVersion
- TextDetections
- DetectionFilter
object
: A set of parameters that allow you to filter out certain results from your returned results.- MinBoundingBoxHeight
- MinBoundingBoxWidth
- MinConfidence
- Emotion
object
: The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.- Confidence
- Type
- EmotionName
string
(values: HAPPY, SAD, ANGRY, CONFUSED, DISGUSTED, SURPRISED, CALM, UNKNOWN, FEAR)
- Emotions
array
- items Emotion
- EquipmentDetection
object
: Information about an item of Personal Protective Equipment (PPE) detected by DetectProtectiveEquipment. For more information, see DetectProtectiveEquipment.- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- CoversBodyPart
- Confidence
- Value
- Type
- BoundingBox
- EquipmentDetections
array
- items EquipmentDetection
- EvaluationResult
object
: The evaluation results for the training of a model.- F1Score
- Summary
- S3Object S3Object
- ExtendedPaginationToken
string
- ExternalImageId
string
- EyeOpen
object
: Indicates whether or not the eyes on the face are open, and the confidence level in the determination.- Confidence
- Value
- Eyeglasses
object
: Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.- Confidence
- Value
- Face
object
: Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- ExternalImageId
- FaceId
- ImageId
- BoundingBox
- FaceAttributes
string
(values: DEFAULT, ALL)
- FaceDetail
object
: Structure containing attributes of the face that the algorithm detected.
A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality.
GetFaceDetection is the only Amazon Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.
- GetCelebrityRecognition
- GetPersonTracking
- GetFaceSearch
The Amazon Rekognition Image DetectFaces and IndexFaces operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- FaceDetailList
array
- items FaceDetail
- FaceDetection
object
: Information about a face detected in a video analysis request and the time the face was detected in the video.- Face
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Timestamp
- Face
- FaceDetections
array
- items FaceDetection
- FaceId
string
- FaceIdList
array
- items FaceId
- FaceList
array
- items Face
- FaceMatch
object
: Provides face metadata. In addition, it also provides the confidence in the match of this face with the input face.- Face
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- ExternalImageId
- FaceId
- ImageId
- BoundingBox
- Similarity
- Face
- FaceMatchList
array
- items FaceMatch
- FaceModelVersionList
array
- items String
- FaceRecord
object
: Object containing both the face metadata (stored in the backend database), and facial attributes that are detected but aren't stored in the database.- Face
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- ExternalImageId
- FaceId
- ImageId
- BoundingBox
- FaceDetail
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Face
- FaceRecordList
array
- items FaceRecord
- FaceSearchSettings
object
: Input face recognition parameters for an Amazon Rekognition stream processor. FaceRecognitionSettings is a request parameter for CreateStreamProcessor.
- CollectionId
- FaceMatchThreshold
- FaceSearchSortBy
string
(values: INDEX, TIMESTAMP)
- Float
number
- FlowDefinitionArn
string
- Gender
object
:The predicted gender of a detected face.
Amazon Rekognition makes gender binary (male/female) predictions based on the physical appearance of a face in a particular image. This kind of prediction is not designed to categorize a person’s gender identity, and you shouldn't use Amazon Rekognition to make such a determination. For example, a male actor wearing a long-haired wig and earrings for a role might be predicted as female.
Using Amazon Rekognition to make gender binary predictions is best suited for use cases where aggregate gender distribution statistics need to be analyzed without identifying specific users. For example, the percentage of female users compared to male users on a social media platform.
We don't recommend using gender binary predictions to make decisions that impact an individual's rights, privacy, or access to services.
- Confidence
- Value
- GenderType
string
(values: Male, Female)
- Geometry
object
: Information about where an object (DetectCustomLabels) or text (DetectText) is located on an image.- BoundingBox
- Height
- Left
- Top
- Width
- Polygon
- items Point
- BoundingBox
- GetCelebrityInfoRequest
object
- Id required
- GetCelebrityInfoResponse
object
- Name
- Urls
- items Url
- GetCelebrityRecognitionRequest
object
- JobId required
- MaxResults
- NextToken
- SortBy
- GetCelebrityRecognitionResponse
object
- Celebrities
- items CelebrityRecognition
- JobStatus
- NextToken
- StatusMessage
- VideoMetadata
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- Celebrities
- GetContentModerationRequest
object
- JobId required
- MaxResults
- NextToken
- SortBy
- GetContentModerationResponse
object
- JobStatus
- ModerationLabels
- ModerationModelVersion
- NextToken
- StatusMessage
- VideoMetadata
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- GetFaceDetectionRequest
object
- JobId required
- MaxResults
- NextToken
- GetFaceDetectionResponse
object
- Faces
- items FaceDetection
- JobStatus
- NextToken
- StatusMessage
- VideoMetadata
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- Faces
- GetFaceSearchRequest
object
- JobId required
- MaxResults
- NextToken
- SortBy
- GetFaceSearchResponse
object
- JobStatus
- NextToken
- Persons
- items PersonMatch
- StatusMessage
- VideoMetadata
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- GetLabelDetectionRequest
object
- JobId required
- MaxResults
- NextToken
- SortBy
- GetLabelDetectionResponse
object
- JobStatus
- LabelModelVersion
- Labels
- items LabelDetection
- NextToken
- StatusMessage
- VideoMetadata
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- GetPersonTrackingRequest
object
- JobId required
- MaxResults
- NextToken
- SortBy
- GetPersonTrackingResponse
object
- JobStatus
- NextToken
- Persons
- items PersonDetection
- StatusMessage
- VideoMetadata
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- GetSegmentDetectionRequest
object
- JobId required
- MaxResults
- NextToken
- GetSegmentDetectionResponse
object
- AudioMetadata
- items AudioMetadata
- JobStatus
- NextToken
- Segments
- items SegmentDetection
- SelectedSegmentTypes
- items SegmentTypeInfo
- StatusMessage
- VideoMetadata
- items VideoMetadata
- AudioMetadata
- GetTextDetectionRequest
object
- JobId required
- MaxResults
- NextToken
- GetTextDetectionResponse
object
- JobStatus
- NextToken
- StatusMessage
- TextDetections
- items TextDetectionResult
- TextModelVersion
- VideoMetadata VideoMetadata
- GroundTruthManifest
object
: The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest file.- S3Object S3Object
- HumanLoopActivationConditionsEvaluationResults
string
- HumanLoopActivationOutput
object
: Shows the results of the human in the loop evaluation. If there is no HumanLoopArn, the input did not trigger human review.- HumanLoopActivationConditionsEvaluationResults
- HumanLoopActivationReasons
- HumanLoopArn
- HumanLoopActivationReason
string
- HumanLoopActivationReasons
array
- HumanLoopArn
string
- HumanLoopConfig
object
: Sets up the flow definition the image will be sent to if one of the conditions is met. You can also set certain attributes of the image before review.- DataAttributes
- ContentClassifiers
- items ContentClassifier
- ContentClassifiers
- FlowDefinitionArn required
- HumanLoopName required
- DataAttributes
- HumanLoopDataAttributes
object
: Allows you to set attributes of the image. Currently, you can declare an image as free of personally identifiable information.- ContentClassifiers
- items ContentClassifier
- ContentClassifiers
- HumanLoopName
string
- Image
object
: Provides the input image either as bytes or an S3 object.
You pass image bytes to an Amazon Rekognition API operation by using the Bytes property. For example, you would use the Bytes property to pass an image loaded from a local file system. Image bytes passed by using the Bytes property must be base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to call Amazon Rekognition API operations. For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.
You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the S3Object property. Images stored in an S3 bucket do not need to be base64-encoded. The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.
If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.
For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.
- Bytes
- S3Object
- Bucket
- Name
- Version
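A sketch of the two ways to supply an image. Whether this client expects the Bytes value as a base64 string is an assumption based on the note above, and the file path and bucket are placeholders:

let fs = require('fs');
let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

// Option 1: pass image bytes, base64-encoded, loaded from the local file system.
let bytes = fs.readFileSync('./photo.jpg').toString('base64');  // placeholder path; base64 assumed required here
amazonaws_rekognition.DetectLabels({ Image: { Bytes: bytes } })
  .then(data => console.log(data.Labels));

// Option 2: reference an object in S3 (no base64 encoding needed); the bucket must be in the
// same region used for the Rekognition operation.
amazonaws_rekognition.DetectLabels({ Image: { S3Object: { Bucket: "my-bucket", Name: "photo.jpg" } } })
  .then(data => console.log(data.Labels));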
- ImageBlob
string
- ImageId
string
- ImageQuality
object
: Identifies face image brightness and sharpness.- Brightness
- Sharpness
- IndexFacesRequest
object
- CollectionId required
- DetectionAttributes
- items Attribute
- ExternalImageId
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MaxFaces
- QualityFilter
- IndexFacesResponse
object
- FaceModelVersion
- FaceRecords
- items FaceRecord
- OrientationCorrection
- UnindexedFaces
- items UnindexedFace
- InferenceUnits
integer
- Instance
object
: An instance of a label returned by Amazon Rekognition Image (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- BoundingBox
- Instances
array
- items Instance
- JobId
string
- JobTag
string
- KinesisDataArn
string
- KinesisDataStream
object
: The Kinesis data stream to which Amazon Rekognition streams the analysis results of an Amazon Rekognition stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
- Arn
- KinesisVideoArn
string
- KinesisVideoStream
object
: Kinesis video stream that provides the source streaming video for an Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
- Arn
- Label
object
:Structure containing details about the detected label, including the name, detected instances, parent labels, and level of confidence.
- LabelDetection
object
: Information about a label detected in a video analysis request and the time the label was detected in the video.
- LabelDetectionSortBy
string
(values: NAME, TIMESTAMP)
- LabelDetections
array
- items LabelDetection
- Labels
array
- items Label
- Landmark
object
: Indicates the location of the landmark on the face.- Type
- X
- Y
- LandmarkType
string
(values: eyeLeft, eyeRight, nose, mouthLeft, mouthRight, leftEyeBrowLeft, leftEyeBrowRight, leftEyeBrowUp, rightEyeBrowLeft, rightEyeBrowRight, rightEyeBrowUp, leftEyeLeft, leftEyeRight, leftEyeUp, leftEyeDown, rightEyeLeft, rightEyeRight, rightEyeUp, rightEyeDown, noseLeft, noseRight, mouthUp, mouthDown, leftPupil, rightPupil, upperJawlineLeft, midJawlineLeft, chinBottom, midJawlineRight, upperJawlineRight)
- Landmarks
array
- items Landmark
- ListCollectionsRequest
object
- MaxResults
- NextToken
- ListCollectionsResponse
object
- CollectionIds
- items CollectionId
- FaceModelVersions
- items String
- NextToken
- CollectionIds
- ListFacesRequest
object
- CollectionId required
- MaxResults
- NextToken
- ListFacesResponse
object
- FaceModelVersion
- Faces
- items Face
- NextToken
- ListStreamProcessorsRequest
object
- MaxResults
- NextToken
- ListStreamProcessorsResponse
object
- NextToken
- StreamProcessors
- items StreamProcessor
- MaxFaces
integer
- MaxFacesToIndex
integer
- MaxResults
integer
- ModerationLabel
object
: Provides information about a single type of unsafe content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.- Confidence
- Name
- ParentName
- ModerationLabels
array
- items ModerationLabel
- MouthOpen
object
: Indicates whether or not the mouth on the face is open, and the confidence level in the determination.- Confidence
- Value
- Mustache
object
: Indicates whether or not the face has a mustache, and the confidence level in the determination.- Confidence
- Value
- NotificationChannel
object
: The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see api-video.- RoleArn required
- SNSTopicArn required
- OrientationCorrection
string
(values: ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270)
- OutputConfig
object
: The S3 bucket and folder location where training output is placed.- S3Bucket
- S3KeyPrefix
- PageSize
integer
- PaginationToken
string
- Parent
object
: A parent label for a label. A label can have 0, 1, or more parents.- Name
- Parents
array
- items Parent
- Percent
number
- PersonDetail
object
: Details about a person detected in a video analysis request.- BoundingBox
- Height
- Left
- Top
- Width
- Face
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Index
- BoundingBox
- PersonDetection
object
: Details and path tracking information for a single time a person's path is tracked in a video. Amazon Rekognition operations that track people's paths return an array of PersonDetection objects with elements for each time a person's path is tracked in a video.
For more information, see GetPersonTracking in the Amazon Rekognition Developer Guide.
- Person
- BoundingBox
- Height
- Left
- Top
- Width
- Face
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Index
- BoundingBox
- Timestamp
- Person
- PersonDetections
array
- items PersonDetection
- PersonIndex
integer
- PersonMatch
object
: Information about a person whose face matches a face(s) in an Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (FaceMatch), information about the person (PersonDetail), and the time stamp for when the person was detected in a video. An array of PersonMatch objects is returned by GetFaceSearch.
- FaceMatches
- items FaceMatch
- Person
- BoundingBox
- Height
- Left
- Top
- Width
- Face
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Index
- BoundingBox
- Timestamp
- FaceMatches
- PersonMatches
array
- items PersonMatch
- PersonTrackingSortBy
string
(values: INDEX, TIMESTAMP)
- Point
object
: The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.
An array of Point objects, Polygon, is returned by DetectText and by DetectCustomLabels. Polygon represents a fine-grained polygon around a detected item. For more information, see Geometry in the Amazon Rekognition Developer Guide.
- X
- Y
- Polygon
array
- items Point
- Pose
object
: Indicates the pose of the face as determined by its pitch, roll, and yaw.- Pitch
- Roll
- Yaw
- ProjectArn
string
- ProjectDescription
object
: A description of an Amazon Rekognition Custom Labels project.
- CreationTimestamp
- ProjectArn
- Status
- ProjectDescriptions
array
- items ProjectDescription
- ProjectName
string
- ProjectStatus
string
(values: CREATING, CREATED, DELETING)
- ProjectVersionArn
string
- ProjectVersionDescription
object
: The description of a version of a model.
- ProjectVersionDescriptions
array
- ProjectVersionStatus
string
(values: TRAINING_IN_PROGRESS, TRAINING_COMPLETED, TRAINING_FAILED, STARTING, RUNNING, FAILED, STOPPING, STOPPED, DELETING)
- ProjectVersionsPageSize
integer
- ProjectsPageSize
integer
- ProtectiveEquipmentBodyPart
object
: Information about a body part detected by DetectProtectiveEquipment that contains PPE. An array of ProtectiveEquipmentBodyPart objects is returned for each person detected by DetectProtectiveEquipment.
- Confidence
- EquipmentDetections
- items EquipmentDetection
- Name
- ProtectiveEquipmentPerson
object
: A person detected by a call to DetectProtectiveEquipment. The API returns all persons detected in the input image in an array of ProtectiveEquipmentPerson objects.
- BodyParts
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Id
- ProtectiveEquipmentPersonIds
array
- items UInteger
- ProtectiveEquipmentPersons
array
- ProtectiveEquipmentSummarizationAttributes
object
: Specifies summary attributes to return from a call to DetectProtectiveEquipment. You can specify which types of PPE to summarize. You can also specify a minimum confidence value for detections. Summary information is returned in the Summary (ProtectiveEquipmentSummary) field of the response from DetectProtectiveEquipment. The summary includes which persons in an image were detected wearing the requested types of personal protective equipment (PPE), which persons were detected as not wearing PPE, and the persons in which a determination could not be made. For more information, see ProtectiveEquipmentSummary.
- MinConfidence required
- RequiredEquipmentTypes required
- items ProtectiveEquipmentType
- ProtectiveEquipmentSummary
object
: Summary information for required items of personal protective equipment (PPE) detected on persons by a call to DetectProtectiveEquipment. You specify the required type of PPE in the SummarizationAttributes (ProtectiveEquipmentSummarizationAttributes) input parameter. The summary includes which persons were detected wearing the required personal protective equipment (PersonsWithRequiredEquipment), which persons were detected as not wearing the required PPE (PersonsWithoutRequiredEquipment), and the persons in which a determination could not be made (PersonsIndeterminate).
To get a total for each category, use the size of the field array. For example, to find out how many people were detected as wearing the specified PPE, use the size of the PersonsWithRequiredEquipment array. If you want to find out more about a person, such as the location (BoundingBox) of the person on the image, use the person ID in each array element. Each person ID matches the ID field of a ProtectiveEquipmentPerson object returned in the Persons array by DetectProtectiveEquipment.
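A sketch of reading those summary counts, assuming face and hand covers were requested; the bucket and key are placeholders:

let amazonaws_rekognition = require('@datafire/amazonaws_rekognition').create({ accessKeyId: "", secretAccessKey: "", region: "" });

amazonaws_rekognition.DetectProtectiveEquipment({
  Image: { S3Object: { Bucket: "my-bucket", Name: "site-entrance.jpg" } },  // placeholder
  SummarizationAttributes: {
    MinConfidence: 80,
    RequiredEquipmentTypes: ["FACE_COVER", "HAND_COVER"]
  }
}).then(data => {
  const summary = data.Summary || {};
  // Each array holds the ids of ProtectiveEquipmentPerson entries in data.Persons.
  console.log("wearing required PPE:", (summary.PersonsWithRequiredEquipment || []).length);
  console.log("missing required PPE:", (summary.PersonsWithoutRequiredEquipment || []).length);
  console.log("indeterminate:", (summary.PersonsIndeterminate || []).length);
});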
- ProtectiveEquipmentType
string
(values: FACE_COVER, HAND_COVER, HEAD_COVER)
- ProtectiveEquipmentTypes
array
- items ProtectiveEquipmentType
- QualityFilter
string
(values: NONE, AUTO, LOW, MEDIUM, HIGH)
- Reason
string
(values: EXCEEDS_MAX_FACES, EXTREME_POSE, LOW_BRIGHTNESS, LOW_SHARPNESS, LOW_CONFIDENCE, SMALL_BOUNDING_BOX, LOW_FACE_QUALITY)
- Reasons
array
- items Reason
- RecognizeCelebritiesRequest
object
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- Image required
- RecognizeCelebritiesResponse
object
- CelebrityFaces
- items Celebrity
- OrientationCorrection
- UnrecognizedFaces
- items ComparedFace
- CelebrityFaces
- RegionOfInterest
object
: Specifies a location within the frame that Rekognition checks for text. Uses a BoundingBox object to set a region of the screen.
A word is included in the region if the word is more than half in that region. If there is more than one region, the word will be compared with all regions of the screen. Any word more than half in a region is kept in the results.
- BoundingBox
- Height
- Left
- Top
- Width
- BoundingBox
- RegionsOfInterest
array
- items RegionOfInterest
- RekognitionUniqueId
string
- RoleArn
string
- S3Bucket
string
- S3KeyPrefix
string
- S3Object
object
:Provides the S3 bucket name and object name.
The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.
For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource-Based Policies in the Amazon Rekognition Developer Guide.
- Bucket
- Name
- Version
- S3ObjectName
string
- S3ObjectVersion
string
- SNSTopicArn
string
- SearchFacesByImageRequest
object
- CollectionId required
- FaceMatchThreshold
- Image required
- Bytes
- S3Object
- Bucket
- Name
- Version
- MaxFaces
- QualityFilter
- SearchFacesByImageResponse
object
- FaceMatches
- items FaceMatch
- FaceModelVersion
- SearchedFaceBoundingBox
- Height
- Left
- Top
- Width
- SearchedFaceConfidence
- FaceMatches
- SearchFacesRequest
object
- CollectionId required
- FaceId required
- FaceMatchThreshold
- MaxFaces
- SearchFacesResponse
object
- FaceMatches
- items FaceMatch
- FaceModelVersion
- SearchedFaceId
- FaceMatches
- SegmentConfidence
number
- SegmentDetection
object
: A technical cue or shot detection segment detected in a video. An array of SegmentDetection objects containing all segments detected in a stored video is returned by GetSegmentDetection.
- DurationMillis
- DurationSMPTE
- EndTimecodeSMPTE
- EndTimestampMillis
- ShotSegment
- Confidence
- Index
- StartTimecodeSMPTE
- StartTimestampMillis
- TechnicalCueSegment
- Confidence
- Type
- Type
- SegmentDetections
array
- items SegmentDetection
- SegmentType
string
(values: TECHNICAL_CUE, SHOT)
- SegmentTypeInfo
object
: Information about the type of a segment requested in a call to StartSegmentDetection. An array of SegmentTypeInfo objects is returned by the response from GetSegmentDetection.
- ModelVersion
- Type
- SegmentTypes
array
- items SegmentType
- SegmentTypesInfo
array
- items SegmentTypeInfo
- ShotSegment
object
: Information about a shot detection segment detected in a video. For more information, see SegmentDetection.- Confidence
- Index
- Smile
object
: Indicates whether or not the face is smiling, and the confidence level in the determination.- Confidence
- Value
- StartCelebrityRecognitionRequest
object
- ClientRequestToken
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- StartCelebrityRecognitionResponse
object
- JobId
- StartContentModerationRequest
object
- ClientRequestToken
- JobTag
- MinConfidence
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- StartContentModerationResponse
object
- JobId
- StartFaceDetectionRequest
object
- ClientRequestToken
- FaceAttributes
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- StartFaceDetectionResponse
object
- JobId
- StartFaceSearchRequest
object
- ClientRequestToken
- CollectionId required
- FaceMatchThreshold
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- StartFaceSearchResponse
object
- JobId
- StartLabelDetectionRequest
object
- ClientRequestToken
- JobTag
- MinConfidence
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- StartLabelDetectionResponse
object
- JobId
- StartPersonTrackingRequest
object
- ClientRequestToken
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- Video required
- S3Object
- Bucket
- Name
- Version
- S3Object
- StartPersonTrackingResponse
object
- JobId
- StartProjectVersionRequest
object
- MinInferenceUnits required
- ProjectVersionArn required
- StartProjectVersionResponse
object
- Status
- StartSegmentDetectionFilters
object
: Filters applied to the technical cue or shot detection segments. For more information, see StartSegmentDetection.- ShotFilter
- MinSegmentConfidence
- TechnicalCueFilter
- MinSegmentConfidence
- ShotFilter
- StartSegmentDetectionRequest
object
- ClientRequestToken
- Filters
- ShotFilter
- MinSegmentConfidence
- TechnicalCueFilter
- MinSegmentConfidence
- ShotFilter
- JobTag
- NotificationChannel
- RoleArn required
- SNSTopicArn required
- SegmentTypes required
- items SegmentType
- Video required Video
- StartSegmentDetectionResponse
object
- JobId
- StartShotDetectionFilter
object
: Filters for the shot detection segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.
- MinSegmentConfidence
- StartStreamProcessorRequest
object
- Name required
- StartStreamProcessorResponse
object
- StartTechnicalCueDetectionFilter
object
: Filters for the technical segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.- MinSegmentConfidence
- StartTextDetectionFilters
object
: A set of optional parameters that let you set the criteria text must meet to be included in your response. WordFilter looks at a word's height, width, and minimum confidence. RegionOfInterest lets you set a specific region of the screen to look for text in.
- RegionsOfInterest
- items RegionOfInterest
- WordFilter
- MinBoundingBoxHeight
- MinBoundingBoxWidth
- MinConfidence
- RegionsOfInterest
- StartTextDetectionRequest
object
- ClientRequestToken
- Filters
- RegionsOfInterest
- items RegionOfInterest
- WordFilter
- MinBoundingBoxHeight
- MinBoundingBoxWidth
- MinConfidence
- RegionsOfInterest
- JobTag
- NotificationChannel NotificationChannel
- Video required Video
- StartTextDetectionResponse
object
- JobId
- StatusMessage
string
- StopProjectVersionRequest
object
- ProjectVersionArn required
- StopProjectVersionResponse
object
- Status
- StopStreamProcessorRequest
object
- Name required
- StopStreamProcessorResponse
object
- StreamProcessor
object
: An object that recognizes faces in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis results.
- Name
- Status
- StreamProcessorArn
string
- StreamProcessorInput
object
: Information about the source streaming video.- KinesisVideoStream
- Arn
- KinesisVideoStream
- StreamProcessorList
array
- items StreamProcessor
- StreamProcessorName
string
- StreamProcessorOutput
object
: Information about the Amazon Kinesis Data Streams stream to which an Amazon Rekognition Video stream processor streams the results of a video analysis. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
- KinesisDataStream
- Arn
- KinesisDataStream
- StreamProcessorSettings
object
: Input parameters used to recognize faces in a streaming video analyzed by an Amazon Rekognition stream processor.
- FaceSearch
- CollectionId
- FaceMatchThreshold
- FaceSearch
- StreamProcessorStatus
string
(values: STOPPED, STARTING, RUNNING, FAILED, STOPPING)
- String
string
- Summary
object
:The S3 bucket that contains the training summary. The training summary includes aggregated evaluation metrics for the entire testing dataset and metrics for each individual label.
You get the training summary S3 bucket location by calling DescribeProjectVersions.
- S3Object S3Object
- Sunglasses
object
: Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.- Confidence
- Value
- TechnicalCueSegment
object
: Information about a technical cue segment. For more information, see SegmentDetection.- Confidence
- Type
- TechnicalCueType
string
(values: ColorBars, EndCredits, BlackFrames)
- TestingData
object
: The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition Custom Labels creates a testing dataset using an 80/20 split of the training dataset.
- Assets
- items Asset
- AutoCreate
- Assets
- TestingDataResult
object
: Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during testing.
- TextDetection
object
: Information about a word or line of text detected by DetectText.
The DetectedText field contains the text that Amazon Rekognition detected in the image. Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words.
For more information, see Detecting Text in the Amazon Rekognition Developer Guide.
- Confidence
- DetectedText
- Geometry
- BoundingBox
- Height
- Left
- Top
- Width
- Polygon
- items Point
- BoundingBox
- Id
- ParentId
- Type
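Because each WORD carries the Id of its parent LINE in ParentId, the flat TextDetections array can be regrouped into lines of words. A small sketch, assuming textDetections came back from a DetectText call:

// Group WORD detections under their parent LINE using the Id / ParentId relationship.
function groupWordsByLine(textDetections) {
  const lines = {};
  textDetections
    .filter(t => t.Type === "LINE")
    .forEach(line => { lines[line.Id] = { text: line.DetectedText, words: [] }; });
  textDetections
    .filter(t => t.Type === "WORD")
    .forEach(word => {
      if (lines[word.ParentId]) lines[word.ParentId].words.push(word.DetectedText);
    });
  return lines;
}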
- TextDetectionList
array
- items TextDetection
- TextDetectionResult
object
: Information about text detected in a video. Includes the detected text, the time in milliseconds from the start of the video that the text was detected, and where it was detected on the screen.
- TextDetection
- Confidence
- DetectedText
- Geometry
- BoundingBox
- Height
- Left
- Top
- Width
- Polygon
- items Point
- BoundingBox
- Id
- ParentId
- Type
- Timestamp
- TextDetection
- TextDetectionResults
array
- items TextDetectionResult
- TextTypes
string
(values: LINE, WORD)
- Timecode
string
- Timestamp
integer
- TrainingData
object
: The dataset used for training.- Assets
- items Asset
- Assets
- TrainingDataResult
object
: Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during training.
- UInteger
integer
- ULong
integer
- UnindexedFace
object
: A face that IndexFaces detected, but didn't index. Use the Reasons response attribute to determine why a face wasn't indexed.
- FaceDetail
- AgeRange
- High
- Low
- Beard
- Confidence
- Value
- BoundingBox
- Height
- Left
- Top
- Width
- Confidence
- Emotions
- items Emotion
- Eyeglasses
- Confidence
- Value
- EyesOpen
- Confidence
- Value
- Gender
- Confidence
- Value
- Landmarks
- items Landmark
- MouthOpen
- Confidence
- Value
- Mustache
- Confidence
- Value
- Pose
- Pitch
- Roll
- Yaw
- Quality
- Brightness
- Sharpness
- Smile
- Confidence
- Value
- Sunglasses
- Confidence
- Value
- AgeRange
- Reasons
- items Reason
- FaceDetail
- UnindexedFaces
array
- items UnindexedFace
- Url
string
- Urls
array
- items Url
- ValidationData
object
: Contains the Amazon S3 bucket location of the validation data for a model training job.
The validation data includes error information for individual JSON lines in the dataset. For more information, see Debugging a Failed Model Training in the Amazon Rekognition Custom Labels Developer Guide.
You get the ValidationData object for the training dataset (TrainingDataResult) and the test dataset (TestingDataResult) by calling DescribeProjectVersions. The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.
- Assets
- items Asset
- Assets
- VersionName
string
- VersionNames
array
- items VersionName
- Video
object
: Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as StartLabelDetection use Video to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.
- S3Object
- Bucket
- Name
- Version
- S3Object
- VideoJobStatus
string
(values: IN_PROGRESS, SUCCEEDED, FAILED)
- VideoMetadata
object
: Information about a video that Amazon Rekognition analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition video operation.
- Codec
- DurationMillis
- Format
- FrameHeight
- FrameRate
- FrameWidth
- VideoMetadataList
array
- items VideoMetadata