package types
import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
Index ¶
- type AccessDeniedException
- func (e *AccessDeniedException) Error() string
- func (e *AccessDeniedException) ErrorCode() string
- func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault
- func (e *AccessDeniedException) ErrorMessage() string
- type AgeRange
- type Asset
- type Attribute
- type AudioMetadata
- type Beard
- type BoundingBox
- type Celebrity
- type CelebrityDetail
- type CelebrityRecognition
- type CelebrityRecognitionSortBy
- type CompareFacesMatch
- type ComparedFace
- type ComparedSourceImageFace
- type ContentClassifier
- type ContentModerationDetection
- type ContentModerationSortBy
- type CustomLabel
- type DetectTextFilters
- type DetectionFilter
- type Emotion
- type EmotionName
- type EvaluationResult
- type EyeOpen
- type Eyeglasses
- type Face
- type FaceAttributes
- type FaceDetail
- type FaceDetection
- type FaceMatch
- type FaceRecord
- type FaceSearchSettings
- type FaceSearchSortBy
- type Gender
- type GenderType
- type Geometry
- type GroundTruthManifest
- type HumanLoopActivationOutput
- type HumanLoopConfig
- type HumanLoopDataAttributes
- type HumanLoopQuotaExceededException
- func (e *HumanLoopQuotaExceededException) Error() string
- func (e *HumanLoopQuotaExceededException) ErrorCode() string
- func (e *HumanLoopQuotaExceededException) ErrorFault() smithy.ErrorFault
- func (e *HumanLoopQuotaExceededException) ErrorMessage() string
- type IdempotentParameterMismatchException
- func (e *IdempotentParameterMismatchException) Error() string
- func (e *IdempotentParameterMismatchException) ErrorCode() string
- func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault
- func (e *IdempotentParameterMismatchException) ErrorMessage() string
- type Image
- type ImageQuality
- type ImageTooLargeException
- func (e *ImageTooLargeException) Error() string
- func (e *ImageTooLargeException) ErrorCode() string
- func (e *ImageTooLargeException) ErrorFault() smithy.ErrorFault
- func (e *ImageTooLargeException) ErrorMessage() string
- type Instance
- type InternalServerError
- func (e *InternalServerError) Error() string
- func (e *InternalServerError) ErrorCode() string
- func (e *InternalServerError) ErrorFault() smithy.ErrorFault
- func (e *InternalServerError) ErrorMessage() string
- type InvalidImageFormatException
- func (e *InvalidImageFormatException) Error() string
- func (e *InvalidImageFormatException) ErrorCode() string
- func (e *InvalidImageFormatException) ErrorFault() smithy.ErrorFault
- func (e *InvalidImageFormatException) ErrorMessage() string
- type InvalidPaginationTokenException
- func (e *InvalidPaginationTokenException) Error() string
- func (e *InvalidPaginationTokenException) ErrorCode() string
- func (e *InvalidPaginationTokenException) ErrorFault() smithy.ErrorFault
- func (e *InvalidPaginationTokenException) ErrorMessage() string
- type InvalidParameterException
- func (e *InvalidParameterException) Error() string
- func (e *InvalidParameterException) ErrorCode() string
- func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault
- func (e *InvalidParameterException) ErrorMessage() string
- type InvalidS3ObjectException
- func (e *InvalidS3ObjectException) Error() string
- func (e *InvalidS3ObjectException) ErrorCode() string
- func (e *InvalidS3ObjectException) ErrorFault() smithy.ErrorFault
- func (e *InvalidS3ObjectException) ErrorMessage() string
- type KinesisDataStream
- type KinesisVideoStream
- type Label
- type LabelDetection
- type LabelDetectionSortBy
- type Landmark
- type LandmarkType
- type LimitExceededException
- func (e *LimitExceededException) Error() string
- func (e *LimitExceededException) ErrorCode() string
- func (e *LimitExceededException) ErrorFault() smithy.ErrorFault
- func (e *LimitExceededException) ErrorMessage() string
- type ModerationLabel
- type MouthOpen
- type Mustache
- type NotificationChannel
- type OrientationCorrection
- type OutputConfig
- type Parent
- type PersonDetail
- type PersonDetection
- type PersonMatch
- type PersonTrackingSortBy
- type Point
- type Pose
- type ProjectDescription
- type ProjectStatus
- type ProjectVersionDescription
- type ProjectVersionStatus
- type ProvisionedThroughputExceededException
- func (e *ProvisionedThroughputExceededException) Error() string
- func (e *ProvisionedThroughputExceededException) ErrorCode() string
- func (e *ProvisionedThroughputExceededException) ErrorFault() smithy.ErrorFault
- func (e *ProvisionedThroughputExceededException) ErrorMessage() string
- type QualityFilter
- type Reason
- type RegionOfInterest
- type ResourceAlreadyExistsException
- func (e *ResourceAlreadyExistsException) Error() string
- func (e *ResourceAlreadyExistsException) ErrorCode() string
- func (e *ResourceAlreadyExistsException) ErrorFault() smithy.ErrorFault
- func (e *ResourceAlreadyExistsException) ErrorMessage() string
- type ResourceInUseException
- func (e *ResourceInUseException) Error() string
- func (e *ResourceInUseException) ErrorCode() string
- func (e *ResourceInUseException) ErrorFault() smithy.ErrorFault
- func (e *ResourceInUseException) ErrorMessage() string
- type ResourceNotFoundException
- func (e *ResourceNotFoundException) Error() string
- func (e *ResourceNotFoundException) ErrorCode() string
- func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault
- func (e *ResourceNotFoundException) ErrorMessage() string
- type ResourceNotReadyException
- func (e *ResourceNotReadyException) Error() string
- func (e *ResourceNotReadyException) ErrorCode() string
- func (e *ResourceNotReadyException) ErrorFault() smithy.ErrorFault
- func (e *ResourceNotReadyException) ErrorMessage() string
- type S3Object
- type SegmentDetection
- type SegmentType
- type SegmentTypeInfo
- type ShotSegment
- type Smile
- type StartSegmentDetectionFilters
- type StartShotDetectionFilter
- type StartTechnicalCueDetectionFilter
- type StartTextDetectionFilters
- type StreamProcessor
- type StreamProcessorInput
- type StreamProcessorOutput
- type StreamProcessorSettings
- type StreamProcessorStatus
- type Summary
- type Sunglasses
- type TechnicalCueSegment
- type TechnicalCueType
- type TestingData
- type TestingDataResult
- type TextDetection
- type TextDetectionResult
- type TextTypes
- type ThrottlingException
- func (e *ThrottlingException) Error() string
- func (e *ThrottlingException) ErrorCode() string
- func (e *ThrottlingException) ErrorFault() smithy.ErrorFault
- func (e *ThrottlingException) ErrorMessage() string
- type TrainingData
- type TrainingDataResult
- type UnindexedFace
- type Video
- type VideoJobStatus
- type VideoMetadata
- type VideoTooLargeException
Types ¶
type AccessDeniedException ¶
You are not authorized to perform the action.
func (*AccessDeniedException) Error ¶
func (e *AccessDeniedException) Error() string
func (*AccessDeniedException) ErrorCode ¶
func (e *AccessDeniedException) ErrorCode() string
func (*AccessDeniedException) ErrorFault ¶
func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault
func (*AccessDeniedException) ErrorMessage ¶
func (e *AccessDeniedException) ErrorMessage() string
type AgeRange ¶
type AgeRange struct {
    // The highest estimated age.
    High *int32

    // The lowest estimated age.
    Low *int32
}
Structure containing the estimated age range, in years, for a face. Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age ranges can overlap. A face of a 5-year-old might have an estimated range of 4-6, while the face of a 6-year-old might have an estimated range of 4-8.
type Asset ¶
type Asset struct {
    // The S3 bucket that contains the Ground Truth manifest file.
    GroundTruthManifest *GroundTruthManifest
}
Assets are the images that you use to train and evaluate a model version. Assets are referenced by Amazon SageMaker Ground Truth manifest files.
type Attribute ¶
type Attribute string
Enum values for Attribute
type AudioMetadata ¶
type AudioMetadata struct {
    // The number of audio channels in the segment.
    NumberOfChannels *int64

    // The duration of the audio stream in milliseconds.
    DurationMillis *int64

    // The audio codec used to encode or decode the audio stream.
    Codec *string

    // The sample rate for the audio stream.
    SampleRate *int64
}
Metadata information about an audio stream. An array of AudioMetadata objects for the audio streams found in a stored video is returned by GetSegmentDetection.
type Beard ¶
type Beard struct {
    // Boolean value that indicates whether the face has a beard or not.
    Value *bool

    // Level of confidence in the determination.
    Confidence *float32
}
Indicates whether or not the face has a beard, and the confidence level in the determination.
type BoundingBox ¶
type BoundingBox struct {
    // Width of the bounding box as a ratio of the overall image width.
    Width *float32

    // Height of the bounding box as a ratio of the overall image height.
    Height *float32

    // Left coordinate of the bounding box as a ratio of overall image width.
    Left *float32

    // Top coordinate of the bounding box as a ratio of overall image height.
    Top *float32
}
Identifies the bounding box around the label, face, or text. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0). The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200). The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1. The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.
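The ratio-to-pixel conversion described above can be applied directly. A minimal sketch follows; the toPixels helper is illustrative, not part of the SDK.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// toPixels scales a ratio-based BoundingBox to pixel coordinates for an
// image of the given dimensions. Nil fields are treated as zero.
func toPixels(box types.BoundingBox, imgW, imgH float32) (left, top, width, height float32) {
    f := func(p *float32) float32 {
        if p == nil {
            return 0
        }
        return *p
    }
    return f(box.Left) * imgW, f(box.Top) * imgH, f(box.Width) * imgW, f(box.Height) * imgH
}

func main() {
    left, top := float32(0.5), float32(0.25)
    w, h := float32(0.1), float32(0.3)
    box := types.BoundingBox{Left: &left, Top: &top, Width: &w, Height: &h}

    // For a 700x200 image this prints: left=350 top=50 width=70 height=60
    l, t, bw, bh := toPixels(box, 700, 200)
    fmt.Printf("left=%.0f top=%.0f width=%.0f height=%.0f\n", l, t, bw, bh)
}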
type Celebrity ¶
type Celebrity struct {
    // A unique identifier for the celebrity.
    Id *string

    // The confidence, in percentage, that Amazon Rekognition has that the recognized
    // face is the celebrity.
    MatchConfidence *float32

    // The name of the celebrity.
    Name *string

    // An array of URLs pointing to additional information about the celebrity. If
    // there is no additional information about the celebrity, this list is empty.
    Urls []*string

    // Provides information about the celebrity's face, such as its location on the
    // image.
    Face *ComparedFace
}
Provides information about a celebrity recognized by the RecognizeCelebrities operation.
type CelebrityDetail ¶
type CelebrityDetail struct {
    // Bounding box around the body of a celebrity.
    BoundingBox *BoundingBox

    // The name of the celebrity.
    Name *string

    // The confidence, in percentage, that Amazon Rekognition has that the recognized
    // face is the celebrity.
    Confidence *float32

    // An array of URLs pointing to additional celebrity information.
    Urls []*string

    // Face details for the recognized celebrity.
    Face *FaceDetail

    // The unique identifier for the celebrity.
    Id *string
}
Information about a recognized celebrity.
type CelebrityRecognition ¶
type CelebrityRecognition struct {
    // The time, in milliseconds from the start of the video, that the celebrity was
    // recognized.
    Timestamp *int64

    // Information about a recognized celebrity.
    Celebrity *CelebrityDetail
}
Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see GetCelebrityRecognition in the Amazon Rekognition Developer Guide.
type CelebrityRecognitionSortBy ¶
type CelebrityRecognitionSortBy string
const (
    CelebrityRecognitionSortById        CelebrityRecognitionSortBy = "ID"
    CelebrityRecognitionSortByTimestamp CelebrityRecognitionSortBy = "TIMESTAMP"
)
Enum values for CelebrityRecognitionSortBy
type CompareFacesMatch ¶
type CompareFacesMatch struct {
    // Level of confidence that the faces match.
    Similarity *float32

    // Provides face metadata (bounding box and confidence that the bounding box
    // actually contains a face).
    Face *ComparedFace
}
Provides information about a face in a target image that matches the source image face analyzed by CompareFaces. The Face property contains the bounding box of the face in the target image. The Similarity property is the confidence that the source image face matches the face in the bounding box.
type ComparedFace ¶
type ComparedFace struct {
    // An array of facial landmarks.
    Landmarks []*Landmark

    // Bounding box of the face.
    BoundingBox *BoundingBox

    // Identifies face image brightness and sharpness.
    Quality *ImageQuality

    // Level of confidence that what the bounding box contains is a face.
    Confidence *float32

    // Indicates the pose of the face as determined by its pitch, roll, and yaw.
    Pose *Pose
}
Provides face metadata for target image faces that are analyzed by CompareFaces and RecognizeCelebrities.
type ComparedSourceImageFace ¶
type ComparedSourceImageFace struct {
    // Confidence level that the selected bounding box contains a face.
    Confidence *float32

    // Bounding box of the face.
    BoundingBox *BoundingBox
}
Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.
type ContentClassifier ¶
type ContentClassifier string
const (
    ContentClassifierFree_of_personally_identifiable_information ContentClassifier = "FreeOfPersonallyIdentifiableInformation"
    ContentClassifierFree_of_adult_content                       ContentClassifier = "FreeOfAdultContent"
)
Enum values for ContentClassifier
type ContentModerationDetection ¶
type ContentModerationDetection struct {
    // The unsafe content label detected in the stored video.
    ModerationLabel *ModerationLabel

    // Time, in milliseconds from the beginning of the video, that the unsafe content
    // label was detected.
    Timestamp *int64
}
Information about an unsafe content label detection in a stored video.
type ContentModerationSortBy ¶
type ContentModerationSortBy string
const (
    ContentModerationSortByName      ContentModerationSortBy = "NAME"
    ContentModerationSortByTimestamp ContentModerationSortBy = "TIMESTAMP"
)
Enum values for ContentModerationSortBy
type CustomLabel ¶
type CustomLabel struct {
    // The name of the custom label.
    Name *string

    // The location of the detected object on the image that corresponds to the
    // custom label. Includes an axis aligned coarse bounding box surrounding the
    // object and a finer grain polygon for more accurate spatial information.
    Geometry *Geometry

    // The confidence that the model has in the detection of the custom label. The
    // range is 0-100. A higher value indicates a higher confidence.
    Confidence *float32
}
A custom label detected in an image by a call to DetectCustomLabels.
type DetectTextFilters ¶
type DetectTextFilters struct {
    // A set of parameters that allow you to filter out certain results from your
    // returned results.
    WordFilter *DetectionFilter

    // A filter focusing on a certain area of the image. Uses a BoundingBox object to
    // set the region of the image.
    RegionsOfInterest []*RegionOfInterest
}
A set of optional parameters that you can use to set the criteria that the text must meet to be included in your response. WordFilter looks at a word’s height, width, and minimum confidence. RegionOfInterest lets you set a specific region of the image to look for text in.
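For example, here is a sketch of filters that keep only larger, higher-confidence words found in the top half of the image; the threshold values are illustrative, not recommendations, and the result is assumed to be passed as the Filters field of a DetectText request.

package example

import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"

// topHalfWordFilters builds text filters combining a word filter with a
// region of interest covering the top half of the image.
func topHalfWordFilters() *types.DetectTextFilters {
    minConfidence := float32(80) // drop words below 80% detection confidence
    minHeight := float32(0.05)   // drop words shorter than 5% of image height
    left, top := float32(0), float32(0)
    width, height := float32(1), float32(0.5)

    return &types.DetectTextFilters{
        WordFilter: &types.DetectionFilter{
            MinConfidence:        &minConfidence,
            MinBoundingBoxHeight: &minHeight,
        },
        // Restrict detection to the top half of the image.
        RegionsOfInterest: []*types.RegionOfInterest{
            {BoundingBox: &types.BoundingBox{
                Left: &left, Top: &top, Width: &width, Height: &height,
            }},
        },
    }
}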
type DetectionFilter ¶
type DetectionFilter struct {
    // Sets the minimum height of the word bounding box. Words with bounding box
    // heights less than this value will be excluded from the result. Value is
    // relative to the video frame height.
    MinBoundingBoxHeight *float32

    // Sets the minimum width of the word bounding box. Words with bounding box
    // widths less than this value will be excluded from the result. Value is
    // relative to the video frame width.
    MinBoundingBoxWidth *float32

    // Sets the confidence of word detection. Words with detection confidence below
    // this will be excluded from the result. Values should be between 0.5 and 1 as
    // Text in Video will not return any result below 0.5.
    MinConfidence *float32
}
A set of parameters that allow you to filter out certain results from your returned results.
type Emotion ¶
type Emotion struct {
    // Type of emotion detected.
    Type EmotionName

    // Level of confidence in the determination.
    Confidence *float32
}
The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.
type EmotionName ¶
type EmotionName string
const (
    EmotionNameHappy     EmotionName = "HAPPY"
    EmotionNameSad       EmotionName = "SAD"
    EmotionNameAngry     EmotionName = "ANGRY"
    EmotionNameConfused  EmotionName = "CONFUSED"
    EmotionNameDisgusted EmotionName = "DISGUSTED"
    EmotionNameSurprised EmotionName = "SURPRISED"
    EmotionNameCalm      EmotionName = "CALM"
    EmotionNameUnknown   EmotionName = "UNKNOWN"
    EmotionNameFear      EmotionName = "FEAR"
)
Enum values for EmotionName
type EvaluationResult ¶
type EvaluationResult struct {
    // The F1 score for the evaluation of all labels. The F1 score metric evaluates
    // the overall precision and recall performance of the model as a single value. A
    // higher value indicates better precision and recall performance. A lower score
    // indicates that precision, recall, or both are performing poorly.
    F1Score *float32

    // The S3 bucket that contains the training summary.
    Summary *Summary
}
The evaluation results for the training of a model.
type EyeOpen ¶
type EyeOpen struct {
    // Level of confidence in the determination.
    Confidence *float32

    // Boolean value that indicates whether the eyes on the face are open.
    Value *bool
}
Indicates whether or not the eyes on the face are open, and the confidence level in the determination.
type Eyeglasses ¶
type Eyeglasses struct {
    // Level of confidence in the determination.
    Confidence *float32

    // Boolean value that indicates whether the face is wearing eye glasses or not.
    Value *bool
}
Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.
type Face ¶
type Face struct {
    // Unique identifier that Amazon Rekognition assigns to the face.
    FaceId *string

    // Confidence level that the bounding box contains a face (and not a different
    // object such as a tree).
    Confidence *float32

    // Unique identifier that Amazon Rekognition assigns to the input image.
    ImageId *string

    // Bounding box of the face.
    BoundingBox *BoundingBox

    // Identifier that you assign to all the faces in the input image.
    ExternalImageId *string
}
Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.
type FaceAttributes ¶
type FaceAttributes string
const (
    FaceAttributesDefault FaceAttributes = "DEFAULT"
    FaceAttributesAll     FaceAttributes = "ALL"
)
Enum values for FaceAttributes
type FaceDetail ¶
type FaceDetail struct {
    // Indicates whether or not the mouth on the face is open, and the confidence
    // level in the determination.
    MouthOpen *MouthOpen

    // Indicates whether or not the face is wearing sunglasses, and the confidence
    // level in the determination.
    Sunglasses *Sunglasses

    // Indicates whether or not the eyes on the face are open, and the confidence
    // level in the determination.
    EyesOpen *EyeOpen

    // The predicted gender of a detected face.
    Gender *Gender

    // Indicates whether or not the face has a mustache, and the confidence level in
    // the determination.
    Mustache *Mustache

    // Identifies image brightness and sharpness. Default attribute.
    Quality *ImageQuality

    // Indicates whether or not the face is wearing eye glasses, and the confidence
    // level in the determination.
    Eyeglasses *Eyeglasses

    // Confidence level that the bounding box contains a face (and not a different
    // object such as a tree). Default attribute.
    Confidence *float32

    // Indicates whether or not the face is smiling, and the confidence level in the
    // determination.
    Smile *Smile

    // Indicates whether or not the face has a beard, and the confidence level in the
    // determination.
    Beard *Beard

    // The emotions that appear to be expressed on the face, and the confidence level
    // in the determination. The API is only making a determination of the physical
    // appearance of a person's face. It is not a determination of the person's
    // internal emotional state and should not be used in such a way. For example, a
    // person pretending to have a sad face might not be sad emotionally.
    Emotions []*Emotion

    // Bounding box of the face. Default attribute.
    BoundingBox *BoundingBox

    // Indicates the location of landmarks on the face. Default attribute.
    Landmarks []*Landmark

    // The estimated age range, in years, for the face. Low represents the lowest
    // estimated age and High represents the highest estimated age.
    AgeRange *AgeRange

    // Indicates the pose of the face as determined by its pitch, roll, and yaw.
    // Default attribute.
    Pose *Pose
}
Structure containing attributes of the face that the algorithm detected. A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality. GetFaceDetection is the only Amazon Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.
- GetCelebrityRecognition
- GetPersonTracking
- GetFaceSearch
The Amazon Rekognition Image DetectFaces and IndexFaces operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.
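A sketch of that request shape, assuming the DetectFacesInput type from the companion rekognition client package; the bucket and key are placeholders, and the enum value is written as a literal string conversion to keep the example self-contained.

package example

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// buildDetectFacesInput builds a DetectFaces request that asks for all
// facial attributes instead of the default set.
func buildDetectFacesInput() *rekognition.DetectFacesInput {
    return &rekognition.DetectFacesInput{
        Image: &types.Image{
            S3Object: &types.S3Object{
                Bucket: aws.String("my-bucket"),    // placeholder bucket
                Name:   aws.String("photos/a.jpg"), // placeholder key
            },
        },
        // Request all facial attributes.
        Attributes: []types.Attribute{types.Attribute("ALL")},
    }
}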
type FaceDetection ¶
type FaceDetection struct {
    // Time, in milliseconds from the start of the video, that the face was detected.
    Timestamp *int64

    // The face properties for the detected face.
    Face *FaceDetail
}
Information about a face detected in a video analysis request and the time the face was detected in the video.
type FaceMatch ¶
type FaceMatch struct {
    // Confidence in the match of this face with the input face.
    Similarity *float32

    // Describes the face properties such as the bounding box, face ID, image ID of
    // the source image, and external image ID that you assigned.
    Face *Face
}
Provides face metadata. In addition, it also provides the confidence in the match of this face with the input face.
type FaceRecord ¶
type FaceRecord struct {
    // Describes the face properties such as the bounding box, face ID, image ID of
    // the input image, and external image ID that you assigned.
    Face *Face

    // Structure containing attributes of the face that the algorithm detected.
    FaceDetail *FaceDetail
}
Object containing both the face metadata (stored in the backend database), and facial attributes that are detected but aren't stored in the database.
type FaceSearchSettings ¶
type FaceSearchSettings struct {
    // The ID of a collection that contains faces that you want to search for.
    CollectionId *string

    // Minimum face match confidence score that must be met to return a result for a
    // recognized face. Default is 80. 0 is the lowest confidence. 100 is the highest
    // confidence.
    FaceMatchThreshold *float32
}
Input face recognition parameters for an Amazon Rekognition stream processor. FaceSearchSettings is a request parameter for CreateStreamProcessor.
type FaceSearchSortBy ¶
type FaceSearchSortBy string
const (
    FaceSearchSortByIndex     FaceSearchSortBy = "INDEX"
    FaceSearchSortByTimestamp FaceSearchSortBy = "TIMESTAMP"
)
Enum values for FaceSearchSortBy
type Gender ¶
type Gender struct {
    // Level of confidence in the prediction.
    Confidence *float32

    // The predicted gender of the face.
    Value GenderType
}
The predicted gender of a detected face. Amazon Rekognition makes gender binary (male/female) predictions based on the physical appearance of a face in a particular image. This kind of prediction is not designed to categorize a person's gender identity, and you shouldn't use Amazon Rekognition to make such a determination. For example, a male actor wearing a long-haired wig and earrings for a role might be predicted as female.

Using Amazon Rekognition to make gender binary predictions is best suited for use cases where aggregate gender distribution statistics need to be analyzed without identifying specific users. For example, the percentage of female users compared to male users on a social media platform.

We don't recommend using gender binary predictions to make decisions that impact an individual's rights, privacy, or access to services.
type GenderType ¶
type GenderType string
const (
    GenderTypeMale   GenderType = "Male"
    GenderTypeFemale GenderType = "Female"
)
Enum values for GenderType
type Geometry ¶
type Geometry struct {
    // Within the bounding box, a fine-grained polygon around the detected item.
    Polygon []*Point

    // An axis-aligned coarse representation of the detected item's location on the
    // image.
    BoundingBox *BoundingBox
}
Information about where an object (DetectCustomLabels) or text (DetectText) is located on an image.
type GroundTruthManifest ¶
type GroundTruthManifest struct {
    // Provides the S3 bucket name and object name. The region for the S3 bucket
    // containing the S3 object must match the region you use for Amazon Rekognition
    // operations. For Amazon Rekognition to process an S3 object, the user must
    // have permission to access the S3 object. For more information, see
    // Resource-Based Policies in the Amazon Rekognition Developer Guide.
    S3Object *S3Object
}
The S3 bucket that contains the Ground Truth manifest file.
type HumanLoopActivationOutput ¶
type HumanLoopActivationOutput struct {
    // Shows the result of condition evaluations, including those conditions which
    // activated a human review.
    // This value conforms to the media type: application/json
    HumanLoopActivationConditionsEvaluationResults *string

    // The Amazon Resource Name (ARN) of the HumanLoop created.
    HumanLoopArn *string

    // Shows if and why human review was needed.
    HumanLoopActivationReasons []*string
}
Shows the results of the human in the loop evaluation. If there is no HumanLoopArn, the input did not trigger human review.
type HumanLoopConfig ¶
type HumanLoopConfig struct {
    // Sets attributes of the input data.
    DataAttributes *HumanLoopDataAttributes

    // The name of the human review used for this image. This should be kept unique
    // within a region.
    //
    // This member is required.
    HumanLoopName *string

    // The Amazon Resource Name (ARN) of the flow definition. You can create a flow
    // definition by using the Amazon SageMaker CreateFlowDefinition
    // (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html)
    // operation.
    //
    // This member is required.
    FlowDefinitionArn *string
}
Sets up the flow definition the image will be sent to if one of the conditions is met. You can also set certain attributes of the image before review.
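A minimal sketch of a config with both required members set; the loop name and flow definition ARN below are hypothetical placeholders.

package example

import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"

// reviewConfig builds a HumanLoopConfig with both required members set.
func reviewConfig() *types.HumanLoopConfig {
    name := "moderation-review-loop"                                           // hypothetical loop name
    flowArn := "arn:aws:sagemaker:us-east-1:123456789012:flow-definition/demo" // hypothetical ARN

    return &types.HumanLoopConfig{
        HumanLoopName:     &name,
        FlowDefinitionArn: &flowArn,
        // Optionally declare the image free of personally identifiable
        // information before it is sent for review.
        DataAttributes: &types.HumanLoopDataAttributes{
            ContentClassifiers: []types.ContentClassifier{
                types.ContentClassifierFree_of_personally_identifiable_information,
            },
        },
    }
}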
type HumanLoopDataAttributes ¶
type HumanLoopDataAttributes struct {
    // Sets whether the input image is free of personally identifiable information.
    ContentClassifiers []ContentClassifier
}
Allows you to set attributes of the image. Currently, you can declare an image as free of personally identifiable information.
type HumanLoopQuotaExceededException ¶
type HumanLoopQuotaExceededException struct {
    Message      *string
    ResourceType *string
    QuotaCode    *string
    Logref       *string
    Code         *string
    ServiceCode  *string
}
The number of in-progress human reviews you have has exceeded the number allowed.
func (*HumanLoopQuotaExceededException) Error ¶
func (e *HumanLoopQuotaExceededException) Error() string
func (*HumanLoopQuotaExceededException) ErrorCode ¶
func (e *HumanLoopQuotaExceededException) ErrorCode() string
func (*HumanLoopQuotaExceededException) ErrorFault ¶
func (e *HumanLoopQuotaExceededException) ErrorFault() smithy.ErrorFault
func (*HumanLoopQuotaExceededException) ErrorMessage ¶
func (e *HumanLoopQuotaExceededException) ErrorMessage() string
type IdempotentParameterMismatchException ¶
A ClientRequestToken input parameter was reused with an operation, but at least one of the other input parameters is different from the previous call to the operation.
func (*IdempotentParameterMismatchException) Error ¶
func (e *IdempotentParameterMismatchException) Error() string
func (*IdempotentParameterMismatchException) ErrorCode ¶
func (e *IdempotentParameterMismatchException) ErrorCode() string
func (*IdempotentParameterMismatchException) ErrorFault ¶
func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault
func (*IdempotentParameterMismatchException) ErrorMessage ¶
func (e *IdempotentParameterMismatchException) ErrorMessage() string
type Image ¶
type Image struct {
    // Blob of image bytes up to 5 MBs.
    Bytes []byte

    // Identifies an S3 object as the image source.
    S3Object *S3Object
}
Provides the input image either as bytes or an S3 object. You pass image bytes to an Amazon Rekognition API operation by using the Bytes property. For example, you would use the Bytes property to pass an image loaded from a local file system. Image bytes passed by using the Bytes property must be base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to call Amazon Rekognition API operations. For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.

You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the S3Object property. Images stored in an S3 bucket do not need to be base64-encoded. The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.
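A sketch of both input styles; the file path, bucket, and key are placeholders, and the raw bytes are encoded for you when you call through an AWS SDK.

package example

import (
    "io/ioutil"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// inputImages returns the same image supplied two ways: as raw bytes read
// from the local file system, and as an S3 object reference.
func inputImages() (*types.Image, *types.Image) {
    data, err := ioutil.ReadFile("face.jpg") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    fromBytes := &types.Image{Bytes: data}

    // The same image referenced in S3 instead; no encoding needed.
    fromS3 := &types.Image{
        S3Object: &types.S3Object{
            Bucket: aws.String("my-bucket"), // placeholder bucket
            Name:   aws.String("face.jpg"),  // placeholder key
        },
    }
    return fromBytes, fromS3
}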
type ImageQuality ¶
type ImageQuality struct {
    // Value representing sharpness of the face. The service returns a value between
    // 0 and 100 (inclusive). A higher value indicates a sharper face image.
    Sharpness *float32

    // Value representing brightness of the face. The service returns a value between
    // 0 and 100 (inclusive). A higher value indicates a brighter face image.
    Brightness *float32
}
Identifies face image brightness and sharpness.
type ImageTooLargeException ¶
The input image size exceeds the allowed limit. For more information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
func (*ImageTooLargeException) Error ¶
func (e *ImageTooLargeException) Error() string
func (*ImageTooLargeException) ErrorCode ¶
func (e *ImageTooLargeException) ErrorCode() string
func (*ImageTooLargeException) ErrorFault ¶
func (e *ImageTooLargeException) ErrorFault() smithy.ErrorFault
func (*ImageTooLargeException) ErrorMessage ¶
func (e *ImageTooLargeException) ErrorMessage() string
type Instance ¶
type Instance struct {
    // The position of the label instance on the image.
    BoundingBox *BoundingBox

    // The confidence that Amazon Rekognition has in the accuracy of the bounding box.
    Confidence *float32
}
An instance of a label returned by Amazon Rekognition Image (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).
type InternalServerError ¶
Amazon Rekognition experienced a service issue. Try your call again.
func (*InternalServerError) Error ¶
func (e *InternalServerError) Error() string
func (*InternalServerError) ErrorCode ¶
func (e *InternalServerError) ErrorCode() string
func (*InternalServerError) ErrorFault ¶
func (e *InternalServerError) ErrorFault() smithy.ErrorFault
func (*InternalServerError) ErrorMessage ¶
func (e *InternalServerError) ErrorMessage() string
type InvalidImageFormatException ¶
The provided image format is not supported.
func (*InvalidImageFormatException) Error ¶
func (e *InvalidImageFormatException) Error() string
func (*InvalidImageFormatException) ErrorCode ¶
func (e *InvalidImageFormatException) ErrorCode() string
func (*InvalidImageFormatException) ErrorFault ¶
func (e *InvalidImageFormatException) ErrorFault() smithy.ErrorFault
func (*InvalidImageFormatException) ErrorMessage ¶
func (e *InvalidImageFormatException) ErrorMessage() string
type InvalidPaginationTokenException ¶
Pagination token in the request is not valid.
func (*InvalidPaginationTokenException) Error ¶
func (e *InvalidPaginationTokenException) Error() string
func (*InvalidPaginationTokenException) ErrorCode ¶
func (e *InvalidPaginationTokenException) ErrorCode() string
func (*InvalidPaginationTokenException) ErrorFault ¶
func (e *InvalidPaginationTokenException) ErrorFault() smithy.ErrorFault
func (*InvalidPaginationTokenException) ErrorMessage ¶
func (e *InvalidPaginationTokenException) ErrorMessage() string
type InvalidParameterException ¶
Input parameter violated a constraint. Validate your parameter before calling the API operation again.
func (*InvalidParameterException) Error ¶
func (e *InvalidParameterException) Error() string
func (*InvalidParameterException) ErrorCode ¶
func (e *InvalidParameterException) ErrorCode() string
func (*InvalidParameterException) ErrorFault ¶
func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault
func (*InvalidParameterException) ErrorMessage ¶
func (e *InvalidParameterException) ErrorMessage() string
type InvalidS3ObjectException ¶
Amazon Rekognition is unable to access the S3 object specified in the request.
func (*InvalidS3ObjectException) Error ¶
func (e *InvalidS3ObjectException) Error() string
func (*InvalidS3ObjectException) ErrorCode ¶
func (e *InvalidS3ObjectException) ErrorCode() string
func (*InvalidS3ObjectException) ErrorFault ¶
func (e *InvalidS3ObjectException) ErrorFault() smithy.ErrorFault
func (*InvalidS3ObjectException) ErrorMessage ¶
func (e *InvalidS3ObjectException) ErrorMessage() string
type KinesisDataStream ¶
type KinesisDataStream struct {
    // ARN of the output Amazon Kinesis Data Streams stream.
    Arn *string
}
The Kinesis data stream to which the analysis results of an Amazon Rekognition stream processor are streamed. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
type KinesisVideoStream ¶
type KinesisVideoStream struct {
    // ARN of the Kinesis video stream that streams the source video.
    Arn *string
}
The Kinesis video stream that provides the source streaming video for an Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
type Label ¶
type Label struct {
    // The name (label) of the object or scene.
    Name *string

    // If Label represents an object, Instances contains the bounding boxes for each
    // instance of the detected object. Bounding boxes are returned for common object
    // labels such as people, cars, furniture, apparel or pets.
    Instances []*Instance

    // The parent labels for a label. The response includes all ancestor labels.
    Parents []*Parent

    // Level of confidence.
    Confidence *float32
}
Structure containing details about the detected label, including the name, detected instances, parent labels, and level of confidence.
type LabelDetection ¶
type LabelDetection struct {
    // Details about the detected label.
    Label *Label

    // Time, in milliseconds from the start of the video, that the label was detected.
    Timestamp *int64
}
Information about a label detected in a video analysis request and the time the label was detected in the video.
type LabelDetectionSortBy ¶
type LabelDetectionSortBy string
const (
    LabelDetectionSortByName      LabelDetectionSortBy = "NAME"
    LabelDetectionSortByTimestamp LabelDetectionSortBy = "TIMESTAMP"
)
Enum values for LabelDetectionSortBy
type Landmark ¶
type Landmark struct {
    // Type of landmark.
    Type LandmarkType

    // The y-coordinate from the top left of the landmark expressed as the ratio of
    // the height of the image. For example, if the image is 700 x 200 and the
    // y-coordinate of the landmark is at 100 pixels, this value is 0.5.
    Y *float32

    // The x-coordinate from the top left of the landmark expressed as the ratio of
    // the width of the image. For example, if the image is 700 x 200 and the
    // x-coordinate of the landmark is at 350 pixels, this value is 0.5.
    X *float32
}
Indicates the location of the landmark on the face.
type LandmarkType ¶
type LandmarkType string
const (
    LandmarkTypeEyeleft           LandmarkType = "eyeLeft"
    LandmarkTypeEyeright          LandmarkType = "eyeRight"
    LandmarkTypeNose              LandmarkType = "nose"
    LandmarkTypeMouthleft         LandmarkType = "mouthLeft"
    LandmarkTypeMouthright        LandmarkType = "mouthRight"
    LandmarkTypeLefteyebrowleft   LandmarkType = "leftEyeBrowLeft"
    LandmarkTypeLefteyebrowright  LandmarkType = "leftEyeBrowRight"
    LandmarkTypeLefteyebrowup     LandmarkType = "leftEyeBrowUp"
    LandmarkTypeRighteyebrowleft  LandmarkType = "rightEyeBrowLeft"
    LandmarkTypeRighteyebrowright LandmarkType = "rightEyeBrowRight"
    LandmarkTypeRighteyebrowup    LandmarkType = "rightEyeBrowUp"
    LandmarkTypeLefteyeleft       LandmarkType = "leftEyeLeft"
    LandmarkTypeLefteyeright      LandmarkType = "leftEyeRight"
    LandmarkTypeLefteyeup         LandmarkType = "leftEyeUp"
    LandmarkTypeLefteyedown       LandmarkType = "leftEyeDown"
    LandmarkTypeRighteyeleft      LandmarkType = "rightEyeLeft"
    LandmarkTypeRighteyeright     LandmarkType = "rightEyeRight"
    LandmarkTypeRighteyeup        LandmarkType = "rightEyeUp"
    LandmarkTypeRighteyedown      LandmarkType = "rightEyeDown"
    LandmarkTypeNoseleft          LandmarkType = "noseLeft"
    LandmarkTypeNoseright         LandmarkType = "noseRight"
    LandmarkTypeMouthup           LandmarkType = "mouthUp"
    LandmarkTypeMouthdown         LandmarkType = "mouthDown"
    LandmarkTypeLeftpupil         LandmarkType = "leftPupil"
    LandmarkTypeRightpupil        LandmarkType = "rightPupil"
    LandmarkTypeUpperjawlineleft  LandmarkType = "upperJawlineLeft"
    LandmarkTypeMidjawlineleft    LandmarkType = "midJawlineLeft"
    LandmarkTypeChinbottom        LandmarkType = "chinBottom"
    LandmarkTypeMidjawlineright   LandmarkType = "midJawlineRight"
    LandmarkTypeUpperjawlineright LandmarkType = "upperJawlineRight"
)
Enum values for LandmarkType
type LimitExceededException ¶
An Amazon Rekognition service limit was exceeded. For example, if you start too many Amazon Rekognition Video jobs concurrently, calls to start operations (StartLabelDetection, for example) will raise a LimitExceededException (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.
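These exception types satisfy the error interface, so they can be detected with errors.As from the standard library. A sketch under that assumption; the backoff policy is illustrative.

package example

import (
    "errors"
    "time"

    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// isJobLimitError reports whether err is a LimitExceededException, so a
// caller can wait for running jobs to finish before retrying a Start
// operation.
func isJobLimitError(err error) bool {
    var limitErr *types.LimitExceededException
    if errors.As(err, &limitErr) {
        time.Sleep(30 * time.Second) // illustrative pause before retrying
        return true
    }
    return false
}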
func (*LimitExceededException) Error ¶
func (e *LimitExceededException) Error() string
func (*LimitExceededException) ErrorCode ¶
func (e *LimitExceededException) ErrorCode() string
func (*LimitExceededException) ErrorFault ¶
func (e *LimitExceededException) ErrorFault() smithy.ErrorFault
func (*LimitExceededException) ErrorMessage ¶
func (e *LimitExceededException) ErrorMessage() string
type ModerationLabel ¶
type ModerationLabel struct {
    // The name for the parent label. Labels at the top level of the hierarchy have
    // the parent label "".
    ParentName *string

    // The label name for the type of unsafe content detected in the image.
    Name *string

    // Specifies the confidence that Amazon Rekognition has that the label has been
    // correctly identified. If you don't specify the MinConfidence parameter in the
    // call to DetectModerationLabels, the operation returns labels with a confidence
    // value greater than or equal to 50 percent.
    Confidence *float32
}
Provides information about a single type of unsafe content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.
type MouthOpen ¶
type MouthOpen struct {
    // Boolean value that indicates whether the mouth on the face is open or not.
    Value *bool

    // Level of confidence in the determination.
    Confidence *float32
}
Indicates whether or not the mouth on the face is open, and the confidence level in the determination.
type Mustache ¶
type Mustache struct {
    // Boolean value that indicates whether the face has a mustache or not.
    Value *bool

    // Level of confidence in the determination.
    Confidence *float32
}
Indicates whether or not the face has a mustache, and the confidence level in the determination.
type NotificationChannel ¶
type NotificationChannel struct {
    // The ARN of an IAM role that gives Amazon Rekognition publishing permissions to
    // the Amazon SNS topic.
    //
    // This member is required.
    RoleArn *string

    // The Amazon SNS topic to which Amazon Rekognition posts the completion status.
    //
    // This member is required.
    SNSTopicArn *string
}
The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see Calling Amazon Rekognition Video Operations in the Amazon Rekognition Developer Guide.
type OrientationCorrection ¶
type OrientationCorrection string
const (
    OrientationCorrectionRotate_0   OrientationCorrection = "ROTATE_0"
    OrientationCorrectionRotate_90  OrientationCorrection = "ROTATE_90"
    OrientationCorrectionRotate_180 OrientationCorrection = "ROTATE_180"
    OrientationCorrectionRotate_270 OrientationCorrection = "ROTATE_270"
)
Enum values for OrientationCorrection
type OutputConfig ¶
type OutputConfig struct {
    // The S3 bucket where training output is placed.
    S3Bucket *string

    // The prefix applied to the training output files.
    S3KeyPrefix *string
}
The S3 bucket and folder location where training output is placed.
type Parent ¶
type Parent struct {
    // The name of the parent label.
    Name *string
}
A parent label for a label. A label can have 0, 1, or more parents.
type PersonDetail ¶
type PersonDetail struct {
    // Identifier for the detected person within a video. Use to keep track of the
    // person throughout the video. The identifier is not stored by Amazon
    // Rekognition.
    Index *int64

    // Face details for the detected person.
    Face *FaceDetail

    // Bounding box around the detected person.
    BoundingBox *BoundingBox
}
Details about a person detected in a video analysis request.
type PersonDetection ¶
type PersonDetection struct {
    // The time, in milliseconds from the start of the video, that the person's path
    // was tracked.
    Timestamp *int64

    // Details about a person whose path was tracked in a video.
    Person *PersonDetail
}
Details and path tracking information for a single time a person's path is tracked in a video. Amazon Rekognition operations that track people's paths return an array of PersonDetection objects with elements for each time a person's path is tracked in a video. For more information, see GetPersonTracking in the Amazon Rekognition Developer Guide.
type PersonMatch ¶
type PersonMatch struct {
    // Information about the faces in the input collection that match the face of a
    // person in the video.
    FaceMatches []*FaceMatch

    // The time, in milliseconds from the beginning of the video, that the person was
    // matched in the video.
    Timestamp *int64

    // Information about the matched person.
    Person *PersonDetail
}
Information about a person whose face matches a face(s) in an Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (FaceMatch), information about the person (PersonDetail), and the time stamp for when the person was detected in a video. An array of PersonMatch objects is returned by GetFaceSearch.
type PersonTrackingSortBy ¶
type PersonTrackingSortBy string
const (
    PersonTrackingSortByIndex     PersonTrackingSortBy = "INDEX"
    PersonTrackingSortByTimestamp PersonTrackingSortBy = "TIMESTAMP"
)
Enum values for PersonTrackingSortBy
type Point ¶
type Point struct {
    // The value of the X coordinate for a point on a Polygon.
    X *float32

    // The value of the Y coordinate for a point on a Polygon.
    Y *float32
}
The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.

An array of Point objects, Polygon, is returned by DetectText and by DetectCustomLabels. Polygon represents a fine-grained polygon around a detected item. For more information, see Geometry in the Amazon Rekognition Developer Guide.
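The same ratio scaling applies to each point of a Polygon. A small sketch; the polygonToPixels helper is illustrative, not part of the SDK.

package example

import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"

// polygonToPixels scales a Polygon's ratio coordinates to pixel coordinates
// for an image of the given dimensions. Points with nil coordinates are
// skipped.
func polygonToPixels(polygon []*types.Point, imgW, imgH float32) [][2]float32 {
    pixels := make([][2]float32, 0, len(polygon))
    for _, p := range polygon {
        if p == nil || p.X == nil || p.Y == nil {
            continue
        }
        pixels = append(pixels, [2]float32{*p.X * imgW, *p.Y * imgH})
    }
    return pixels
}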
type Pose ¶
type Pose struct {
    // Value representing the face rotation on the yaw axis.
    Yaw *float32

    // Value representing the face rotation on the roll axis.
    Roll *float32

    // Value representing the face rotation on the pitch axis.
    Pitch *float32
}
Indicates the pose of the face as determined by its pitch, roll, and yaw.
type ProjectDescription ¶
type ProjectDescription struct {
    // The current status of the project.
    Status ProjectStatus

    // The Amazon Resource Name (ARN) of the project.
    ProjectArn *string

    // The Unix timestamp for the date and time that the project was created.
    CreationTimestamp *time.Time
}
A description of an Amazon Rekognition Custom Labels project.
type ProjectStatus ¶
type ProjectStatus string
const (
    ProjectStatusCreating ProjectStatus = "CREATING"
    ProjectStatusCreated  ProjectStatus = "CREATED"
    ProjectStatusDeleting ProjectStatus = "DELETING"
)
Enum values for ProjectStatus
type ProjectVersionDescription ¶
type ProjectVersionDescription struct {
    // The location where training results are saved.
    OutputConfig *OutputConfig

    // The current status of the model version.
    Status ProjectVersionStatus

    // The minimum number of inference units used by the model. For more information,
    // see StartProjectVersion.
    MinInferenceUnits *int32

    // The Unix date and time that training of the model ended.
    TrainingEndTimestamp *time.Time

    // The manifest file that represents the testing results.
    TestingDataResult *TestingDataResult

    // The manifest file that represents the training results.
    TrainingDataResult *TrainingDataResult

    // The Amazon Resource Name (ARN) of the model version.
    ProjectVersionArn *string

    // The training results. EvaluationResult is only returned if training is
    // successful.
    EvaluationResult *EvaluationResult

    // The duration, in seconds, that the model version has been billed for training.
    // This value is only returned if the model version has been successfully trained.
    BillableTrainingTimeInSeconds *int64

    // A descriptive message for an error or warning that occurred.
    StatusMessage *string

    // The Unix datetime for the date and time that training started.
    CreationTimestamp *time.Time
}
The description of a version of a model.
type ProjectVersionStatus ¶
type ProjectVersionStatus string
const (
    ProjectVersionStatusTraining_in_progress ProjectVersionStatus = "TRAINING_IN_PROGRESS"
    ProjectVersionStatusTraining_completed   ProjectVersionStatus = "TRAINING_COMPLETED"
    ProjectVersionStatusTraining_failed      ProjectVersionStatus = "TRAINING_FAILED"
    ProjectVersionStatusStarting             ProjectVersionStatus = "STARTING"
    ProjectVersionStatusRunning              ProjectVersionStatus = "RUNNING"
    ProjectVersionStatusFailed               ProjectVersionStatus = "FAILED"
    ProjectVersionStatusStopping             ProjectVersionStatus = "STOPPING"
    ProjectVersionStatusStopped              ProjectVersionStatus = "STOPPED"
    ProjectVersionStatusDeleting             ProjectVersionStatus = "DELETING"
)
Enum values for ProjectVersionStatus
type ProvisionedThroughputExceededException ¶
The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.
func (*ProvisionedThroughputExceededException) Error ¶
func (e *ProvisionedThroughputExceededException) Error() string
func (*ProvisionedThroughputExceededException) ErrorCode ¶
func (e *ProvisionedThroughputExceededException) ErrorCode() string
func (*ProvisionedThroughputExceededException) ErrorFault ¶
func (e *ProvisionedThroughputExceededException) ErrorFault() smithy.ErrorFault
func (*ProvisionedThroughputExceededException) ErrorMessage ¶
func (e *ProvisionedThroughputExceededException) ErrorMessage() string
type QualityFilter ¶
type QualityFilter string
const (
    QualityFilterNone   QualityFilter = "NONE"
    QualityFilterAuto   QualityFilter = "AUTO"
    QualityFilterLow    QualityFilter = "LOW"
    QualityFilterMedium QualityFilter = "MEDIUM"
    QualityFilterHigh   QualityFilter = "HIGH"
)
Enum values for QualityFilter
type Reason ¶
type Reason string
const (
    ReasonExceeds_max_faces  Reason = "EXCEEDS_MAX_FACES"
    ReasonExtreme_pose       Reason = "EXTREME_POSE"
    ReasonLow_brightness     Reason = "LOW_BRIGHTNESS"
    ReasonLow_sharpness      Reason = "LOW_SHARPNESS"
    ReasonLow_confidence     Reason = "LOW_CONFIDENCE"
    ReasonSmall_bounding_box Reason = "SMALL_BOUNDING_BOX"
    ReasonLow_face_quality   Reason = "LOW_FACE_QUALITY"
)
Enum values for Reason
type RegionOfInterest ¶
type RegionOfInterest struct {
    // The box representing a region of interest on screen.
    BoundingBox *BoundingBox
}
Specifies a location within the frame that Rekognition checks for text. Uses a BoundingBox object to set a region of the screen. A word is included in the region if the word is more than half in that region. If there is more than one region, the word will be compared with all regions of the screen. Any word more than half in a region is kept in the results.
type ResourceAlreadyExistsException ¶
A collection with the specified ID already exists.
func (*ResourceAlreadyExistsException) Error ¶
func (e *ResourceAlreadyExistsException) Error() string
func (*ResourceAlreadyExistsException) ErrorCode ¶
func (e *ResourceAlreadyExistsException) ErrorCode() string
func (*ResourceAlreadyExistsException) ErrorFault ¶
func (e *ResourceAlreadyExistsException) ErrorFault() smithy.ErrorFault
func (*ResourceAlreadyExistsException) ErrorMessage ¶
func (e *ResourceAlreadyExistsException) ErrorMessage() string
type ResourceInUseException ¶
The specified resource is already being used.
func (*ResourceInUseException) Error ¶
func (e *ResourceInUseException) Error() string
func (*ResourceInUseException) ErrorCode ¶
func (e *ResourceInUseException) ErrorCode() string
func (*ResourceInUseException) ErrorFault ¶
func (e *ResourceInUseException) ErrorFault() smithy.ErrorFault
func (*ResourceInUseException) ErrorMessage ¶
func (e *ResourceInUseException) ErrorMessage() string
type ResourceNotFoundException ¶
The collection specified in the request cannot be found.
func (*ResourceNotFoundException) Error ¶
func (e *ResourceNotFoundException) Error() string
func (*ResourceNotFoundException) ErrorCode ¶
func (e *ResourceNotFoundException) ErrorCode() string
func (*ResourceNotFoundException) ErrorFault ¶
func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault
func (*ResourceNotFoundException) ErrorMessage ¶
func (e *ResourceNotFoundException) ErrorMessage() string
type ResourceNotReadyException ¶
The requested resource isn't ready. For example, this exception occurs when you call DetectCustomLabels with a model version that isn't deployed.
func (*ResourceNotReadyException) Error ¶
func (e *ResourceNotReadyException) Error() string
func (*ResourceNotReadyException) ErrorCode ¶
func (e *ResourceNotReadyException) ErrorCode() string
func (*ResourceNotReadyException) ErrorFault ¶
func (e *ResourceNotReadyException) ErrorFault() smithy.ErrorFault
func (*ResourceNotReadyException) ErrorMessage ¶
func (e *ResourceNotReadyException) ErrorMessage() string
type S3Object ¶
type S3Object struct {
    // Name of the S3 bucket.
    Bucket *string

    // If the bucket is versioning enabled, you can specify the object version.
    Version *string

    // S3 object key name.
    Name *string
}
Provides the S3 bucket name and object name. The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource-Based Policies in the Amazon Rekognition Developer Guide.
type SegmentDetection ¶
type SegmentDetection struct {
    // The frame-accurate SMPTE timecode, from the start of a video, for the start of
    // a detected segment. StartTimecode is in HH:MM:SS:fr format (and ;fr for drop
    // frame-rates).
    StartTimecodeSMPTE *string

    // The duration of the timecode for the detected segment in SMPTE format.
    DurationSMPTE *string

    // If the segment is a shot detection, contains information about the shot
    // detection.
    ShotSegment *ShotSegment

    // The frame-accurate SMPTE timecode, from the start of a video, for the end of a
    // detected segment. EndTimecode is in HH:MM:SS:fr format (and ;fr for drop
    // frame-rates).
    EndTimecodeSMPTE *string

    // The end time of the detected segment, in milliseconds, from the start of the
    // video.
    EndTimestampMillis *int64

    // The start time of the detected segment in milliseconds from the start of the
    // video.
    StartTimestampMillis *int64

    // The type of the segment. Valid values are TECHNICAL_CUE and SHOT.
    Type SegmentType

    // If the segment is a technical cue, contains information about the technical
    // cue.
    TechnicalCueSegment *TechnicalCueSegment

    // The duration of the detected segment in milliseconds.
    DurationMillis *int64
}
A technical cue or shot detection segment detected in a video. An array of SegmentDetection objects containing all segments detected in a stored video is returned by GetSegmentDetection.
type SegmentType ¶
type SegmentType string
const (
    SegmentTypeTechnical_cue SegmentType = "TECHNICAL_CUE"
    SegmentTypeShot          SegmentType = "SHOT"
)
Enum values for SegmentType
type SegmentTypeInfo ¶
type SegmentTypeInfo struct {
    // The type of a segment (technical cue or shot detection).
    Type SegmentType

    // The version of the model used to detect segments.
    ModelVersion *string
}
Information about the type of a segment requested in a call to StartSegmentDetection. An array of SegmentTypeInfo objects is returned by the response from GetSegmentDetection.
type ShotSegment ¶
type ShotSegment struct {
    // The confidence that Amazon Rekognition Video has in the accuracy of the
    // detected segment.
    Confidence *float32

    // An identifier for a shot detection segment detected in a video.
    Index *int64
}
Information about a shot detection segment detected in a video. For more information, see SegmentDetection.
type Smile ¶
type Smile struct {
    // Boolean value that indicates whether the face is smiling or not.
    Value *bool

    // Level of confidence in the determination.
    Confidence *float32
}
Indicates whether or not the face is smiling, and the confidence level in the determination.
type StartSegmentDetectionFilters ¶
type StartSegmentDetectionFilters struct {
    // Filters that are specific to shot detections.
    ShotFilter *StartShotDetectionFilter

    // Filters that are specific to technical cues.
    TechnicalCueFilter *StartTechnicalCueDetectionFilter
}
Filters applied to the technical cue or shot detection segments. For more information, see StartSegmentDetection.
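A sketch of filters that raise the confidence floor for both segment types; the 90 percent threshold is illustrative.

package example

import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"

// strictSegmentFilters applies the same minimum confidence to both shot and
// technical cue segments.
func strictSegmentFilters() *types.StartSegmentDetectionFilters {
    minConfidence := float32(90)
    return &types.StartSegmentDetectionFilters{
        ShotFilter: &types.StartShotDetectionFilter{
            MinSegmentConfidence: &minConfidence,
        },
        TechnicalCueFilter: &types.StartTechnicalCueDetectionFilter{
            MinSegmentConfidence: &minConfidence,
        },
    }
}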
type StartShotDetectionFilter ¶
type StartShotDetectionFilter struct {
    // Specifies the minimum confidence that Amazon Rekognition Video must have in
    // order to return a detected segment. Confidence represents how certain Amazon
    // Rekognition is that a segment is correctly identified. 0 is the lowest
    // confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
    // return any segments with a confidence level lower than this specified value.
    // If you don't specify MinSegmentConfidence, GetSegmentDetection returns
    // segments with confidence values greater than or equal to 50 percent.
    MinSegmentConfidence *float32
}
Filters for the shot detection segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.
type StartTechnicalCueDetectionFilter ¶
type StartTechnicalCueDetectionFilter struct {
    // Specifies the minimum confidence that Amazon Rekognition Video must have in
    // order to return a detected segment. Confidence represents how certain Amazon
    // Rekognition is that a segment is correctly identified. 0 is the lowest
    // confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
    // return any segments with a confidence level lower than this specified value.
    // If you don't specify MinSegmentConfidence, GetSegmentDetection returns
    // segments with confidence values greater than or equal to 50 percent.
    MinSegmentConfidence *float32
}
Filters for the technical segments returned by GetSegmentDetection (). For more information, see StartSegmentDetectionFilters ().
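Both filter types plug into a StartSegmentDetectionFilters value. A minimal sketch, raising both minimums above the 50 percent default; the value 80 is an arbitrary illustration:

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// highConfidenceFilters requires at least 80% confidence for both
// shot and technical cue segments instead of the 50% default.
func highConfidenceFilters() *types.StartSegmentDetectionFilters {
    return &types.StartSegmentDetectionFilters{
        ShotFilter: &types.StartShotDetectionFilter{
            MinSegmentConfidence: aws.Float32(80),
        },
        TechnicalCueFilter: &types.StartTechnicalCueDetectionFilter{
            MinSegmentConfidence: aws.Float32(80),
        },
    }
}

The returned value is passed in the Filters field of a StartSegmentDetectionInput, alongside the SegmentTypes being requested.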
type StartTextDetectionFilters ¶
type StartTextDetectionFilters struct {

    // Filter focusing on a certain area of the frame. Uses a BoundingBox object to
    // set the region of the screen.
    RegionsOfInterest []*RegionOfInterest

    // Filters focusing on qualities of the text, such as confidence or size.
    WordFilter *DetectionFilter
}
Set of optional parameters that let you set the criteria text must meet to be included in your response. WordFilter looks at a word's height, width and minimum confidence. RegionOfInterest lets you set a specific region of the screen to look for text in.
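For instance, a hedged sketch of a filter that keeps only reasonably confident words in the lower third of the frame; all numeric values are illustrative assumptions, not recommendations:

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// lowerThirdTextFilters keeps words with at least 50% confidence
// whose height is at least 5% of the frame, and only searches the
// lower third of the screen (BoundingBox values are ratios of the
// frame dimensions).
func lowerThirdTextFilters() *types.StartTextDetectionFilters {
    return &types.StartTextDetectionFilters{
        WordFilter: &types.DetectionFilter{
            MinConfidence:        aws.Float32(50),
            MinBoundingBoxHeight: aws.Float32(0.05),
        },
        RegionsOfInterest: []*types.RegionOfInterest{{
            BoundingBox: &types.BoundingBox{
                Left:   aws.Float32(0),
                Top:    aws.Float32(0.66),
                Width:  aws.Float32(1),
                Height: aws.Float32(0.34),
            },
        }},
    }
}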
type StreamProcessor ¶
type StreamProcessor struct {

    // Name of the Amazon Rekognition stream processor.
    Name *string

    // Current status of the Amazon Rekognition stream processor.
    Status StreamProcessorStatus
}
An object that recognizes faces in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor (). The request parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis results.
type StreamProcessorInput ¶
type StreamProcessorInput struct {

    // The Kinesis video stream input stream for the source streaming video.
    KinesisVideoStream *KinesisVideoStream
}
Information about the source streaming video.
type StreamProcessorOutput ¶
type StreamProcessorOutput struct {

    // The Amazon Kinesis Data Streams stream to which the Amazon Rekognition
    // stream processor streams the analysis results.
    KinesisDataStream *KinesisDataStream
}
Information about the Amazon Kinesis Data Streams stream to which an Amazon Rekognition Video stream processor streams the results of a video analysis. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
type StreamProcessorSettings ¶
type StreamProcessorSettings struct {

    // Face search settings to use on a streaming video.
    FaceSearch *FaceSearchSettings
}
Input parameters used to recognize faces in a streaming video analyzed by an Amazon Rekognition stream processor.
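Taken together, StreamProcessorInput, StreamProcessorOutput, and StreamProcessorSettings are the three pieces of a CreateStreamProcessor request. A minimal sketch; every ARN, name, and threshold below is a placeholder:

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// newStreamProcessorInput wires a Kinesis video stream source, a
// Kinesis Data Streams sink, and face search settings into one
// request. All identifiers are placeholders.
func newStreamProcessorInput() *rekognition.CreateStreamProcessorInput {
    return &rekognition.CreateStreamProcessorInput{
        Name:    aws.String("my-stream-processor"),
        RoleArn: aws.String("arn:aws:iam::111122223333:role/RekognitionStreamRole"),
        Input: &types.StreamProcessorInput{
            KinesisVideoStream: &types.KinesisVideoStream{
                Arn: aws.String("arn:aws:kinesisvideo:us-east-1:111122223333:stream/source-stream/1"),
            },
        },
        Output: &types.StreamProcessorOutput{
            KinesisDataStream: &types.KinesisDataStream{
                Arn: aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/analysis-results"),
            },
        },
        Settings: &types.StreamProcessorSettings{
            FaceSearch: &types.FaceSearchSettings{
                CollectionId:       aws.String("my-face-collection"),
                FaceMatchThreshold: aws.Float32(85),
            },
        },
    }
}

The request is then passed to CreateStreamProcessor (), and the processor is started separately with StartStreamProcessor ().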
type StreamProcessorStatus ¶
type StreamProcessorStatus string
const (
    StreamProcessorStatusStopped  StreamProcessorStatus = "STOPPED"
    StreamProcessorStatusStarting StreamProcessorStatus = "STARTING"
    StreamProcessorStatusRunning  StreamProcessorStatus = "RUNNING"
    StreamProcessorStatusFailed   StreamProcessorStatus = "FAILED"
    StreamProcessorStatusStopping StreamProcessorStatus = "STOPPING"
)
Enum values for StreamProcessorStatus
type Summary ¶
type Summary struct {

    // Provides the S3 bucket name and object name. The region for the S3 bucket
    // containing the S3 object must match the region you use for Amazon Rekognition
    // operations. For Amazon Rekognition to process an S3 object, the user must
    // have permission to access the S3 object. For more information, see
    // Resource-Based Policies in the Amazon Rekognition Developer Guide.
    S3Object *S3Object
}
The S3 bucket that contains the training summary. The training summary includes aggregated evaluation metrics for the entire testing dataset and metrics for each individual label. You get the training summary S3 bucket location by calling DescribeProjectVersions ().
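A hedged sketch of retrieving that location, assuming a configured client and a placeholder project ARN (the Summary is carried on each version's EvaluationResult):

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
)

// printTrainingSummaries lists the training summary locations for a
// project's model versions. The project ARN is a placeholder.
func printTrainingSummaries(ctx context.Context, client *rekognition.Client) error {
    out, err := client.DescribeProjectVersions(ctx, &rekognition.DescribeProjectVersionsInput{
        ProjectArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/1234567890123"),
    })
    if err != nil {
        return err
    }
    for _, v := range out.ProjectVersionDescriptions {
        if v.EvaluationResult == nil || v.EvaluationResult.Summary == nil {
            continue // version not trained yet, or no summary available
        }
        s3 := v.EvaluationResult.Summary.S3Object
        fmt.Printf("summary: s3://%s/%s (F1 score %.2f)\n",
            *s3.Bucket, *s3.Name, *v.EvaluationResult.F1Score)
    }
    return nil
}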
type Sunglasses ¶
type Sunglasses struct {

    // Level of confidence in the determination.
    Confidence *float32

    // Boolean value that indicates whether the face is wearing sunglasses or not.
    Value *bool
}
Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.
type TechnicalCueSegment ¶
type TechnicalCueSegment struct {

    // The type of the technical cue.
    Type TechnicalCueType

    // The confidence that Amazon Rekognition Video has in the accuracy of the
    // detected segment.
    Confidence *float32
}
Information about a technical cue segment. For more information, see SegmentDetection ().
type TechnicalCueType ¶
type TechnicalCueType string
const (
    TechnicalCueTypeColor_bars   TechnicalCueType = "ColorBars"
    TechnicalCueTypeEnd_credits  TechnicalCueType = "EndCredits"
    TechnicalCueTypeBlack_frames TechnicalCueType = "BlackFrames"
)
Enum values for TechnicalCueType
type TestingData ¶
type TestingData struct {

    // The assets used for testing.
    Assets []*Asset

    // If specified, Amazon Rekognition Custom Labels creates a testing dataset with
    // an 80/20 split of the training dataset.
    AutoCreate *bool
}
The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition Custom Labels creates a testing dataset using an 80/20 split of the training dataset.
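A minimal sketch of requesting that split when starting training; the ARN, bucket, and manifest key are placeholders:

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// newTrainRequest asks Amazon Rekognition Custom Labels to create
// the testing dataset itself via an 80/20 split of the training
// manifest. All names and ARNs are placeholders.
func newTrainRequest() *rekognition.CreateProjectVersionInput {
    return &rekognition.CreateProjectVersionInput{
        ProjectArn:  aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/1234567890123"),
        VersionName: aws.String("v1"),
        OutputConfig: &types.OutputConfig{
            S3Bucket:    aws.String("my-training-bucket"),
            S3KeyPrefix: aws.String("output/"),
        },
        TrainingData: &types.TrainingData{
            Assets: []*types.Asset{{
                GroundTruthManifest: &types.GroundTruthManifest{
                    S3Object: &types.S3Object{
                        Bucket: aws.String("my-training-bucket"),
                        Name:   aws.String("manifests/train.manifest"),
                    },
                },
            }},
        },
        TestingData: &types.TestingData{AutoCreate: aws.Bool(true)},
    }
}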
type TestingDataResult ¶
type TestingDataResult struct {

    // The testing dataset that was supplied for training.
    Input *TestingData

    // The subset of the dataset that was actually tested. Some images (assets)
    // might not be tested due to file formatting and other issues.
    Output *TestingData
}
A SageMaker Ground Truth format manifest file representing the dataset used for testing.
type TextDetection ¶
type TextDetection struct {

    // The identifier for the detected text. The identifier is only unique for a
    // single call to DetectText.
    Id *int32

    // The location of the detected text on the image. Includes an axis aligned
    // coarse bounding box surrounding the text and a finer grain polygon for more
    // accurate spatial information.
    Geometry *Geometry

    // The type of text that was detected.
    Type TextTypes

    // The word or line of text recognized by Amazon Rekognition.
    DetectedText *string

    // The Parent identifier for the detected text identified by the value of ID.
    // If the type of detected text is LINE, the value of ParentId is Null.
    ParentId *int32

    // The confidence that Amazon Rekognition has in the accuracy of the detected
    // text and the accuracy of the geometry points around the detected text.
    Confidence *float32
}
Information about a word or line of text detected by DetectText (). The DetectedText field contains the text that Amazon Rekognition detected in the image. Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words. For more information, see Detecting Text in the Amazon Rekognition Developer Guide.
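A hedged sketch of using Id and ParentId to group words under their lines in a DetectText response; the bucket and key are placeholders:

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// printLines groups WORD detections under their parent LINE using
// ParentId, then prints each line together with its words.
func printLines(ctx context.Context, client *rekognition.Client) error {
    out, err := client.DetectText(ctx, &rekognition.DetectTextInput{
        Image: &types.Image{S3Object: &types.S3Object{
            Bucket: aws.String("my-bucket"),
            Name:   aws.String("images/street-sign.jpg"),
        }},
    })
    if err != nil {
        return err
    }
    // Collect words keyed by the Id of their parent line.
    words := map[int32][]string{}
    for _, td := range out.TextDetections {
        if td.Type == types.TextTypesWord && td.ParentId != nil {
            words[*td.ParentId] = append(words[*td.ParentId], *td.DetectedText)
        }
    }
    for _, td := range out.TextDetections {
        if td.Type == types.TextTypesLine {
            fmt.Printf("line %d: %q words=%v\n", *td.Id, *td.DetectedText, words[*td.Id])
        }
    }
    return nil
}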
type TextDetectionResult ¶
type TextDetectionResult struct {

    // Details about text detected in a video.
    TextDetection *TextDetection

    // The time, in milliseconds from the start of the video, that the text was
    // detected.
    Timestamp *int64
}
Information about text detected in a video. Includes the detected text, the time in milliseconds from the start of the video that the text was detected, and where it was detected on the screen.
type TextTypes ¶
type TextTypes string

const (
    TextTypesLine TextTypes = "LINE"
    TextTypesWord TextTypes = "WORD"
)

Enum values for TextTypes
type ThrottlingException ¶
Amazon Rekognition is temporarily unable to process the request. Try your call again.
func (*ThrottlingException) Error ¶
func (e *ThrottlingException) Error() string
func (*ThrottlingException) ErrorCode ¶
func (e *ThrottlingException) ErrorCode() string
func (*ThrottlingException) ErrorFault ¶
func (e *ThrottlingException) ErrorFault() smithy.ErrorFault
func (*ThrottlingException) ErrorMessage ¶
func (e *ThrottlingException) ErrorMessage() string
type TrainingData ¶
type TrainingData struct {

    // A SageMaker Ground Truth manifest file that contains the training images
    // (assets).
    Assets []*Asset
}
The dataset used for training.
type TrainingDataResult ¶
type TrainingDataResult struct {

    // The images (assets) that were actually trained by Amazon Rekognition Custom
    // Labels.
    Output *TrainingData

    // The training assets that you supplied for training.
    Input *TrainingData
}
A SageMaker Ground Truth format manifest file that represents the dataset used for training.
type UnindexedFace ¶
type UnindexedFace struct {

    // An array of reasons that specify why a face wasn't indexed.
    //
    //     * EXTREME_POSE - The face is at a pose that can't be detected. For
    //     example, the head is turned too far away from the camera.
    //
    //     * EXCEEDS_MAX_FACES - The number of faces detected is already higher than
    //     that specified by the MaxFaces input parameter for IndexFaces.
    //
    //     * LOW_BRIGHTNESS - The image is too dark.
    //
    //     * LOW_SHARPNESS - The image is too blurry.
    //
    //     * LOW_CONFIDENCE - The face was detected with a low confidence.
    //
    //     * SMALL_BOUNDING_BOX - The bounding box around the face is too small.
    Reasons []Reason

    // The structure that contains attributes of a face that IndexFaces detected,
    // but didn't index.
    FaceDetail *FaceDetail
}
A face that IndexFaces () detected, but didn't index. Use the Reasons response attribute to determine why a face wasn't indexed.
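A minimal sketch of surfacing those reasons after an IndexFaces call; the collection ID, image location, and MaxFaces value are placeholders:

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// reportUnindexed prints why each detected-but-unindexed face was
// skipped. All identifiers are placeholders.
func reportUnindexed(ctx context.Context, client *rekognition.Client) error {
    out, err := client.IndexFaces(ctx, &rekognition.IndexFacesInput{
        CollectionId: aws.String("my-face-collection"),
        Image: &types.Image{S3Object: &types.S3Object{
            Bucket: aws.String("my-bucket"),
            Name:   aws.String("images/group-photo.jpg"),
        }},
        MaxFaces: aws.Int32(5),
    })
    if err != nil {
        return err
    }
    for _, uf := range out.UnindexedFaces {
        // Reasons is a slice of enum strings such as LOW_CONFIDENCE.
        fmt.Printf("face not indexed, reasons: %v\n", uf.Reasons)
    }
    return nil
}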
type Video ¶
type Video struct {

    // The Amazon S3 bucket name and file name for the video.
    S3Object *S3Object
}
Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as StartLabelDetection () use Video to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.
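For example, a sketch of passing a stored .mp4 to StartLabelDetection (); the bucket, key, and confidence threshold are placeholders:

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/rekognition"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// startLabelJob kicks off asynchronous label detection on a stored
// video and returns the job ID, which is later passed to
// GetLabelDetection to fetch the results.
func startLabelJob(ctx context.Context, client *rekognition.Client) (string, error) {
    out, err := client.StartLabelDetection(ctx, &rekognition.StartLabelDetectionInput{
        Video: &types.Video{S3Object: &types.S3Object{
            Bucket: aws.String("my-bucket"),
            Name:   aws.String("videos/clip.mp4"),
        }},
        MinConfidence: aws.Float32(75),
    })
    if err != nil {
        return "", err
    }
    return *out.JobId, nil
}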
type VideoJobStatus ¶
type VideoJobStatus string
const (
    VideoJobStatusIn_progress VideoJobStatus = "IN_PROGRESS"
    VideoJobStatusSucceeded   VideoJobStatus = "SUCCEEDED"
    VideoJobStatusFailed      VideoJobStatus = "FAILED"
)
Enum values for VideoJobStatus
type VideoMetadata ¶
type VideoMetadata struct {

    // Length of the video in milliseconds.
    DurationMillis *int64

    // Vertical pixel dimension of the video.
    FrameHeight *int64

    // Number of frames per second in the video.
    FrameRate *float32

    // Horizontal pixel dimension of the video.
    FrameWidth *int64

    // Format of the analyzed video. Possible values are MP4, MOV and AVI.
    Format *string

    // Type of compression used in the analyzed video.
    Codec *string
}
Information about a video that Amazon Rekognition analyzed. VideoMetadata is returned in every page of paginated responses from an Amazon Rekognition video operation.
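Since DurationMillis and FrameRate arrive together, an approximate frame count falls out directly; a small sketch (nil checks included because every field is a pointer):

import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"

// approxFrameCount estimates the number of frames in an analyzed
// video: duration in seconds times frames per second.
func approxFrameCount(md *types.VideoMetadata) int64 {
    if md == nil || md.DurationMillis == nil || md.FrameRate == nil {
        return 0
    }
    seconds := float64(*md.DurationMillis) / 1000.0
    return int64(seconds * float64(*md.FrameRate))
}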
type VideoTooLargeException ¶
The file size or duration of the supplied media is too large. The maximum file size is 10GB. The maximum duration is 6 hours.
func (*VideoTooLargeException) Error ¶
func (e *VideoTooLargeException) Error() string
func (*VideoTooLargeException) ErrorCode ¶
func (e *VideoTooLargeException) ErrorCode() string
func (*VideoTooLargeException) ErrorFault ¶
func (e *VideoTooLargeException) ErrorFault() smithy.ErrorFault
func (*VideoTooLargeException) ErrorMessage ¶
func (e *VideoTooLargeException) ErrorMessage() string