package types
import "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
Index ¶
- type AbsoluteTimeRange
- type BadRequestException
- func (e *BadRequestException) Error() string
- func (e *BadRequestException) ErrorCode() string
- func (e *BadRequestException) ErrorFault() smithy.ErrorFault
- func (e *BadRequestException) ErrorMessage() string
- type BaseModelName
- type CLMLanguageCode
- type CallAnalyticsJob
- type CallAnalyticsJobSettings
- type CallAnalyticsJobStatus
- type CallAnalyticsJobSummary
- type CategoryProperties
- type ChannelDefinition
- type ConflictException
- func (e *ConflictException) Error() string
- func (e *ConflictException) ErrorCode() string
- func (e *ConflictException) ErrorFault() smithy.ErrorFault
- func (e *ConflictException) ErrorMessage() string
- type ContentRedaction
- type InputDataConfig
- type InternalFailureException
- func (e *InternalFailureException) Error() string
- func (e *InternalFailureException) ErrorCode() string
- func (e *InternalFailureException) ErrorFault() smithy.ErrorFault
- func (e *InternalFailureException) ErrorMessage() string
- type InterruptionFilter
- type JobExecutionSettings
- type LanguageCode
- type LanguageCodeItem
- type LanguageIdSettings
- type LanguageModel
- type LimitExceededException
- func (e *LimitExceededException) Error() string
- func (e *LimitExceededException) ErrorCode() string
- func (e *LimitExceededException) ErrorFault() smithy.ErrorFault
- func (e *LimitExceededException) ErrorMessage() string
- type Media
- type MediaFormat
- type MedicalContentIdentificationType
- type MedicalTranscript
- type MedicalTranscriptionJob
- type MedicalTranscriptionJobSummary
- type MedicalTranscriptionSetting
- type ModelSettings
- type ModelStatus
- type NonTalkTimeFilter
- type NotFoundException
- func (e *NotFoundException) Error() string
- func (e *NotFoundException) ErrorCode() string
- func (e *NotFoundException) ErrorFault() smithy.ErrorFault
- func (e *NotFoundException) ErrorMessage() string
- type OutputLocationType
- type ParticipantRole
- type PiiEntityType
- type RedactionOutput
- type RedactionType
- type RelativeTimeRange
- type Rule
- type RuleMemberInterruptionFilter
- type RuleMemberNonTalkTimeFilter
- type RuleMemberSentimentFilter
- type RuleMemberTranscriptFilter
- type SentimentFilter
- type SentimentValue
- type Settings
- type Specialty
- type SubtitleFormat
- type Subtitles
- type SubtitlesOutput
- type Tag
- type Transcript
- type TranscriptFilter
- type TranscriptFilterType
- type TranscriptionJob
- type TranscriptionJobStatus
- type TranscriptionJobSummary
- type Type
- type UnknownUnionMember
- type VocabularyFilterInfo
- type VocabularyFilterMethod
- type VocabularyInfo
- type VocabularyState
Examples ¶
Types ¶
type AbsoluteTimeRange ¶
type AbsoluteTimeRange struct {

    // The time, in milliseconds, when Amazon Transcribe stops searching for the
    // specified criteria in your audio. If you include EndTime in your request,
    // you must also include StartTime.
    EndTime *int64

    // The time, in milliseconds, from the start of your media file until the value
    // you specify in which Amazon Transcribe searches for your specified criteria.
    First *int64

    // The time, in milliseconds, from the value you specify until the end of your
    // media file in which Amazon Transcribe searches for your specified criteria.
    Last *int64

    // The time, in milliseconds, when Amazon Transcribe starts searching for the
    // specified criteria in your audio. If you include StartTime in your request,
    // you must also include EndTime.
    StartTime *int64

    // contains filtered or unexported fields
}
A time range, in milliseconds, between two points in your media file. You can use StartTime and EndTime to search a custom segment. For example, setting StartTime to 10000 and EndTime to 50000 only searches for your specified criteria in the audio contained between the 10,000 millisecond mark and the 50,000 millisecond mark of your media file. You must use StartTime and EndTime as a set; that is, if you include one, you must include both. You can also use First to search from the start of the audio until the time you specify, or Last to search from the time you specify until the end of the audio. For example, setting First to 50000 only searches for your specified criteria in the audio contained between the start of the media file and the 50,000 millisecond mark. You can use First and Last independently of each other. If you prefer to use percentage instead of milliseconds, see RelativeTimeRange.
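A minimal sketch of how these fields combine, using the aws helper package for pointer construction; the range below searches only the audio between the 10,000 and 50,000 millisecond marks:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // StartTime and EndTime must be used as a set.
    r := types.AbsoluteTimeRange{
        StartTime: aws.Int64(10000),
        EndTime:   aws.Int64(50000),
    }
    fmt.Println(*r.StartTime, *r.EndTime)
}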
type BadRequestException ¶
type BadRequestException struct {
    Message *string
    // contains filtered or unexported fields
}
Your request didn't pass one or more validation tests. This can occur when the entity you're trying to delete doesn't exist or if it's in a non-terminal state (such as IN PROGRESS). See the exception message field for more information.
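All error types in this package satisfy the standard error interface, so they can be matched with errors.As. A minimal sketch, assuming err came back from a transcribe client call (a nil error stands in here so the program is self-contained):

package main

import (
    "errors"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // err would normally be the error returned by a client operation.
    var err error

    var bre *types.BadRequestException
    if errors.As(err, &bre) {
        fmt.Println("bad request:", bre.ErrorMessage())
    }
}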
func (*BadRequestException) Error ¶
func (e *BadRequestException) Error() string
func (*BadRequestException) ErrorCode ¶
func (e *BadRequestException) ErrorCode() string
func (*BadRequestException) ErrorFault ¶
func (e *BadRequestException) ErrorFault() smithy.ErrorFault
func (*BadRequestException) ErrorMessage ¶
func (e *BadRequestException) ErrorMessage() string
type BaseModelName ¶
type BaseModelName string
const (
    BaseModelNameNarrowBand BaseModelName = "NarrowBand"
    BaseModelNameWideBand   BaseModelName = "WideBand"
)
Enum values for BaseModelName
func (BaseModelName) Values ¶
func (BaseModelName) Values() []BaseModelName
Values returns all known values for BaseModelName. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
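A minimal sketch of enumerating the known values; the same pattern applies to every enum type in this package, since Values is declared on the value receiver:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // The result reflects only the values known to this client version.
    for _, v := range types.BaseModelName("").Values() {
        fmt.Println(v)
    }
}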
type CLMLanguageCode ¶
type CLMLanguageCode string
const (
    CLMLanguageCodeEnUs CLMLanguageCode = "en-US"
    CLMLanguageCodeHiIn CLMLanguageCode = "hi-IN"
    CLMLanguageCodeEsUs CLMLanguageCode = "es-US"
    CLMLanguageCodeEnGb CLMLanguageCode = "en-GB"
    CLMLanguageCodeEnAu CLMLanguageCode = "en-AU"
)
Enum values for CLMLanguageCode
func (CLMLanguageCode) Values ¶
func (CLMLanguageCode) Values() []CLMLanguageCode
Values returns all known values for CLMLanguageCode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type CallAnalyticsJob ¶
type CallAnalyticsJob struct {

    // The name of the Call Analytics job. Job names are case sensitive and must be
    // unique within an Amazon Web Services account.
    CallAnalyticsJobName *string

    // Provides the status of the specified Call Analytics job. If the status is
    // COMPLETED, the job is finished and you can find the results at the location
    // specified in TranscriptFileUri (or RedactedTranscriptFileUri, if you
    // requested transcript redaction). If the status is FAILED, FailureReason
    // provides details on why your transcription job failed.
    CallAnalyticsJobStatus CallAnalyticsJobStatus

    // Allows you to specify which speaker is on which channel in your Call
    // Analytics job request. For example, if your agent is the first participant
    // to speak, you would set ChannelId to 0 (to indicate the first channel) and
    // ParticipantRole to AGENT (to indicate that it's the agent speaking).
    ChannelDefinitions []ChannelDefinition

    // The date and time the specified Call Analytics job finished processing.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:33:13.922000-07:00 represents a transcription job that started
    // processing at 12:33 PM UTC-7 on May 4, 2022.
    CompletionTime *time.Time

    // The date and time the specified Call Analytics job request was made.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    CreationTime *time.Time

    // The Amazon Resource Name (ARN) of an IAM role that has permissions to access
    // the Amazon S3 bucket that contains your input files. If the role you specify
    // doesn't have the appropriate permissions to access the specified Amazon S3
    // location, your request fails. IAM role ARNs have the format
    // arn:partition:iam::account:role/role-name-with-path. For example:
    // arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs
    // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns).
    DataAccessRoleArn *string

    // If CallAnalyticsJobStatus is FAILED, FailureReason contains information
    // about why the Call Analytics job request failed. The FailureReason field
    // contains one of the following values:
    //
    //   * Unsupported media format. The media format specified in MediaFormat
    //   isn't valid. Refer to MediaFormat for a list of supported formats.
    //
    //   * The media format provided does not match the detected media format. The
    //   media format specified in MediaFormat doesn't match the format of the
    //   input file. Check the media format of your media file and correct the
    //   specified value.
    //
    //   * Invalid sample rate for audio file. The sample rate specified in
    //   MediaSampleRateHertz isn't valid. The sample rate must be between 8,000
    //   and 48,000 Hertz.
    //
    //   * The sample rate provided does not match the detected sample rate. The
    //   sample rate specified in MediaSampleRateHertz doesn't match the sample
    //   rate detected in your input media file. Check the sample rate of your
    //   media file and correct the specified value.
    //
    //   * Invalid file size: file size too large. The size of your media file is
    //   larger than what Amazon Transcribe can process. For more information,
    //   refer to Guidelines and quotas
    //   (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits).
    //
    //   * Invalid number of channels: number of channels too large. Your audio
    //   contains more channels than Amazon Transcribe is able to process. For more
    //   information, refer to Guidelines and quotas
    //   (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits).
    FailureReason *string

    // The confidence score associated with the language identified in your media
    // file. Confidence scores are values between 0 and 1; a larger value indicates
    // a higher probability that the identified language correctly matches the
    // language spoken in your media.
    IdentifiedLanguageScore *float32

    // The language code used to create your Call Analytics job. For a list of
    // supported languages and their associated language codes, refer to the
    // Supported languages
    // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html)
    // table. If you don't know the language spoken in your media file, you can
    // omit this field and let Amazon Transcribe automatically identify the
    // language of your media. To improve the accuracy of language identification,
    // you can include several language codes and Amazon Transcribe chooses the
    // closest match for your transcription.
    LanguageCode LanguageCode

    // Describes the Amazon S3 location of the media file you want to use in your
    // request.
    Media *Media

    // The format of the input media file.
    MediaFormat MediaFormat

    // The sample rate, in Hertz, of the audio track in your input media file.
    MediaSampleRateHertz *int32

    // Allows additional optional settings in your request, including content
    // redaction; allows you to apply custom language models, vocabulary filters,
    // and custom vocabularies to your Call Analytics job.
    Settings *CallAnalyticsJobSettings

    // The date and time the specified Call Analytics job began processing.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    StartTime *time.Time

    // Provides you with the Amazon S3 URI you can use to access your transcript.
    Transcript *Transcript

    // contains filtered or unexported fields
}
Provides detailed information about a Call Analytics job. To view the job's status, refer to CallAnalyticsJobStatus. If the status is COMPLETED, the job is finished. You can find your completed transcript at the URI specified in TranscriptFileUri. If the status is FAILED, FailureReason provides details on why your transcription job failed. If you enabled personally identifiable information (PII) redaction, the redacted transcript appears at the location specified in RedactedTranscriptFileUri. If you chose to redact the audio in your media file, you can find your redacted media file at the location specified in the RedactedMediaFileUri field of your response.
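A minimal sketch of reading these fields from a returned job; the TranscriptFileUri field on Transcript is assumed here, since the Transcript type's fields are filtered from this listing:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

// reportJob prints a short status summary for a Call Analytics job.
func reportJob(job *types.CallAnalyticsJob) {
    switch job.CallAnalyticsJobStatus {
    case types.CallAnalyticsJobStatusCompleted:
        if job.Transcript != nil && job.Transcript.TranscriptFileUri != nil {
            fmt.Println("transcript at:", *job.Transcript.TranscriptFileUri)
        }
    case types.CallAnalyticsJobStatusFailed:
        if job.FailureReason != nil {
            fmt.Println("job failed:", *job.FailureReason)
        }
    default:
        fmt.Println("job status:", job.CallAnalyticsJobStatus)
    }
}

func main() {
    reportJob(&types.CallAnalyticsJob{
        CallAnalyticsJobStatus: types.CallAnalyticsJobStatusInProgress,
    })
}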
type CallAnalyticsJobSettings ¶
type CallAnalyticsJobSettings struct {

    // Allows you to redact or flag specified personally identifiable information
    // (PII) in your transcript. If you use ContentRedaction, you must also include
    // the sub-parameters: PiiEntityTypes, RedactionOutput, and RedactionType.
    ContentRedaction *ContentRedaction

    // If using automatic language identification (IdentifyLanguage) in your
    // request and you want to apply a custom language model, a custom vocabulary,
    // or a custom vocabulary filter, include LanguageIdSettings with the relevant
    // sub-parameters (VocabularyName, LanguageModelName, and VocabularyFilterName).
    // You can specify two or more language codes that represent the languages you
    // think may be present in your media; including more than five is not
    // recommended. Each language code you include can have an associated custom
    // language model, custom vocabulary, and custom vocabulary filter. The
    // languages you specify must match the languages of the specified custom
    // language models, custom vocabularies, and custom vocabulary filters. To
    // include language options using IdentifyLanguage without including a custom
    // language model, a custom vocabulary, or a custom vocabulary filter, use
    // LanguageOptions instead of LanguageIdSettings. Including language options
    // can improve the accuracy of automatic language identification. If you want
    // to include a custom language model with your request but do not want to use
    // automatic language identification, use the LanguageModelName sub-parameter
    // instead. If you want to include a custom vocabulary or a custom vocabulary
    // filter (or both) with your request but do not want to use automatic language
    // identification, use the VocabularyName or VocabularyFilterName (or both)
    // sub-parameter instead.
    LanguageIdSettings map[string]LanguageIdSettings

    // The name of the custom language model you want to use when processing your
    // Call Analytics job. Note that language model names are case sensitive. The
    // language of the specified language model must match the language code you
    // specify in your transcription request. If the languages don't match, the
    // language model isn't applied. There are no errors or warnings associated
    // with a language mismatch.
    LanguageModelName *string

    // You can specify two or more language codes that represent the languages you
    // think may be present in your media; including more than five is not
    // recommended. If you're unsure what languages are present, do not include
    // this parameter. Including language options can improve the accuracy of
    // language identification. For a list of languages supported with Call
    // Analytics, refer to the Supported languages
    // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html)
    // table.
    LanguageOptions []LanguageCode

    // Specify how you want your vocabulary filter applied to your transcript. To
    // replace words with ***, choose mask. To delete words, choose remove. To flag
    // words without changing them, choose tag.
    VocabularyFilterMethod VocabularyFilterMethod

    // The name of the custom vocabulary filter you want to include in your Call
    // Analytics transcription request. Vocabulary filter names are case sensitive.
    // Note that if you include VocabularyFilterName in your request, you must also
    // include VocabularyFilterMethod.
    VocabularyFilterName *string

    // The name of the custom vocabulary you want to include in your Call Analytics
    // transcription request. Vocabulary names are case sensitive.
    VocabularyName *string

    // contains filtered or unexported fields
}
Provides additional optional settings for your request, including content redaction and automatic language identification, and allows you to apply custom language models, vocabulary filters, and custom vocabularies.
type CallAnalyticsJobStatus ¶
type CallAnalyticsJobStatus string
const (
    CallAnalyticsJobStatusQueued     CallAnalyticsJobStatus = "QUEUED"
    CallAnalyticsJobStatusInProgress CallAnalyticsJobStatus = "IN_PROGRESS"
    CallAnalyticsJobStatusFailed     CallAnalyticsJobStatus = "FAILED"
    CallAnalyticsJobStatusCompleted  CallAnalyticsJobStatus = "COMPLETED"
)
Enum values for CallAnalyticsJobStatus
func (CallAnalyticsJobStatus) Values ¶
func (CallAnalyticsJobStatus) Values() []CallAnalyticsJobStatus
Values returns all known values for CallAnalyticsJobStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type CallAnalyticsJobSummary ¶
type CallAnalyticsJobSummary struct {

    // The name of the Call Analytics job. Job names are case sensitive and must be
    // unique within an Amazon Web Services account.
    CallAnalyticsJobName *string

    // Provides the status of your Call Analytics job. If the status is COMPLETED,
    // the job is finished and you can find the results at the location specified
    // in TranscriptFileUri (or RedactedTranscriptFileUri, if you requested
    // transcript redaction). If the status is FAILED, FailureReason provides
    // details on why your transcription job failed.
    CallAnalyticsJobStatus CallAnalyticsJobStatus

    // The date and time the specified Call Analytics job finished processing.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:33:13.922000-07:00 represents a transcription job that started
    // processing at 12:33 PM UTC-7 on May 4, 2022.
    CompletionTime *time.Time

    // The date and time the specified Call Analytics job request was made.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    CreationTime *time.Time

    // If CallAnalyticsJobStatus is FAILED, FailureReason contains information
    // about why the Call Analytics job failed. See also: Common Errors
    // (https://docs.aws.amazon.com/transcribe/latest/APIReference/CommonErrors.html).
    FailureReason *string

    // The language code used to create your Call Analytics transcription.
    LanguageCode LanguageCode

    // The date and time your Call Analytics job began processing. Timestamps are
    // in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    StartTime *time.Time

    // contains filtered or unexported fields
}
Provides detailed information about a specific Call Analytics job.
type CategoryProperties ¶
type CategoryProperties struct {

    // The name of the Call Analytics category. Category names are case sensitive
    // and must be unique within an Amazon Web Services account.
    CategoryName *string

    // The date and time the specified Call Analytics category was created.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022.
    CreateTime *time.Time

    // The date and time the specified Call Analytics category was last updated.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-05T12:45:32.691000-07:00 represents 12:45 PM UTC-7 on May 5, 2022.
    LastUpdateTime *time.Time

    // The rules used to define a Call Analytics category. Each category can have
    // between 1 and 20 rules.
    Rules []Rule

    // contains filtered or unexported fields
}
Provides you with the properties of the Call Analytics category you specified in your request. This includes the list of rules that define the specified category.
type ChannelDefinition ¶
type ChannelDefinition struct {

    // Specify the audio channel you want to define.
    ChannelId int32

    // Specify the speaker you want to define. Omitting this parameter is
    // equivalent to specifying both participants.
    ParticipantRole ParticipantRole

    // contains filtered or unexported fields
}
Allows you to specify which speaker is on which channel. For example, if your agent is the first participant to speak, you would set ChannelId to 0 (to indicate the first channel) and ParticipantRole to AGENT (to indicate that it's the agent speaking).
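A minimal sketch of the two-channel agent/customer setup described above:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Channel 0 carries the agent, channel 1 carries the customer.
    channels := []types.ChannelDefinition{
        {ChannelId: 0, ParticipantRole: types.ParticipantRoleAgent},
        {ChannelId: 1, ParticipantRole: types.ParticipantRoleCustomer},
    }
    fmt.Println(len(channels), "channels defined")
}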
type ConflictException ¶
type ConflictException struct {
    Message *string
    // contains filtered or unexported fields
}
A resource already exists with this name. Resource names must be unique within an Amazon Web Services account.
func (*ConflictException) Error ¶
func (e *ConflictException) Error() string
func (*ConflictException) ErrorCode ¶
func (e *ConflictException) ErrorCode() string
func (*ConflictException) ErrorFault ¶
func (e *ConflictException) ErrorFault() smithy.ErrorFault
func (*ConflictException) ErrorMessage ¶
func (e *ConflictException) ErrorMessage() string
type ContentRedaction ¶
type ContentRedaction struct {

    // Specify if you want only a redacted transcript, or if you want a redacted
    // and an unredacted transcript. When you choose redacted, Amazon Transcribe
    // creates only a redacted transcript. When you choose redacted_and_unredacted,
    // Amazon Transcribe creates a redacted and an unredacted transcript (as two
    // separate files).
    //
    // This member is required.
    RedactionOutput RedactionOutput

    // Specify the category of information you want to redact; PII (personally
    // identifiable information) is the only valid value. You can use
    // PiiEntityTypes to choose which types of PII you want to redact.
    //
    // This member is required.
    RedactionType RedactionType

    // Specify which types of personally identifiable information (PII) you want to
    // redact in your transcript. You can include as many types as you'd like, or
    // you can select ALL.
    PiiEntityTypes []PiiEntityType

    // contains filtered or unexported fields
}
Allows you to redact or flag specified personally identifiable information (PII) in your transcript. If you use ContentRedaction, you must also include the sub-parameters: PiiEntityTypes, RedactionOutput, and RedactionType.
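A minimal sketch of a redaction configuration, using the RedactionType, RedactionOutput, and PiiEntityType enum values defined later in this package:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Request both a redacted and an unredacted transcript, masking names and
    // social security numbers via the optional PiiEntityTypes list.
    redaction := types.ContentRedaction{
        RedactionType:   types.RedactionTypePii,
        RedactionOutput: types.RedactionOutputRedactedAndUnredacted,
        PiiEntityTypes: []types.PiiEntityType{
            types.PiiEntityTypeName,
            types.PiiEntityTypeSsn,
        },
    }
    fmt.Println(redaction.RedactionOutput)
}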
type InputDataConfig ¶
type InputDataConfig struct {

    // The Amazon Resource Name (ARN) of an IAM role that has permissions to access
    // the Amazon S3 bucket that contains your input files. If the role you specify
    // doesn't have the appropriate permissions to access the specified Amazon S3
    // location, your request fails. IAM role ARNs have the format
    // arn:partition:iam::account:role/role-name-with-path. For example:
    // arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs
    // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns).
    //
    // This member is required.
    DataAccessRoleArn *string

    // The Amazon S3 location (URI) of the text files you want to use to train your
    // custom language model. Here's an example URI path:
    // s3://DOC-EXAMPLE-BUCKET/my-model-training-data/
    //
    // This member is required.
    S3Uri *string

    // The Amazon S3 location (URI) of the text files you want to use to tune your
    // custom language model. Here's an example URI path:
    // s3://DOC-EXAMPLE-BUCKET/my-model-tuning-data/
    TuningDataS3Uri *string

    // contains filtered or unexported fields
}
Contains the Amazon S3 location of the training data you want to use to create a new custom language model, and permissions to access this location. When using InputDataConfig, you must include these sub-parameters: S3Uri and DataAccessRoleArn. You can optionally include TuningDataS3Uri.
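A minimal sketch of a training-data configuration; the bucket paths and role ARN are the placeholder values from the field documentation above:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // S3Uri and DataAccessRoleArn are required; TuningDataS3Uri is optional.
    cfg := types.InputDataConfig{
        S3Uri:             aws.String("s3://DOC-EXAMPLE-BUCKET/my-model-training-data/"),
        TuningDataS3Uri:   aws.String("s3://DOC-EXAMPLE-BUCKET/my-model-tuning-data/"),
        DataAccessRoleArn: aws.String("arn:aws:iam::111122223333:role/Admin"),
    }
    fmt.Println(*cfg.S3Uri)
}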
type InternalFailureException ¶
type InternalFailureException struct {
    Message *string
    // contains filtered or unexported fields
}
There was an internal error. Check the error message, correct the issue, and try your request again.
func (*InternalFailureException) Error ¶
func (e *InternalFailureException) Error() string
func (*InternalFailureException) ErrorCode ¶
func (e *InternalFailureException) ErrorCode() string
func (*InternalFailureException) ErrorFault ¶
func (e *InternalFailureException) ErrorFault() smithy.ErrorFault
func (*InternalFailureException) ErrorMessage ¶
func (e *InternalFailureException) ErrorMessage() string
type InterruptionFilter ¶
type InterruptionFilter struct {

    // Allows you to specify a time range (in milliseconds) in your audio, during
    // which you want to search for an interruption. See AbsoluteTimeRange for more
    // detail.
    AbsoluteTimeRange *AbsoluteTimeRange

    // Set to TRUE to flag speech that does not contain interruptions. Set to FALSE
    // to flag speech that contains interruptions.
    Negate *bool

    // Specify the interrupter you want to flag. Omitting this parameter is
    // equivalent to specifying both participants.
    ParticipantRole ParticipantRole

    // Allows you to specify a time range (in percentage) in your media file,
    // during which you want to search for an interruption. See RelativeTimeRange
    // for more detail.
    RelativeTimeRange *RelativeTimeRange

    // Specify the duration of the interruptions in milliseconds. For example, you
    // can flag speech that contains more than 10000 milliseconds of interruptions.
    Threshold *int64

    // contains filtered or unexported fields
}
Flag the presence or absence of interruptions in your Call Analytics transcription output. Rules using InterruptionFilter are designed to match:
* Instances where an agent interrupts a customer
* Instances where a customer interrupts an agent
* Either participant interrupting the other
* A lack of interruptions
See Rule criteria (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics-create-categories.html#call-analytics-create-categories-rules) for usage examples.
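As a sketch, the rule below flags calls in which the agent produces more than 10,000 milliseconds of interruptions; it is wrapped in the RuleMemberInterruptionFilter union member described under Rule:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Flag calls where the agent interrupts for more than 10,000 ms in total.
    rule := &types.RuleMemberInterruptionFilter{
        Value: types.InterruptionFilter{
            ParticipantRole: types.ParticipantRoleAgent,
            Threshold:       aws.Int64(10000),
        },
    }
    // A []types.Rule slice like this is what CategoryProperties.Rules holds.
    rules := []types.Rule{rule}
    fmt.Println(len(rules), "rule(s) defined")
}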
type JobExecutionSettings ¶
type JobExecutionSettings struct {

    // Allows you to enable job queuing when your concurrent request limit is
    // exceeded. When AllowDeferredExecution is set to true, transcription job
    // requests are placed in a queue until the number of jobs falls below the
    // concurrent request limit. If AllowDeferredExecution is set to false and the
    // number of transcription job requests exceeds the concurrent request limit,
    // you get a LimitExceededException error. Note that job queuing is enabled by
    // default for Call Analytics jobs. If you include AllowDeferredExecution in
    // your request, you must also include DataAccessRoleArn.
    AllowDeferredExecution *bool

    // The Amazon Resource Name (ARN) of an IAM role that has permissions to access
    // the Amazon S3 bucket that contains your input files. If the role you specify
    // doesn't have the appropriate permissions to access the specified Amazon S3
    // location, your request fails. IAM role ARNs have the format
    // arn:partition:iam::account:role/role-name-with-path. For example:
    // arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs
    // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns).
    // Note that if you include DataAccessRoleArn in your request, you must also
    // include AllowDeferredExecution.
    DataAccessRoleArn *string

    // contains filtered or unexported fields
}
Allows you to control how your transcription job is processed. Currently, the only JobExecutionSettings modification you can choose is enabling job queueing using the AllowDeferredExecution sub-parameter. If you include JobExecutionSettings in your request, you must also include the sub-parameters: AllowDeferredExecution and DataAccessRoleArn.
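A minimal sketch of enabling deferred execution; the role ARN is a placeholder:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Queue jobs instead of failing when the concurrent request limit is hit.
    // AllowDeferredExecution and DataAccessRoleArn must be set together.
    settings := types.JobExecutionSettings{
        AllowDeferredExecution: aws.Bool(true),
        DataAccessRoleArn:      aws.String("arn:aws:iam::111122223333:role/Admin"),
    }
    fmt.Println(*settings.AllowDeferredExecution)
}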
type LanguageCode ¶
type LanguageCode string
const (
    LanguageCodeAfZa LanguageCode = "af-ZA"
    LanguageCodeArAe LanguageCode = "ar-AE"
    LanguageCodeArSa LanguageCode = "ar-SA"
    LanguageCodeDaDk LanguageCode = "da-DK"
    LanguageCodeDeCh LanguageCode = "de-CH"
    LanguageCodeDeDe LanguageCode = "de-DE"
    LanguageCodeEnAb LanguageCode = "en-AB"
    LanguageCodeEnAu LanguageCode = "en-AU"
    LanguageCodeEnGb LanguageCode = "en-GB"
    LanguageCodeEnIe LanguageCode = "en-IE"
    LanguageCodeEnIn LanguageCode = "en-IN"
    LanguageCodeEnUs LanguageCode = "en-US"
    LanguageCodeEnWl LanguageCode = "en-WL"
    LanguageCodeEsEs LanguageCode = "es-ES"
    LanguageCodeEsUs LanguageCode = "es-US"
    LanguageCodeFaIr LanguageCode = "fa-IR"
    LanguageCodeFrCa LanguageCode = "fr-CA"
    LanguageCodeFrFr LanguageCode = "fr-FR"
    LanguageCodeHeIl LanguageCode = "he-IL"
    LanguageCodeHiIn LanguageCode = "hi-IN"
    LanguageCodeIdId LanguageCode = "id-ID"
    LanguageCodeItIt LanguageCode = "it-IT"
    LanguageCodeJaJp LanguageCode = "ja-JP"
    LanguageCodeKoKr LanguageCode = "ko-KR"
    LanguageCodeMsMy LanguageCode = "ms-MY"
    LanguageCodeNlNl LanguageCode = "nl-NL"
    LanguageCodePtBr LanguageCode = "pt-BR"
    LanguageCodePtPt LanguageCode = "pt-PT"
    LanguageCodeRuRu LanguageCode = "ru-RU"
    LanguageCodeTaIn LanguageCode = "ta-IN"
    LanguageCodeTeIn LanguageCode = "te-IN"
    LanguageCodeTrTr LanguageCode = "tr-TR"
    LanguageCodeZhCn LanguageCode = "zh-CN"
    LanguageCodeZhTw LanguageCode = "zh-TW"
    LanguageCodeThTh LanguageCode = "th-TH"
    LanguageCodeEnZa LanguageCode = "en-ZA"
    LanguageCodeEnNz LanguageCode = "en-NZ"
)
Enum values for LanguageCode
func (LanguageCode) Values ¶
func (LanguageCode) Values() []LanguageCode
Values returns all known values for LanguageCode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type LanguageCodeItem ¶
type LanguageCodeItem struct {

    // Provides the total time, in seconds, each identified language is spoken in
    // your media.
    DurationInSeconds *float32

    // Provides the language code for each language identified in your media.
    LanguageCode LanguageCode

    // contains filtered or unexported fields
}
Provides information on the speech contained in a discrete utterance when multi-language identification is enabled in your request. This utterance represents a block of speech consisting of one language, preceded or followed by a block of speech in a different language.
type LanguageIdSettings ¶
type LanguageIdSettings struct {

    // The name of the custom language model you want to use when processing your
    // transcription job. Note that language model names are case sensitive. The
    // language of the specified language model must match the language code you
    // specify in your transcription request. If the languages don't match, the
    // language model isn't applied. There are no errors or warnings associated
    // with a language mismatch.
    LanguageModelName *string

    // The name of the custom vocabulary filter you want to use when processing
    // your transcription job. Vocabulary filter names are case sensitive. The
    // language of the specified vocabulary filter must match the language code you
    // specify in your transcription request. If the languages don't match, the
    // vocabulary filter isn't applied. There are no errors or warnings associated
    // with a language mismatch. Note that if you include VocabularyFilterName in
    // your request, you must also include VocabularyFilterMethod.
    VocabularyFilterName *string

    // The name of the custom vocabulary you want to use when processing your
    // transcription job. Vocabulary names are case sensitive. The language of the
    // specified vocabulary must match the language code you specify in your
    // transcription request. If the languages don't match, the vocabulary isn't
    // applied. There are no errors or warnings associated with a language mismatch.
    VocabularyName *string

    // contains filtered or unexported fields
}
If using automatic language identification (IdentifyLanguage) in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings with the relevant sub-parameters (VocabularyName, LanguageModelName, and VocabularyFilterName). You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The languages you specify must match the languages of the specified custom language models, custom vocabularies, and custom vocabulary filters. To include language options using IdentifyLanguage without including a custom language model, a custom vocabulary, or a custom vocabulary filter, use LanguageOptions instead of LanguageIdSettings. Including language options can improve the accuracy of automatic language identification. If you want to include a custom language model with your request but do not want to use automatic language identification, use the ModelSettings parameter with the LanguageModelName sub-parameter instead. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use the Settings parameter with the VocabularyName or VocabularyFilterName (or both) sub-parameter instead.
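A minimal sketch of pairing candidate languages with custom resources; the vocabulary names are hypothetical placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Map each candidate language code to the custom resources to apply if
    // that language is identified.
    idSettings := map[string]types.LanguageIdSettings{
        string(types.LanguageCodeEnUs): {
            VocabularyName: aws.String("my-en-US-vocabulary"),
        },
        string(types.LanguageCodeEsUs): {
            VocabularyName: aws.String("my-es-US-vocabulary"),
        },
    }
    fmt.Println(len(idSettings), "language mappings")
}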
type LanguageModel ¶
type LanguageModel struct {

    // The Amazon Transcribe standard language model, or base model, used to create
    // your custom language model.
    BaseModelName BaseModelName

    // The date and time the specified custom language model was created.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022.
    CreateTime *time.Time

    // If ModelStatus is FAILED, FailureReason contains information about why the
    // custom language model request failed. See also: Common Errors
    // (https://docs.aws.amazon.com/transcribe/latest/APIReference/CommonErrors.html).
    FailureReason *string

    // The Amazon S3 location of the input files used to train and tune your custom
    // language model, in addition to the data access role ARN (Amazon Resource
    // Name) that has permissions to access these data.
    InputDataConfig *InputDataConfig

    // The language code used to create your custom language model. Each language
    // model must contain terms in only one language, and the language you select
    // for your model must match the language of your training and tuning data. For
    // a list of supported languages and their associated language codes, refer to
    // the Supported languages
    // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html)
    // table. Note that U.S. English (en-US) is the only language supported with
    // Amazon Transcribe Medical.
    LanguageCode CLMLanguageCode

    // The date and time the specified language model was last modified. Timestamps
    // are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022.
    LastModifiedTime *time.Time

    // A unique name, chosen by you, for your custom language model. This name is
    // case sensitive, cannot contain spaces, and must be unique within an Amazon
    // Web Services account.
    ModelName *string

    // The status of the specified custom language model. When the status displays
    // as COMPLETED the model is ready for use.
    ModelStatus ModelStatus

    // Shows if a more current base model is available for use with the specified
    // custom language model. If false, your language model is using the most
    // up-to-date base model. If true, there is a newer base model available than
    // the one your language model is using. Note that to update a base model, you
    // must recreate the custom language model using the new base model. Base model
    // upgrades for existing custom language models are not supported.
    UpgradeAvailability *bool

    // contains filtered or unexported fields
}
Provides information about a custom language model, including the base model name, when the model was created, the location of the files used to train the model, when the model was last modified, the name you chose for the model, its language, its processing state, and if there is an upgrade available for the base model.
type LimitExceededException ¶
type LimitExceededException struct {
    Message *string
    // contains filtered or unexported fields
}
You've either sent too many requests or your input file is too long. Wait before retrying your request, or use a smaller file and try your request again.
func (*LimitExceededException) Error ¶
func (e *LimitExceededException) Error() string
func (*LimitExceededException) ErrorCode ¶
func (e *LimitExceededException) ErrorCode() string
func (*LimitExceededException) ErrorFault ¶
func (e *LimitExceededException) ErrorFault() smithy.ErrorFault
func (*LimitExceededException) ErrorMessage ¶
func (e *LimitExceededException) ErrorMessage() string
type Media ¶
type Media struct {

    // The Amazon S3 location of the media file you want to transcribe. For
    // example:
    //
    //   * s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
    //
    //   * s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
    //
    // Note that the Amazon S3 bucket that contains your input media must be
    // located in the same Amazon Web Services Region where you're making your
    // transcription request.
    MediaFileUri *string

    // The Amazon S3 location of the media file you want to redact. For example:
    //
    //   * s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
    //
    //   * s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
    //
    // Note that the Amazon S3 bucket that contains your input media must be
    // located in the same Amazon Web Services Region where you're making your
    // transcription request. RedactedMediaFileUri is only supported for Call
    // Analytics (StartCallAnalyticsJob) transcription requests.
    RedactedMediaFileUri *string

    // contains filtered or unexported fields
}
Describes the Amazon S3 location of the media file you want to use in your request.
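A minimal sketch pointing a request at an input file; the URI is the placeholder from the field documentation:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // The bucket must be in the same Region as the transcription request.
    media := types.Media{
        MediaFileUri: aws.String("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"),
    }
    fmt.Println(*media.MediaFileUri)
}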
type MediaFormat ¶
type MediaFormat string
const (
    MediaFormatMp3  MediaFormat = "mp3"
    MediaFormatMp4  MediaFormat = "mp4"
    MediaFormatWav  MediaFormat = "wav"
    MediaFormatFlac MediaFormat = "flac"
    MediaFormatOgg  MediaFormat = "ogg"
    MediaFormatAmr  MediaFormat = "amr"
    MediaFormatWebm MediaFormat = "webm"
)
Enum values for MediaFormat
func (MediaFormat) Values ¶
func (MediaFormat) Values() []MediaFormat
Values returns all known values for MediaFormat. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type MedicalContentIdentificationType ¶
type MedicalContentIdentificationType string
const (
    MedicalContentIdentificationTypePhi MedicalContentIdentificationType = "PHI"
)
Enum values for MedicalContentIdentificationType
func (MedicalContentIdentificationType) Values ¶
func (MedicalContentIdentificationType) Values() []MedicalContentIdentificationType
Values returns all known values for MedicalContentIdentificationType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type MedicalTranscript ¶
type MedicalTranscript struct {

    // The Amazon S3 location of your transcript. You can use this URI to access or
    // download your transcript. If you included OutputBucketName in your
    // transcription job request, this is the URI of that bucket. If you also
    // included OutputKey in your request, your output is located in the path you
    // specified in your request. If you didn't include OutputBucketName in your
    // transcription job request, your transcript is stored in a service-managed
    // bucket, and TranscriptFileUri provides you with a temporary URI you can use
    // for secure access to your transcript. Temporary URIs for service-managed
    // Amazon S3 buckets are only valid for 15 minutes. If you get an AccessDenied
    // error, you can get a new temporary URI by running a GetTranscriptionJob or
    // ListTranscriptionJob request.
    TranscriptFileUri *string

    // contains filtered or unexported fields
}
Provides you with the Amazon S3 URI you can use to access your transcript.
type MedicalTranscriptionJob ¶
type MedicalTranscriptionJob struct {

    // The date and time the specified medical transcription job finished
    // processing. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC.
    // For example, 2022-05-04T12:33:13.922000-07:00 represents a transcription job
    // that started processing at 12:33 PM UTC-7 on May 4, 2022.
    CompletionTime *time.Time

    // Labels all personal health information (PHI) identified in your transcript.
    // For more information, see Identifying personal health information (PHI) in a
    // transcription (https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html).
    ContentIdentificationType MedicalContentIdentificationType

    // The date and time the specified medical transcription job request was made.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    CreationTime *time.Time

    // If TranscriptionJobStatus is FAILED, FailureReason contains information
    // about why the transcription job request failed. The FailureReason field
    // contains one of the following values:
    //
    //   * Unsupported media format. The media format specified in MediaFormat
    //   isn't valid. Refer to MediaFormat for a list of supported formats.
    //
    //   * The media format provided does not match the detected media format. The
    //   media format specified in MediaFormat doesn't match the format of the
    //   input file. Check the media format of your media file and correct the
    //   specified value.
    //
    //   * Invalid sample rate for audio file. The sample rate specified in
    //   MediaSampleRateHertz isn't valid. The sample rate must be between 16,000
    //   and 48,000 Hertz.
    //
    //   * The sample rate provided does not match the detected sample rate. The
    //   sample rate specified in MediaSampleRateHertz doesn't match the sample
    //   rate detected in your input media file. Check the sample rate of your
    //   media file and correct the specified value.
    //
    //   * Invalid file size: file size too large. The size of your media file is
    //   larger than what Amazon Transcribe can process. For more information,
    //   refer to Guidelines and quotas
    //   (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits).
    //
    //   * Invalid number of channels: number of channels too large. Your audio
    //   contains more channels than Amazon Transcribe is able to process. For more
    //   information, refer to Guidelines and quotas
    //   (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits).
    FailureReason *string

    // The language code used to create your medical transcription job. US English
    // (en-US) is the only supported language for medical transcriptions.
    LanguageCode LanguageCode

    // Describes the Amazon S3 location of the media file you want to use in your
    // request.
    Media *Media

    // The format of the input media file.
    MediaFormat MediaFormat

    // The sample rate, in Hertz, of the audio track in your input media file.
    MediaSampleRateHertz *int32

    // The name of the medical transcription job. Job names are case sensitive and
    // must be unique within an Amazon Web Services account.
    MedicalTranscriptionJobName *string

    // Specify additional optional settings in your request, including channel
    // identification, alternative transcriptions, and speaker labeling; allows you
    // to apply custom vocabularies to your medical transcription job.
    Settings *MedicalTranscriptionSetting

    // Describes the medical specialty represented in your media.
    Specialty Specialty

    // The date and time the specified medical transcription job began processing.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    StartTime *time.Time

    // The tags, each in the form of a key:value pair, assigned to the specified
    // medical transcription job.
    Tags []Tag

    // Provides you with the Amazon S3 URI you can use to access your transcript.
    Transcript *MedicalTranscript

    // Provides the status of the specified medical transcription job. If the
    // status is COMPLETED, the job is finished and you can find the results at the
    // location specified in TranscriptFileUri. If the status is FAILED,
    // FailureReason provides details on why your transcription job failed.
    TranscriptionJobStatus TranscriptionJobStatus

    // Indicates whether the input media is a dictation or a conversation, as
    // specified in the StartMedicalTranscriptionJob request.
    Type Type

    // contains filtered or unexported fields
}
Provides detailed information about a medical transcription job. To view the status of the specified medical transcription job, check the TranscriptionJobStatus field. If the status is COMPLETED, the job is finished and you can find the results at the location specified in TranscriptFileUri. If the status is FAILED, FailureReason provides details on why your transcription job failed.
type MedicalTranscriptionJobSummary ¶
type MedicalTranscriptionJobSummary struct {

    // The date and time the specified medical transcription job finished
    // processing. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC.
    // For example, 2022-05-04T12:33:13.922000-07:00 represents a transcription job
    // that started processing at 12:33 PM UTC-7 on May 4, 2022.
    CompletionTime *time.Time

    // Labels all personal health information (PHI) identified in your transcript.
    // For more information, see Identifying personal health information (PHI) in a
    // transcription (https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html).
    ContentIdentificationType MedicalContentIdentificationType

    // The date and time the specified medical transcription job request was made.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.761000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    CreationTime *time.Time

    // If TranscriptionJobStatus is FAILED, FailureReason contains information
    // about why the transcription job failed. See also: Common Errors
    // (https://docs.aws.amazon.com/transcribe/latest/APIReference/CommonErrors.html).
    FailureReason *string

    // The language code used to create your medical transcription. US English
    // (en-US) is the only supported language for medical transcriptions.
    LanguageCode LanguageCode

    // The name of the medical transcription job. Job names are case sensitive and
    // must be unique within an Amazon Web Services account.
    MedicalTranscriptionJobName *string

    // Indicates where the specified medical transcription output is stored. If the
    // value is CUSTOMER_BUCKET, the location is the Amazon S3 bucket you specified
    // using the OutputBucketName parameter in your request. If you also included
    // OutputKey in your request, your output is located in the path you specified
    // in your request. If the value is SERVICE_BUCKET, the location is a
    // service-managed Amazon S3 bucket. To access a transcript stored in a
    // service-managed bucket, use the URI shown in the TranscriptFileUri field.
    OutputLocationType OutputLocationType

    // Provides the medical specialty represented in your media.
    Specialty Specialty

    // The date and time your medical transcription job began processing.
    // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example,
    // 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started
    // processing at 12:32 PM UTC-7 on May 4, 2022.
    StartTime *time.Time

    // Provides the status of your medical transcription job. If the status is
    // COMPLETED, the job is finished and you can find the results at the location
    // specified in TranscriptFileUri. If the status is FAILED, FailureReason
    // provides details on why your transcription job failed.
    TranscriptionJobStatus TranscriptionJobStatus

    // Indicates whether the input media is a dictation or a conversation, as
    // specified in the StartMedicalTranscriptionJob request.
    Type Type

    // contains filtered or unexported fields
}
Provides detailed information about a specific medical transcription job.
type MedicalTranscriptionSetting ¶
type MedicalTranscriptionSetting struct {

    // Enables channel identification in multi-channel audio. Channel
    // identification transcribes the audio on each channel independently, then
    // appends the output for each channel into one transcript. If you have
    // multi-channel audio and do not enable channel identification, your audio is
    // transcribed in a continuous manner and your transcript does not separate the
    // speech by channel. You can't include both ShowSpeakerLabels and
    // ChannelIdentification in the same request. Including both parameters returns
    // a BadRequestException. For more information, see Transcribing multi-channel
    // audio (https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html).
    ChannelIdentification *bool

    // Indicate the maximum number of alternative transcriptions you want Amazon
    // Transcribe Medical to include in your transcript. If you select a number
    // greater than the number of alternative transcriptions generated by Amazon
    // Transcribe Medical, only the actual number of alternative transcriptions are
    // included. If you include MaxAlternatives in your request, you must also
    // include ShowAlternatives with a value of true. For more information, see
    // Alternative transcriptions
    // (https://docs.aws.amazon.com/transcribe/latest/dg/how-alternatives.html).
    MaxAlternatives *int32

    // Specify the maximum number of speakers you want to identify in your media.
    // Note that if your media contains more speakers than the specified number,
    // multiple speakers will be identified as a single speaker. If you specify the
    // MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true.
    MaxSpeakerLabels *int32

    // To include alternative transcriptions within your transcription output,
    // include ShowAlternatives in your transcription request. If you include
    // ShowAlternatives, you must also include MaxAlternatives, which is the
    // maximum number of alternative transcriptions you want Amazon Transcribe
    // Medical to generate. For more information, see Alternative transcriptions
    // (https://docs.aws.amazon.com/transcribe/latest/dg/how-alternatives.html).
    ShowAlternatives *bool

    // Enables speaker identification (diarization) in your transcription output.
    // Speaker identification labels the speech from individual speakers in your
    // media file. If you enable ShowSpeakerLabels in your request, you must also
    // include MaxSpeakerLabels. You can't include both ShowSpeakerLabels and
    // ChannelIdentification in the same request. Including both parameters returns
    // a BadRequestException. For more information, see Identifying speakers
    // (diarization)
    // (https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html).
    ShowSpeakerLabels *bool

    // The name of the custom vocabulary you want to use when processing your
    // medical transcription job. Vocabulary names are case sensitive. The language
    // of the specified vocabulary must match the language code you specify in your
    // transcription request. If the languages don't match, the vocabulary isn't
    // applied. There are no errors or warnings associated with a language
    // mismatch. US English (en-US) is the only valid language for Amazon
    // Transcribe Medical.
    VocabularyName *string

    // contains filtered or unexported fields
}
Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker labeling; allows you to apply custom vocabularies to your medical transcription job.
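A minimal sketch enabling speaker diarization under the pairing rules described above:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // ShowSpeakerLabels and MaxSpeakerLabels must be set together, and
    // ChannelIdentification must be left unset when speaker labels are enabled.
    settings := types.MedicalTranscriptionSetting{
        ShowSpeakerLabels: aws.Bool(true),
        MaxSpeakerLabels:  aws.Int32(2),
    }
    fmt.Println(*settings.MaxSpeakerLabels)
}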
type ModelSettings ¶
type ModelSettings struct {

    // The name of the custom language model you want to use when processing your
    // transcription job. Note that language model names are case sensitive. The
    // language of the specified language model must match the language code you
    // specify in your transcription request. If the languages don't match, the
    // language model isn't applied. There are no errors or warnings associated
    // with a language mismatch.
    LanguageModelName *string

    // contains filtered or unexported fields
}
Provides the name of the custom language model that was included in the specified transcription job. Only use ModelSettings with the LanguageModelName sub-parameter if you're not using automatic language identification (IdentifyLanguage). If using LanguageIdSettings in your request, this parameter contains a LanguageModelName sub-parameter.
type ModelStatus ¶
type ModelStatus string
const (
    ModelStatusInProgress ModelStatus = "IN_PROGRESS"
    ModelStatusFailed     ModelStatus = "FAILED"
    ModelStatusCompleted  ModelStatus = "COMPLETED"
)
Enum values for ModelStatus
func (ModelStatus) Values ¶
func (ModelStatus) Values() []ModelStatus
Values returns all known values for ModelStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type NonTalkTimeFilter ¶
type NonTalkTimeFilter struct {

    // Allows you to specify a time range (in milliseconds) in your audio, during
    // which you want to search for a period of silence. See AbsoluteTimeRange for
    // more detail.
    AbsoluteTimeRange *AbsoluteTimeRange

    // Set to TRUE to flag periods of speech. Set to FALSE to flag periods of
    // silence.
    Negate *bool

    // Allows you to specify a time range (in percentage) in your media file,
    // during which you want to search for a period of silence. See
    // RelativeTimeRange for more detail.
    RelativeTimeRange *RelativeTimeRange

    // Specify the duration, in milliseconds, of the period of silence you want to
    // flag. For example, you can flag a silent period that lasts 30000
    // milliseconds.
    Threshold *int64

    // contains filtered or unexported fields
}
Flag the presence or absence of periods of silence in your Call Analytics transcription output. Rules using NonTalkTimeFilter are designed to match:
* The presence of silence at specified periods throughout the call
* The presence of speech at specified periods throughout the call
See Rule criteria (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics-create-categories.html#call-analytics-create-categories-rules) for usage examples.
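As a sketch, the rule below flags silent periods of at least 30,000 milliseconds, wrapped in the RuleMemberNonTalkTimeFilter union member described under Rule:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Flag any silent period lasting 30,000 ms or more.
    rule := &types.RuleMemberNonTalkTimeFilter{
        Value: types.NonTalkTimeFilter{
            Threshold: aws.Int64(30000),
        },
    }
    rules := []types.Rule{rule}
    fmt.Println(len(rules), "rule(s) defined")
}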
type NotFoundException ¶
type NotFoundException struct {
    Message *string
    // contains filtered or unexported fields
}
We can't find the requested resource. Check that the specified name is correct and try your request again.
func (*NotFoundException) Error ¶
func (e *NotFoundException) Error() string
func (*NotFoundException) ErrorCode ¶
func (e *NotFoundException) ErrorCode() string
func (*NotFoundException) ErrorFault ¶
func (e *NotFoundException) ErrorFault() smithy.ErrorFault
func (*NotFoundException) ErrorMessage ¶
func (e *NotFoundException) ErrorMessage() string
type OutputLocationType ¶
type OutputLocationType string
const (
    OutputLocationTypeCustomerBucket OutputLocationType = "CUSTOMER_BUCKET"
    OutputLocationTypeServiceBucket  OutputLocationType = "SERVICE_BUCKET"
)
Enum values for OutputLocationType
func (OutputLocationType) Values ¶
func (OutputLocationType) Values() []OutputLocationType
Values returns all known values for OutputLocationType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type ParticipantRole ¶
type ParticipantRole string
const (
    ParticipantRoleAgent    ParticipantRole = "AGENT"
    ParticipantRoleCustomer ParticipantRole = "CUSTOMER"
)
Enum values for ParticipantRole
func (ParticipantRole) Values ¶
func (ParticipantRole) Values() []ParticipantRole
Values returns all known values for ParticipantRole. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type PiiEntityType ¶
type PiiEntityType string
const (
    PiiEntityTypeBankAccountNumber PiiEntityType = "BANK_ACCOUNT_NUMBER"
    PiiEntityTypeBankRouting       PiiEntityType = "BANK_ROUTING"
    PiiEntityTypeCreditDebitNumber PiiEntityType = "CREDIT_DEBIT_NUMBER"
    PiiEntityTypeCreditDebitCvv    PiiEntityType = "CREDIT_DEBIT_CVV"
    PiiEntityTypeCreditDebitExpiry PiiEntityType = "CREDIT_DEBIT_EXPIRY"
    PiiEntityTypePin               PiiEntityType = "PIN"
    PiiEntityTypeEmail             PiiEntityType = "EMAIL"
    PiiEntityTypeAddress           PiiEntityType = "ADDRESS"
    PiiEntityTypeName              PiiEntityType = "NAME"
    PiiEntityTypePhone             PiiEntityType = "PHONE"
    PiiEntityTypeSsn               PiiEntityType = "SSN"
    PiiEntityTypeAll               PiiEntityType = "ALL"
)
Enum values for PiiEntityType
func (PiiEntityType) Values ¶
func (PiiEntityType) Values() []PiiEntityType
Values returns all known values for PiiEntityType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type RedactionOutput ¶
type RedactionOutput string
const ( RedactionOutputRedacted RedactionOutput = "redacted" RedactionOutputRedactedAndUnredacted RedactionOutput = "redacted_and_unredacted" )
Enum values for RedactionOutput
func (RedactionOutput) Values ¶
func (RedactionOutput) Values() []RedactionOutput
Values returns all known values for RedactionOutput. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type RedactionType ¶
type RedactionType string
const ( RedactionTypePii RedactionType = "PII" )
Enum values for RedactionType
func (RedactionType) Values ¶
func (RedactionType) Values() []RedactionType
Values returns all known values for RedactionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
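These redaction enums come together in the ContentRedaction type listed in the index. A minimal sketch of a redaction configuration, assuming ContentRedaction's RedactionType, RedactionOutput, and PiiEntityTypes fields as defined elsewhere in this package; the choice of entity types is illustrative only.

package main

import "github.com/aws/aws-sdk-go-v2/service/transcribe/types"

func main() {
    redaction := &types.ContentRedaction{
        RedactionType:   types.RedactionTypePii,                     // PII is the only supported type
        RedactionOutput: types.RedactionOutputRedactedAndUnredacted, // keep both transcripts
        // Redact only these categories; use PiiEntityTypeAll to redact everything.
        PiiEntityTypes: []types.PiiEntityType{
            types.PiiEntityTypeSsn,
            types.PiiEntityTypeCreditDebitNumber,
        },
    }
    _ = redaction
}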
type RelativeTimeRange ¶
type RelativeTimeRange struct { // The time, in percentage, when Amazon Transcribe stops searching for the // specified criteria in your media file. If you include EndPercentage in your // request, you must also include StartPercentage. EndPercentage *int32 // The time, in percentage, from the start of your media file until the value you // specify in which Amazon Transcribe searches for your specified criteria. First *int32 // The time, in percentage, from the value you specify until the end of your media // file in which Amazon Transcribe searches for your specified criteria. Last *int32 // The time, in percentage, when Amazon Transcribe starts searching for the // specified criteria in your media file. If you include StartPercentage in your // request, you must also include EndPercentage. StartPercentage *int32 // contains filtered or unexported fields }
A time range, in percentage, between two points in your media file. You can use StartPercentage and EndPercentage to search a custom segment. For example, setting StartPercentage to 10 and EndPercentage to 50 only searches for your specified criteria in the audio contained between the 10 percent mark and the 50 percent mark of your media file. You can also use First to search from the start of the media file until the time you specify, or Last to search from the time you specify until the end of the media file. For example, setting First to 10 only searches for your specified criteria in the audio contained in the first 10 percent of the media file. If you prefer to use milliseconds instead of percentage, see AbsoluteTimeRange.
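A short sketch of the two styles of relative range described above; the percentages are illustrative.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    // Search only between the 10 percent and 50 percent marks of the file.
    segment := &types.RelativeTimeRange{
        StartPercentage: aws.Int32(10),
        EndPercentage:   aws.Int32(50),
    }
    // Search only the first 10 percent of the file.
    opening := &types.RelativeTimeRange{First: aws.Int32(10)}
    _, _ = segment, opening
}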
type Rule ¶
type Rule interface {
// contains filtered or unexported methods
}
A rule is a set of criteria you can specify to flag an attribute in your Call Analytics output. Rules define a Call Analytics category. Rules can include these parameters: InterruptionFilter, NonTalkTimeFilter, SentimentFilter, and TranscriptFilter. To learn more about these parameters, refer to Rule criteria (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics-create-categories.html#call-analytics-create-categories-rules). To learn more about Call Analytics categories, see Creating categories (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics-create-categories.html). To learn more about Call Analytics, see Analyzing call center audio with Call Analytics (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics.html).
The following types satisfy this interface:
RuleMemberInterruptionFilter
RuleMemberNonTalkTimeFilter
RuleMemberSentimentFilter
RuleMemberTranscriptFilter
Example (OutputUsage) ¶
Code:
// Code generated by smithy-go-codegen DO NOT EDIT.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    var union types.Rule
    // type switches can be used to check the union value
    switch v := union.(type) {
    case *types.RuleMemberInterruptionFilter:
        _ = v.Value // Value is types.InterruptionFilter

    case *types.RuleMemberNonTalkTimeFilter:
        _ = v.Value // Value is types.NonTalkTimeFilter

    case *types.RuleMemberSentimentFilter:
        _ = v.Value // Value is types.SentimentFilter

    case *types.RuleMemberTranscriptFilter:
        _ = v.Value // Value is types.TranscriptFilter

    case *types.UnknownUnionMember:
        fmt.Println("unknown tag:", v.Tag)

    default:
        fmt.Println("union is nil or unknown type")

    }
}

var _ *types.NonTalkTimeFilter
var _ *types.SentimentFilter
var _ *types.TranscriptFilter
var _ *types.InterruptionFilter
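Going the other direction, a sketch of constructing Rule values: the RuleMember wrappers satisfy the interface through pointer receivers, as the type switch above suggests, and a slice like this is what the CreateCallAnalyticsCategory operation in the parent transcribe package accepts. The filter values are illustrative.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    rules := []types.Rule{
        // Flag calls containing 30 seconds or more of silence.
        &types.RuleMemberNonTalkTimeFilter{
            Value: types.NonTalkTimeFilter{Threshold: aws.Int64(30000)},
        },
        // Flag calls in which negative sentiment was detected.
        &types.RuleMemberSentimentFilter{
            Value: types.SentimentFilter{
                Sentiments: []types.SentimentValue{types.SentimentValueNegative},
            },
        },
    }
    _ = rules
}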
type RuleMemberInterruptionFilter ¶
type RuleMemberInterruptionFilter struct { Value InterruptionFilter // contains filtered or unexported fields }
Flag the presence or absence of interruptions in your Call Analytics transcription output. Refer to InterruptionFilter for more detail.
type RuleMemberNonTalkTimeFilter ¶
type RuleMemberNonTalkTimeFilter struct { Value NonTalkTimeFilter // contains filtered or unexported fields }
Flag the presence or absence of periods of silence in your Call Analytics transcription output. Refer to NonTalkTimeFilter for more detail.
type RuleMemberSentimentFilter ¶
type RuleMemberSentimentFilter struct { Value SentimentFilter // contains filtered or unexported fields }
Flag the presence or absence of specific sentiments in your Call Analytics transcription output. Refer to SentimentFilter for more detail.
type RuleMemberTranscriptFilter ¶
type RuleMemberTranscriptFilter struct { Value TranscriptFilter // contains filtered or unexported fields }
Flag the presence or absence of specific words or phrases in your Call Analytics transcription output. Refer to TranscriptFilter for more detail.
type SentimentFilter ¶
type SentimentFilter struct { // Specify the sentiments you want to flag. // // This member is required. Sentiments []SentimentValue // Allows you to specify a time range (in milliseconds) in your audio, during which // you want to search for the specified sentiments. See AbsoluteTimeRange for more // detail. AbsoluteTimeRange *AbsoluteTimeRange // Set to TRUE to flag the sentiments you didn't include in your request. Set to // FALSE to flag the sentiments you specified in your request. Negate *bool // Specify the participant you want to flag. Omitting this parameter is equivalent // to specifying both participants. ParticipantRole ParticipantRole // Allows you to specify a time range (in percentage) in your media file, during // which you want to search for the specified sentiments. See RelativeTimeRange for // more detail. RelativeTimeRange *RelativeTimeRange // contains filtered or unexported fields }
Flag the presence or absence of specific sentiments detected in your Call Analytics transcription output. Rules using SentimentFilter are designed to match:
* The presence or absence of a positive sentiment felt by the customer, agent, or both at specified points in the call
* The presence or absence of a negative sentiment felt by the customer, agent, or both at specified points in the call
* The presence or absence of a neutral sentiment felt by the customer, agent, or both at specified points in the call
* The presence or absence of a mixed sentiment felt by the customer, the agent, or both at specified points in the call
See Rule criteria (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics-create-categories.html#call-analytics-create-categories-rules) for examples.
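For example, a sketch of a filter that flags negative customer sentiment in the final quarter of a call (the values are illustrative):

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    filter := types.SentimentFilter{
        Sentiments:      []types.SentimentValue{types.SentimentValueNegative}, // required
        ParticipantRole: types.ParticipantRoleCustomer,                        // omit to match both
        RelativeTimeRange: &types.RelativeTimeRange{
            StartPercentage: aws.Int32(75),
            EndPercentage:   aws.Int32(100),
        },
    }
    _ = filter
}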
type SentimentValue ¶
type SentimentValue string
const ( SentimentValuePositive SentimentValue = "POSITIVE" SentimentValueNegative SentimentValue = "NEGATIVE" SentimentValueNeutral SentimentValue = "NEUTRAL" SentimentValueMixed SentimentValue = "MIXED" )
Enum values for SentimentValue
func (SentimentValue) Values ¶
func (SentimentValue) Values() []SentimentValue
Values returns all known values for SentimentValue. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type Settings ¶
type Settings struct { // Enables channel identification in multi-channel audio. Channel identification // transcribes the audio on each channel independently, then appends the output for // each channel into one transcript. You can't include both ShowSpeakerLabels and // ChannelIdentification in the same request. Including both parameters returns a // BadRequestException. For more information, see Transcribing multi-channel audio // (https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html). ChannelIdentification *bool // Indicate the maximum number of alternative transcriptions you want Amazon // Transcribe to include in your transcript. If you select a number greater than // the number of alternative transcriptions generated by Amazon Transcribe, only // the actual number of alternative transcriptions are included. If you include // MaxAlternatives in your request, you must also include ShowAlternatives with a // value of true. For more information, see Alternative transcriptions // (https://docs.aws.amazon.com/transcribe/latest/dg/how-alternatives.html). MaxAlternatives *int32 // Specify the maximum number of speakers you want to identify in your media. Note // that if your media contains more speakers than the specified number, multiple // speakers will be identified as a single speaker. If you specify the // MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true. MaxSpeakerLabels *int32 // To include alternative transcriptions within your transcription output, include // ShowAlternatives in your transcription request. If you have multi-channel audio // and do not enable channel identification, your audio is transcribed in a // continuous manner and your transcript does not separate the speech by channel. // If you include ShowAlternatives, you must also include MaxAlternatives, which is // the maximum number of alternative transcriptions you want Amazon Transcribe to // generate. For more information, see Alternative transcriptions // (https://docs.aws.amazon.com/transcribe/latest/dg/how-alternatives.html). ShowAlternatives *bool // Enables speaker identification (diarization) in your transcription output. // Speaker identification labels the speech from individual speakers in your media // file. If you enable ShowSpeakerLabels in your request, you must also include // MaxSpeakerLabels. You can't include both ShowSpeakerLabels and // ChannelIdentification in the same request. Including both parameters returns a // BadRequestException. For more information, see Identifying speakers // (diarization) // (https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html). ShowSpeakerLabels *bool // Specify how you want your vocabulary filter applied to your transcript. To // replace words with ***, choose mask. To delete words, choose remove. To flag // words without changing them, choose tag. VocabularyFilterMethod VocabularyFilterMethod // The name of the custom vocabulary filter you want to use in your transcription // job request. This name is case sensitive, cannot contain spaces, and must be // unique within an Amazon Web Services account. Note that if you include // VocabularyFilterName in your request, you must also include // VocabularyFilterMethod. VocabularyFilterName *string // The name of the custom vocabulary you want to use in your transcription job // request. This name is case sensitive, cannot contain spaces, and must be unique // within an Amazon Web Services account. VocabularyName *string // contains filtered or unexported fields }
Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker labeling; allows you to apply custom vocabularies to your transcription job.
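A minimal sketch that enables speaker diarization and applies a masking vocabulary filter; the vocabulary and filter names are hypothetical, and the pairing rules called out above apply (ShowSpeakerLabels requires MaxSpeakerLabels, VocabularyFilterName requires VocabularyFilterMethod).

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    settings := &types.Settings{
        ShowSpeakerLabels:      aws.Bool(true), // requires MaxSpeakerLabels
        MaxSpeakerLabels:       aws.Int32(4),
        VocabularyName:         aws.String("my-vocabulary"),        // hypothetical name
        VocabularyFilterName:   aws.String("my-vocabulary-filter"), // hypothetical name
        VocabularyFilterMethod: types.VocabularyFilterMethodMask,   // replace filtered words with ***
    }
    _ = settings
}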
type Specialty ¶
type Specialty string
const ( SpecialtyPrimarycare Specialty = "PRIMARYCARE" )
Enum values for Specialty
func (Specialty) Values ¶
func (Specialty) Values() []Specialty
Values returns all known values for Specialty. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type SubtitleFormat ¶
type SubtitleFormat string
const ( SubtitleFormatVtt SubtitleFormat = "vtt" SubtitleFormatSrt SubtitleFormat = "srt" )
Enum values for SubtitleFormat
func (SubtitleFormat) Values ¶
func (SubtitleFormat) Values() []SubtitleFormat
Values returns all known values for SubtitleFormat. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type Subtitles ¶
type Subtitles struct { // Specify the output format for your subtitle file; if you select both WebVTT // (vtt) and SubRip (srt) formats, two output files are generated. Formats []SubtitleFormat // Specify the starting value that is assigned to the first subtitle segment. The // default start index for Amazon Transcribe is 0, which differs from the more // widely used standard of 1. If you're uncertain which value to use, we recommend // choosing 1, as this may improve compatibility with other services. OutputStartIndex *int32 // contains filtered or unexported fields }
Generate subtitles for your media file with your transcription request. You can choose a start index of 0 or 1, and you can specify either WebVTT or SubRip (or both) as your output format. Note that your subtitle files are placed in the same location as your transcription output.
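A minimal sketch requesting both formats with the more broadly compatible start index of 1:

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    subtitles := &types.Subtitles{
        // Requesting both formats produces two output files.
        Formats:          []types.SubtitleFormat{types.SubtitleFormatVtt, types.SubtitleFormatSrt},
        OutputStartIndex: aws.Int32(1), // 1 matches the more widely used standard
    }
    _ = subtitles
}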
type SubtitlesOutput ¶
type SubtitlesOutput struct { // Provides the format of your subtitle files. If your request included both WebVTT // (vtt) and SubRip (srt) formats, both formats are shown. Formats []SubtitleFormat // Provides the start index value for your subtitle files. If you did not specify a // value in your request, the default value of 0 is used. OutputStartIndex *int32 // The Amazon S3 location of your transcript. You can use this URI to access or // download your subtitle file. Your subtitle file is stored in the same location // as your transcript. If you specified both WebVTT and SubRip subtitle formats, // two URIs are provided. If you included OutputBucketName in your transcription // job request, this is the URI of that bucket. If you also included OutputKey in // your request, your output is located in the path you specified in your request. // If you didn't include OutputBucketName in your transcription job request, your // subtitle file is stored in a service-managed bucket, and TranscriptFileUri // provides you with a temporary URI you can use for secure access to your subtitle // file. Temporary URIs for service-managed Amazon S3 buckets are only valid for 15 // minutes. If you get an AccessDenied error, you can get a new temporary URI by // running a GetTranscriptionJob or ListTranscriptionJobs request. SubtitleFileUris []string // contains filtered or unexported fields }
Provides information about your subtitle file, including format, start index, and Amazon S3 location.
type Tag ¶
type Tag struct { // The first part of a key:value pair that forms a tag associated with a given // resource. For example, in the tag Department:Sales, the key is 'Department'. // // This member is required. Key *string // The second part of a key:value pair that forms a tag associated with a given // resource. For example, in the tag Department:Sales, the value is 'Sales'. Note // that you can set the value of a tag to an empty string, but you can't set the // value of a tag to null. Omitting the tag value is the same as using an empty // string. // // This member is required. Value *string // contains filtered or unexported fields }
Adds metadata, in the form of a key:value pair, to the specified resource. For example, you could add the tag Department:Sales to a resource to indicate that it pertains to your organization's sales department. You can also use tags for tag-based access control. To learn more about tagging, see Tagging resources (https://docs.aws.amazon.com/transcribe/latest/dg/tagging.html).
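Continuing the Department:Sales example, a sketch of the pair as it would appear in a request's Tags field:

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    tags := []types.Tag{
        {Key: aws.String("Department"), Value: aws.String("Sales")}, // both members are required
    }
    _ = tags
}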
type Transcript ¶
type Transcript struct { // The Amazon S3 location of your redacted transcript. You can use this URI to // access or download your transcript. If you included OutputBucketName in your // transcription job request, this is the URI of that bucket. If you also included // OutputKey in your request, your output is located in the path you specified in // your request. If you didn't include OutputBucketName in your transcription job // request, your transcript is stored in a service-managed bucket, and // RedactedTranscriptFileUri provides you with a temporary URI you can use for // secure access to your transcript. Temporary URIs for service-managed Amazon S3 // buckets are only valid for 15 minutes. If you get an AccessDenied error, you can // get a new temporary URI by running a GetTranscriptionJob or ListTranscriptionJobs // request. RedactedTranscriptFileUri *string // The Amazon S3 location of your transcript. You can use this URI to access or // download your transcript. If you included OutputBucketName in your transcription // job request, this is the URI of that bucket. If you also included OutputKey in // your request, your output is located in the path you specified in your request. // If you didn't include OutputBucketName in your transcription job request, your // transcript is stored in a service-managed bucket, and TranscriptFileUri provides // you with a temporary URI you can use for secure access to your transcript. // Temporary URIs for service-managed Amazon S3 buckets are only valid for 15 // minutes. If you get an AccessDenied error, you can get a new temporary URI by // running a GetTranscriptionJob or ListTranscriptionJobs request. TranscriptFileUri *string // contains filtered or unexported fields }
Provides you with the Amazon S3 URI you can use to access your transcript.
type TranscriptFilter ¶
type TranscriptFilter struct { // Specify the phrases you want to flag. // // This member is required. Targets []string // Flag the presence or absence of an exact match to the phrases you specify. For // example, if you specify the phrase "speak to a manager" as your Targets value, // only that exact phrase is flagged. Note that semantic matching is not supported. // For example, if your customer says "speak to the manager", instead of "speak to // a manager", your content is not flagged. // // This member is required. TranscriptFilterType TranscriptFilterType // Allows you to specify a time range (in milliseconds) in your audio, during which // you want to search for the specified key words or phrases. See AbsoluteTimeRange // for more detail. AbsoluteTimeRange *AbsoluteTimeRange // Set to TRUE to flag the absence of the phrase you specified in your request. Set // to FALSE to flag the presence of the phrase you specified in your request. Negate *bool // Specify the participant you want to flag. Omitting this parameter is equivalent // to specifying both participants. ParticipantRole ParticipantRole // Allows you to specify a time range (in percentage) in your media file, during // which you want to search for the specified key words or phrases. See // RelativeTimeRange for more detail. RelativeTimeRange *RelativeTimeRange // contains filtered or unexported fields }
Flag the presence or absence of specific words or phrases detected in your Call Analytics transcription output. Rules using TranscriptFilter are designed to match:
* Custom words or phrases spoken by the agent, the customer, or both
* Custom words or phrases not spoken by the agent, the customer, or either
* Custom words or phrases that occur at a specific time frame
See Rule criteria (https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics-create-categories.html#call-analytics-create-categories-rules) for examples.
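For example, a sketch of a filter that flags calls in which the customer asks for the exact phrase used above:

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    filter := types.TranscriptFilter{
        Targets:              []string{"speak to a manager"},  // required
        TranscriptFilterType: types.TranscriptFilterTypeExact, // required; EXACT is the only value
        Negate:               aws.Bool(false),                 // flag presence, not absence
        ParticipantRole:      types.ParticipantRoleCustomer,   // omit to match both participants
    }
    _ = filter
}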
type TranscriptFilterType ¶
type TranscriptFilterType string
const ( TranscriptFilterTypeExact TranscriptFilterType = "EXACT" )
Enum values for TranscriptFilterType
func (TranscriptFilterType) Values ¶
func (TranscriptFilterType) Values() []TranscriptFilterType
Values returns all known values for TranscriptFilterType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type TranscriptionJob ¶
type TranscriptionJob struct { // The date and time the specified transcription job finished processing. // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:33:13.922000-07:00 represents a transcription job that finished // processing at 12:33 PM UTC-7 on May 4, 2022. CompletionTime *time.Time // Redacts or flags specified personally identifiable information (PII) in your // transcript. ContentRedaction *ContentRedaction // The date and time the specified transcription job request was made. Timestamps // are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:32:58.761000-07:00 represents a transcription job that started // processing at 12:32 PM UTC-7 on May 4, 2022. CreationTime *time.Time // If TranscriptionJobStatus is FAILED, FailureReason contains information about // why the transcription job request failed. The FailureReason field contains one // of the following values: // // * Unsupported media format. The media format specified // in MediaFormat isn't valid. Refer to MediaFormat for a list of supported // formats. // // * The media format provided does not match the detected media format. // The media format specified in MediaFormat doesn't match the format of the input // file. Check the media format of your media file and correct the specified // value. // // * Invalid sample rate for audio file. The sample rate specified in // MediaSampleRateHertz isn't valid. The sample rate must be between 8,000 and // 48,000 Hertz. // // * The sample rate provided does not match the detected sample // rate. The sample rate specified in MediaSampleRateHertz doesn't match the sample // rate detected in your input media file. Check the sample rate of your media file // and correct the specified value. // // * Invalid file size: file size too large. The // size of your media file is larger than what Amazon Transcribe can process. For // more information, refer to Guidelines and quotas // (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits). // // * Invalid number of channels: number of channels too large. Your audio contains // more channels than Amazon Transcribe is able to process. For more information, // refer to Guidelines and quotas // (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits). FailureReason *string // The confidence score associated with the language identified in your media file. // Confidence scores are values between 0 and 1; a larger value indicates a higher // probability that the identified language correctly matches the language spoken // in your media. IdentifiedLanguageScore *float32 // Indicates whether automatic language identification was enabled (TRUE) for the // specified transcription job. IdentifyLanguage *bool // Indicates whether automatic multi-language identification was enabled (TRUE) for // the specified transcription job. IdentifyMultipleLanguages *bool // Provides information about how your transcription job is being processed. This // parameter shows if your request is queued and what data access role is being // used. JobExecutionSettings *JobExecutionSettings // The language code used to create your transcription job. For a list of supported // languages and their associated language codes, refer to the Supported languages // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) // table. Note that you must include one of LanguageCode, IdentifyLanguage, or // IdentifyMultipleLanguages in your request. If you include more than one of these // parameters, your transcription job fails. LanguageCode LanguageCode // The language codes used to create your transcription job. This parameter is used // with multi-language identification. For single-language identification requests, // refer to the singular version of this parameter, LanguageCode. For a list of // supported languages and their associated language codes, refer to the Supported // languages // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) // table. LanguageCodes []LanguageCodeItem // If using automatic language identification (IdentifyLanguage) in your request // and you want to apply a custom language model, a custom vocabulary, or a custom // vocabulary filter, include LanguageIdSettings with the relevant sub-parameters // (VocabularyName, LanguageModelName, and VocabularyFilterName). You can specify // two or more language codes that represent the languages you think may be present // in your media; including more than five is not recommended. Each language code // you include can have an associated custom language model, custom vocabulary, and // custom vocabulary filter. The languages you specify must match the languages of // the specified custom language models, custom vocabularies, and custom vocabulary // filters. To include language options using IdentifyLanguage without including a // custom language model, a custom vocabulary, or a custom vocabulary filter, use // LanguageOptions instead of LanguageIdSettings. Including language options can // improve the accuracy of automatic language identification. If you want to // include a custom language model with your request but do not want to use // automatic language identification, use instead the ModelSettings parameter with // the LanguageModelName sub-parameter. If you want to include a custom vocabulary // or a custom vocabulary filter (or both) with your request but do not want to use // automatic language identification, use instead the Settings parameter with the // VocabularyName or VocabularyFilterName (or both) sub-parameter. LanguageIdSettings map[string]LanguageIdSettings // You can specify two or more language codes that represent the languages you // think may be present in your media; including more than five is not recommended. // If you're unsure what languages are present, do not include this parameter. If // you include LanguageOptions in your request, you must also include // IdentifyLanguage. For more information, refer to Supported languages // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html). To // transcribe speech in Modern Standard Arabic (ar-SA), your media file must be // encoded at a sample rate of 16,000 Hz or higher. LanguageOptions []LanguageCode // Describes the Amazon S3 location of the media file you want to use in your // request. Media *Media // The format of the input media file. MediaFormat MediaFormat // The sample rate, in Hertz, of the audio track in your input media file. MediaSampleRateHertz *int32 // The custom language model you want to include with your transcription job. If // you include ModelSettings in your request, you must include the // LanguageModelName sub-parameter. ModelSettings *ModelSettings // Specify additional optional settings in your request, including channel // identification, alternative transcriptions, speaker labeling; allows you to // apply custom vocabularies and vocabulary filters. If you want to include a // custom vocabulary or a custom vocabulary filter (or both) with your request but // do not want to use automatic language identification, use Settings with the // VocabularyName or VocabularyFilterName (or both) sub-parameter. If you're using // automatic language identification with your request and want to include a custom // language model, a custom vocabulary, or a custom vocabulary filter, do not use // the Settings parameter; use instead the LanguageIdSettings parameter with the // LanguageModelName, VocabularyName or VocabularyFilterName sub-parameters. Settings *Settings // The date and time the specified transcription job began processing. Timestamps // are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started // processing at 12:32 PM UTC-7 on May 4, 2022. StartTime *time.Time // Generate subtitles for your media file with your transcription request. Subtitles *SubtitlesOutput // Adds one or more custom tags, each in the form of a key:value pair, to a new // transcription job at the time you start this new job. To learn more about using // tags with Amazon Transcribe, refer to Tagging resources // (https://docs.aws.amazon.com/transcribe/latest/dg/tagging.html). Tags []Tag // Provides you with the Amazon S3 URI you can use to access your transcript. Transcript *Transcript // The name of the transcription job. Job names are case sensitive and must be // unique within an Amazon Web Services account. TranscriptionJobName *string // Provides the status of the specified transcription job. If the status is // COMPLETED, the job is finished and you can find the results at the location // specified in TranscriptFileUri (or RedactedTranscriptFileUri, if you requested // transcript redaction). If the status is FAILED, FailureReason provides details // on why your transcription job failed. TranscriptionJobStatus TranscriptionJobStatus // contains filtered or unexported fields }
Provides detailed information about a transcription job. To view the status of the specified transcription job, check the TranscriptionJobStatus field. If the status is COMPLETED, the job is finished and you can find the results at the location specified in TranscriptFileUri. If the status is FAILED, FailureReason provides details on why your transcription job failed. If you enabled content redaction, the redacted transcript can be found at the location specified in RedactedTranscriptFileUri.
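Callers typically obtain this type by polling the GetTranscriptionJob operation in the parent transcribe package until the status is terminal. A minimal sketch, assuming default credentials and a hypothetical job name "my-job":

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/transcribe"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := transcribe.NewFromConfig(cfg)

    for {
        out, err := client.GetTranscriptionJob(context.TODO(), &transcribe.GetTranscriptionJobInput{
            TranscriptionJobName: aws.String("my-job"), // hypothetical job name
        })
        if err != nil {
            log.Fatal(err)
        }
        job := out.TranscriptionJob
        switch job.TranscriptionJobStatus {
        case types.TranscriptionJobStatusCompleted:
            fmt.Println("transcript:", aws.ToString(job.Transcript.TranscriptFileUri))
            return
        case types.TranscriptionJobStatusFailed:
            log.Fatal(aws.ToString(job.FailureReason))
        }
        time.Sleep(10 * time.Second) // still QUEUED or IN_PROGRESS
    }
}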
type TranscriptionJobStatus ¶
type TranscriptionJobStatus string
const ( TranscriptionJobStatusQueued TranscriptionJobStatus = "QUEUED" TranscriptionJobStatusInProgress TranscriptionJobStatus = "IN_PROGRESS" TranscriptionJobStatusFailed TranscriptionJobStatus = "FAILED" TranscriptionJobStatusCompleted TranscriptionJobStatus = "COMPLETED" )
Enum values for TranscriptionJobStatus
func (TranscriptionJobStatus) Values ¶
func (TranscriptionJobStatus) Values() []TranscriptionJobStatus
Values returns all known values for TranscriptionJobStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type TranscriptionJobSummary ¶
type TranscriptionJobSummary struct { // The date and time the specified transcription job finished processing. // Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:33:13.922000-07:00 represents a transcription job that finished // processing at 12:33 PM UTC-7 on May 4, 2022. CompletionTime *time.Time // The content redaction settings of the transcription job. ContentRedaction *ContentRedaction // The date and time the specified transcription job request was made. Timestamps // are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:32:58.761000-07:00 represents a transcription job that started // processing at 12:32 PM UTC-7 on May 4, 2022. CreationTime *time.Time // If TranscriptionJobStatus is FAILED, FailureReason contains information about // why the transcription job failed. See also: Common Errors // (https://docs.aws.amazon.com/transcribe/latest/APIReference/CommonErrors.html). FailureReason *string // The confidence score associated with the language identified in your media file. // Confidence scores are values between 0 and 1; a larger value indicates a higher // probability that the identified language correctly matches the language spoken // in your media. IdentifiedLanguageScore *float32 // Indicates whether automatic language identification was enabled (TRUE) for the // specified transcription job. IdentifyLanguage *bool // Indicates whether automatic multi-language identification was enabled (TRUE) for // the specified transcription job. IdentifyMultipleLanguages *bool // The language code used to create your transcription. LanguageCode LanguageCode // The language codes used to create your transcription job. This parameter is used // with multi-language identification. For single-language identification, the // singular version of this parameter, LanguageCode, is present. LanguageCodes []LanguageCodeItem // Provides the name of the custom language model that was included in the // specified transcription job. Only use ModelSettings with the LanguageModelName // sub-parameter if you're not using automatic language identification // (IdentifyLanguage). If using LanguageIdSettings in your request, this parameter // contains a LanguageModelName sub-parameter. ModelSettings *ModelSettings // Indicates where the specified transcription output is stored. If the value is // CUSTOMER_BUCKET, the location is the Amazon S3 bucket you specified using the // OutputBucketName parameter in your request. If you also included OutputKey in // your request, your output is located in the path you specified in your request. // If the value is SERVICE_BUCKET, the location is a service-managed Amazon S3 // bucket. To access a transcript stored in a service-managed bucket, use the URI // shown in the TranscriptFileUri or RedactedTranscriptFileUri field. OutputLocationType OutputLocationType // The date and time your transcription job began processing. Timestamps are in the // format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started // processing at 12:32 PM UTC-7 on May 4, 2022. StartTime *time.Time // The name of the transcription job. Job names are case sensitive and must be // unique within an Amazon Web Services account. TranscriptionJobName *string // Provides the status of your transcription job. If the status is COMPLETED, the // job is finished and you can find the results at the location specified in // TranscriptFileUri (or RedactedTranscriptFileUri, if you requested transcript // redaction). If the status is FAILED, FailureReason provides details on why your // transcription job failed. TranscriptionJobStatus TranscriptionJobStatus // contains filtered or unexported fields }
Provides detailed information about a specific transcription job.
type Type ¶
type Type string
const ( TypeConversation Type = "CONVERSATION" TypeDictation Type = "DICTATION" )
Enum values for Type
func (Type) Values ¶
func (Type) Values() []Type
Values returns all known values for Type. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type UnknownUnionMember ¶
type UnknownUnionMember struct { Tag string Value []byte // contains filtered or unexported fields }
UnknownUnionMember is returned when a union member is returned over the wire, but has an unknown tag.
type VocabularyFilterInfo ¶
type VocabularyFilterInfo struct { // The language code that represents the language of the entries in your vocabulary // filter. Each vocabulary filter must contain terms in only one language. A // vocabulary filter can only be used to transcribe files in the same language as // the filter. For example, if you create a vocabulary filter using US English // (en-US), you can only apply this filter to files that contain English audio. For // a list of supported languages and their associated language codes, refer to the // Supported languages // (https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) // table. LanguageCode LanguageCode // The date and time the specified vocabulary filter was last modified. Timestamps // are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022. LastModifiedTime *time.Time // A unique name, chosen by you, for your custom vocabulary filter. This name is // case sensitive, cannot contain spaces, and must be unique within an Amazon Web // Services account. VocabularyFilterName *string // contains filtered or unexported fields }
Provides information about a vocabulary filter, including the language of the filter, when it was last modified, and its name.
type VocabularyFilterMethod ¶
type VocabularyFilterMethod string
const ( VocabularyFilterMethodRemove VocabularyFilterMethod = "remove" VocabularyFilterMethodMask VocabularyFilterMethod = "mask" VocabularyFilterMethodTag VocabularyFilterMethod = "tag" )
Enum values for VocabularyFilterMethod
func (VocabularyFilterMethod) Values ¶
func (VocabularyFilterMethod) Values() []VocabularyFilterMethod
Values returns all known values for VocabularyFilterMethod. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type VocabularyInfo ¶
type VocabularyInfo struct { // The language code used to create your custom vocabulary. Each vocabulary must // contain terms in only one language. A custom vocabulary can only be used to // transcribe files in the same language as the vocabulary. For example, if you // create a vocabulary using US English (en-US), you can only apply this vocabulary // to files that contain English audio. LanguageCode LanguageCode // The date and time the specified vocabulary was last modified. Timestamps are in // the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, // 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022. LastModifiedTime *time.Time // A unique name, chosen by you, for your custom vocabulary. This name is case // sensitive, cannot contain spaces, and must be unique within an Amazon Web // Services account. VocabularyName *string // The processing state of your custom vocabulary. If the state is READY, you can // use the vocabulary in a StartTranscriptionJob request. VocabularyState VocabularyState // contains filtered or unexported fields }
Provides information about a custom vocabulary, including the language of the vocabulary, when it was last modified, its name, and the processing state.
type VocabularyState ¶
type VocabularyState string
const ( VocabularyStatePending VocabularyState = "PENDING" VocabularyStateReady VocabularyState = "READY" VocabularyStateFailed VocabularyState = "FAILED" )
Enum values for VocabularyState
func (VocabularyState) Values ¶
func (VocabularyState) Values() []VocabularyState
Values returns all known values for VocabularyState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
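Because a PENDING vocabulary can't yet be used in a StartTranscriptionJob request, callers often wait for READY. A minimal sketch, assuming the GetVocabulary operation in the parent transcribe package and an already-constructed client:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/transcribe"
    "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

// waitForVocabulary polls until the named custom vocabulary leaves PENDING.
func waitForVocabulary(ctx context.Context, client *transcribe.Client, name string) error {
    for {
        out, err := client.GetVocabulary(ctx, &transcribe.GetVocabularyInput{
            VocabularyName: aws.String(name),
        })
        if err != nil {
            return err
        }
        switch out.VocabularyState {
        case types.VocabularyStateReady:
            return nil
        case types.VocabularyStateFailed:
            return fmt.Errorf("vocabulary failed: %s", aws.ToString(out.FailureReason))
        }
        time.Sleep(5 * time.Second) // still PENDING
    }
}

func main() {} // client construction omitted; see the TranscriptionJob example above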