package types
import "github.com/aws/aws-sdk-go-v2/service/transcribe/types"
Index ¶
- type AbsoluteTimeRange
- type BadRequestException
- func (e *BadRequestException) Error() string
- func (e *BadRequestException) ErrorCode() string
- func (e *BadRequestException) ErrorFault() smithy.ErrorFault
- func (e *BadRequestException) ErrorMessage() string
- type BaseModelName
- type CLMLanguageCode
- type CallAnalyticsJob
- type CallAnalyticsJobSettings
- type CallAnalyticsJobStatus
- type CallAnalyticsJobSummary
- type CategoryProperties
- type ChannelDefinition
- type ConflictException
- func (e *ConflictException) Error() string
- func (e *ConflictException) ErrorCode() string
- func (e *ConflictException) ErrorFault() smithy.ErrorFault
- func (e *ConflictException) ErrorMessage() string
- type ContentRedaction
- type InputDataConfig
- type InternalFailureException
- func (e *InternalFailureException) Error() string
- func (e *InternalFailureException) ErrorCode() string
- func (e *InternalFailureException) ErrorFault() smithy.ErrorFault
- func (e *InternalFailureException) ErrorMessage() string
- type InterruptionFilter
- type JobExecutionSettings
- type LanguageCode
- type LanguageIdSettings
- type LanguageModel
- type LimitExceededException
- func (e *LimitExceededException) Error() string
- func (e *LimitExceededException) ErrorCode() string
- func (e *LimitExceededException) ErrorFault() smithy.ErrorFault
- func (e *LimitExceededException) ErrorMessage() string
- type Media
- type MediaFormat
- type MedicalContentIdentificationType
- type MedicalTranscript
- type MedicalTranscriptionJob
- type MedicalTranscriptionJobSummary
- type MedicalTranscriptionSetting
- type ModelSettings
- type ModelStatus
- type NonTalkTimeFilter
- type NotFoundException
- func (e *NotFoundException) Error() string
- func (e *NotFoundException) ErrorCode() string
- func (e *NotFoundException) ErrorFault() smithy.ErrorFault
- func (e *NotFoundException) ErrorMessage() string
- type OutputLocationType
- type ParticipantRole
- type RedactionOutput
- type RedactionType
- type RelativeTimeRange
- type Rule
- type RuleMemberInterruptionFilter
- type RuleMemberNonTalkTimeFilter
- type RuleMemberSentimentFilter
- type RuleMemberTranscriptFilter
- type SentimentFilter
- type SentimentValue
- type Settings
- type Specialty
- type SubtitleFormat
- type Subtitles
- type SubtitlesOutput
- type Tag
- type Transcript
- type TranscriptFilter
- type TranscriptFilterType
- type TranscriptionJob
- type TranscriptionJobStatus
- type TranscriptionJobSummary
- type Type
- type UnknownUnionMember
- type VocabularyFilterInfo
- type VocabularyFilterMethod
- type VocabularyInfo
- type VocabularyState
Examples ¶
Types ¶
type AbsoluteTimeRange ¶
type AbsoluteTimeRange struct { // A value that indicates the end of the time range in milliseconds. To set // absolute time range, you must specify a start time and an end time. For example, // if you specify the following values: // // * StartTime - 10000 // // * Endtime - // 50000 // // The time range is set between 10,000 milliseconds and 50,000 milliseconds // into the call. EndTime *int64 // A time range from the beginning of the call to the value that you've specified. // For example, if you specify 100000, the time range is set to the first 100,000 // milliseconds of the call. First *int64 // A time range from the value that you've specified to the end of the call. For // example, if you specify 100000, the time range is set to the last 100,000 // milliseconds of the call. Last *int64 // A value that indicates the beginning of the time range in seconds. To set // absolute time range, you must specify a start time and an end time. For example, // if you specify the following values: // // * StartTime - 10000 // // * Endtime - // 50000 // // The time range is set between 10,000 milliseconds and 50,000 milliseconds // into the call. StartTime *int64 // contains filtered or unexported fields }
A time range, set in milliseconds, between two points in the call.
type BadRequestException ¶
type BadRequestException struct { Message *string // contains filtered or unexported fields }
Your request didn't pass one or more validation tests. For example, if the entity that you're trying to delete doesn't exist or if it is in a non-terminal state (for example, it's "in progress"). See the exception Message field for more information.
func (*BadRequestException) Error ¶
func (e *BadRequestException) Error() string
func (*BadRequestException) ErrorCode ¶
func (e *BadRequestException) ErrorCode() string
func (*BadRequestException) ErrorFault ¶
func (e *BadRequestException) ErrorFault() smithy.ErrorFault
func (*BadRequestException) ErrorMessage ¶
func (e *BadRequestException) ErrorMessage() string
type BaseModelName ¶
type BaseModelName string
const ( BaseModelNameNarrowBand BaseModelName = "NarrowBand" BaseModelNameWideBand BaseModelName = "WideBand" )
Enum values for BaseModelName
func (BaseModelName) Values ¶
func (BaseModelName) Values() []BaseModelName
Values returns all known values for BaseModelName. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type CLMLanguageCode ¶
type CLMLanguageCode string
const ( CLMLanguageCodeEnUs CLMLanguageCode = "en-US" CLMLanguageCodeHiIn CLMLanguageCode = "hi-IN" CLMLanguageCodeEsUs CLMLanguageCode = "es-US" CLMLanguageCodeEnGb CLMLanguageCode = "en-GB" CLMLanguageCodeEnAu CLMLanguageCode = "en-AU" )
Enum values for CLMLanguageCode
func (CLMLanguageCode) Values ¶
func (CLMLanguageCode) Values() []CLMLanguageCode
Values returns all known values for CLMLanguageCode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type CallAnalyticsJob ¶
type CallAnalyticsJob struct { // The name of the call analytics job. CallAnalyticsJobName *string // The status of the analytics job. CallAnalyticsJobStatus CallAnalyticsJobStatus // Shows numeric values to indicate the channel assigned to the agent's audio and // the channel assigned to the customer's audio. ChannelDefinitions []ChannelDefinition // A timestamp that shows when the analytics job was completed. CompletionTime *time.Time // A timestamp that shows when the analytics job was created. CreationTime *time.Time // The Amazon Resource Number (ARN) that you use to access the analytics job. ARNs // have the format // arn:partition:service:region:account-id:resource-type/resource-id. DataAccessRoleArn *string // If the AnalyticsJobStatus is FAILED, this field contains information about why // the job failed. The FailureReason field can contain one of the following // values: // // * Unsupported media format: The media format specified in the // MediaFormat field of the request isn't valid. See the description of the // MediaFormat field for a list of valid values. // // * The media format provided does // not match the detected media format: The media format of the audio file doesn't // match the format specified in the MediaFormat field in the request. Check the // media format of your media file and make sure the two values match. // // * Invalid // sample rate for audio file: The sample rate specified in the // MediaSampleRateHertz of the request isn't valid. The sample rate must be between // 8,000 and 48,000 Hertz. // // * The sample rate provided does not match the detected // sample rate: The sample rate in the audio file doesn't match the sample rate // specified in the MediaSampleRateHertz field in the request. Check the sample // rate of your media file and make sure that the two values match. // // * Invalid file // size: file size too large: The size of your audio file is larger than what // Amazon Transcribe Medical can process. 
For more information, see Guidelines and // Quotas in the Amazon Transcribe Medical Guide. // // * Invalid number of channels: // number of channels too large: Your audio contains more channels than Amazon // Transcribe Medical is configured to process. To request additional channels, see // Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services // General Reference (https://docs.aws.amazon.com/general/latest/gr/Welcome.html). FailureReason *string // A value between zero and one that Amazon Transcribe assigned to the language // that it identified in the source audio. This value appears only when you don't // provide a single language code. Larger values indicate that Amazon Transcribe // has higher confidence in the language that it identified IdentifiedLanguageScore *float32 // If you know the language spoken between the customer and the agent, specify a // language code for this field. If you don't know the language, you can leave this // field blank, and Amazon Transcribe will use machine learning to automatically // identify the language. To improve the accuracy of language identification, you // can provide an array containing the possible language codes for the language // spoken in your audio. Refer to Supported languages and language-specific // features (https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html) // for additional information. LanguageCode LanguageCode // Describes the input media file in a transcription request. Media *Media // The format of the input audio file. Note: for call analytics jobs, only the // following media formats are supported: MP3, MP4, WAV, FLAC, OGG, and WebM. MediaFormat MediaFormat // The sample rate, in Hertz, of the audio. MediaSampleRateHertz *int32 // Provides information about the settings used to run a transcription job. Settings *CallAnalyticsJobSettings // A timestamp that shows when the analytics job started processing. 
StartTime *time.Time // Identifies the location of a transcription. Transcript *Transcript // contains filtered or unexported fields }
Describes an asynchronous analytics job that was created with the StartCallAnalyticsJob operation.
type CallAnalyticsJobSettings ¶
type CallAnalyticsJobSettings struct { // Settings for content redaction within a transcription job. ContentRedaction *ContentRedaction // The language identification settings associated with your call analytics job. // These settings include VocabularyName, VocabularyFilterName, and // LanguageModelName. LanguageIdSettings map[string]LanguageIdSettings // The structure used to describe a custom language model. LanguageModelName *string // When you run a call analytics job, you can specify the language spoken in the // audio, or you can have Amazon Transcribe identify the language for you. To // specify a language, specify an array with one language code. If you don't know // the language, you can leave this field blank and Amazon Transcribe will use // machine learning to identify the language for you. To improve the ability of // Amazon Transcribe to correctly identify the language, you can provide an array // of the languages that can be present in the audio. Refer to Supported languages // and language-specific features // (https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html) for // additional information. LanguageOptions []LanguageCode // Set to mask to remove filtered text from the transcript and replace it with // three asterisks ("***") as placeholder text. Set to remove to remove filtered // text from the transcript without using placeholder text. Set to tag to mark the // word in the transcription output that matches the vocabulary filter. When you // set the filter method to tag, the words matching your vocabulary filter are not // masked or removed. VocabularyFilterMethod VocabularyFilterMethod // The name of the vocabulary filter to use when running a call analytics job. The // filter that you specify must have the same language code as the analytics job. VocabularyFilterName *string // The name of a vocabulary to use when processing the call analytics job. VocabularyName *string // contains filtered or unexported fields }
Provides optional settings for the CallAnalyticsJob operation.
type CallAnalyticsJobStatus ¶
type CallAnalyticsJobStatus string
const ( CallAnalyticsJobStatusQueued CallAnalyticsJobStatus = "QUEUED" CallAnalyticsJobStatusInProgress CallAnalyticsJobStatus = "IN_PROGRESS" CallAnalyticsJobStatusFailed CallAnalyticsJobStatus = "FAILED" CallAnalyticsJobStatusCompleted CallAnalyticsJobStatus = "COMPLETED" )
Enum values for CallAnalyticsJobStatus
func (CallAnalyticsJobStatus) Values ¶
func (CallAnalyticsJobStatus) Values() []CallAnalyticsJobStatus
Values returns all known values for CallAnalyticsJobStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type CallAnalyticsJobSummary ¶
type CallAnalyticsJobSummary struct { // The name of the call analytics job. CallAnalyticsJobName *string // The status of the call analytics job. CallAnalyticsJobStatus CallAnalyticsJobStatus // A timestamp that shows when the job was completed. CompletionTime *time.Time // A timestamp that shows when the call analytics job was created. CreationTime *time.Time // If the CallAnalyticsJobStatus is FAILED, a description of the error. FailureReason *string // The language of the transcript in the source audio file. LanguageCode LanguageCode // A timestamp that shows when the job began processing. StartTime *time.Time // contains filtered or unexported fields }
Provides summary information about a call analytics job.
type CategoryProperties ¶
type CategoryProperties struct { // The name of the call analytics category. CategoryName *string // A timestamp that shows when the call analytics category was created. CreateTime *time.Time // A timestamp that shows when the call analytics category was most recently // updated. LastUpdateTime *time.Time // The rules used to create a call analytics category. Rules []Rule // contains filtered or unexported fields }
An object that contains the rules and additional information about a call analytics category.
type ChannelDefinition ¶
type ChannelDefinition struct { // A value that indicates the audio channel. ChannelId int32 // Indicates whether the person speaking on the audio channel is the agent or // customer. ParticipantRole ParticipantRole // contains filtered or unexported fields }
For a call analytics job, an object that indicates the audio channel that belongs to the agent and the audio channel that belongs to the customer.
type ConflictException ¶
type ConflictException struct { Message *string // contains filtered or unexported fields }
There is already a resource with that name.
func (*ConflictException) Error ¶
func (e *ConflictException) Error() string
func (*ConflictException) ErrorCode ¶
func (e *ConflictException) ErrorCode() string
func (*ConflictException) ErrorFault ¶
func (e *ConflictException) ErrorFault() smithy.ErrorFault
func (*ConflictException) ErrorMessage ¶
func (e *ConflictException) ErrorMessage() string
type ContentRedaction ¶
type ContentRedaction struct { // The output transcript file stored in either the default S3 bucket or in a bucket // you specify. When you choose redacted Amazon Transcribe outputs only the // redacted transcript. When you choose redacted_and_unredacted Amazon Transcribe // outputs both the redacted and unredacted transcripts. // // This member is required. RedactionOutput RedactionOutput // Request parameter that defines the entities to be redacted. The only accepted // value is PII. // // This member is required. RedactionType RedactionType // contains filtered or unexported fields }
Settings for content redaction within a transcription job.
type InputDataConfig ¶
type InputDataConfig struct { // The Amazon Resource Name (ARN) that uniquely identifies the permissions you've // given Amazon Transcribe to access your Amazon S3 buckets containing your media // files or text data. ARNs have the format // arn:partition:service:region:account-id:resource-type/resource-id. // // This member is required. DataAccessRoleArn *string // The Amazon S3 prefix you specify to access the plain text files that you use to // train your custom language model. // // This member is required. S3Uri *string // The Amazon S3 prefix you specify to access the plain text files that you use to // tune your custom language model. TuningDataS3Uri *string // contains filtered or unexported fields }
The object that contains the Amazon S3 object location and access role required to train and tune your custom language model.
type InternalFailureException ¶
type InternalFailureException struct { Message *string // contains filtered or unexported fields }
There was an internal error. Check the error message and try your request again.
func (*InternalFailureException) Error ¶
func (e *InternalFailureException) Error() string
func (*InternalFailureException) ErrorCode ¶
func (e *InternalFailureException) ErrorCode() string
func (*InternalFailureException) ErrorFault ¶
func (e *InternalFailureException) ErrorFault() smithy.ErrorFault
func (*InternalFailureException) ErrorMessage ¶
func (e *InternalFailureException) ErrorMessage() string
type InterruptionFilter ¶
type InterruptionFilter struct { // An object you can use to specify a time range (in milliseconds) for when you'd // want to find the interruption. For example, you could search for an interruption // between the 30,000 millisecond mark and the 45,000 millisecond mark. You could // also specify the time period as the first 15,000 milliseconds or the last 15,000 // milliseconds. AbsoluteTimeRange *AbsoluteTimeRange // Set to TRUE to look for a time period where there was no interruption. Negate *bool // Indicates whether the caller or customer was interrupting. ParticipantRole ParticipantRole // An object that allows percentages to specify the proportion of the call where // there was a interruption. For example, you can specify the first half of the // call. You can also specify the period of time between halfway through to // three-quarters of the way through the call. Because the length of conversation // can vary between calls, you can apply relative time ranges across all calls. RelativeTimeRange *RelativeTimeRange // The duration of the interruption. Threshold *int64 // contains filtered or unexported fields }
An object that enables you to configure your category to be applied to call analytics jobs where either the customer or agent was interrupted.
type JobExecutionSettings ¶
type JobExecutionSettings struct { // Indicates whether a job should be queued by Amazon Transcribe when the // concurrent execution limit is exceeded. When the AllowDeferredExecution field is // true, jobs are queued and executed when the number of executing jobs falls below // the concurrent execution limit. If the field is false, Amazon Transcribe returns // a LimitExceededException exception. Note that job queuing is enabled by default // for call analytics jobs. If you specify the AllowDeferredExecution field, you // must specify the DataAccessRoleArn field. AllowDeferredExecution *bool // The Amazon Resource Name (ARN), in the form // arn:partition:service:region:account-id:resource-type/resource-id, of a role // that has access to the S3 bucket that contains the input files. Amazon // Transcribe assumes this role to read queued media files. If you have specified // an output S3 bucket for the transcription results, this role should have access // to the output bucket as well. If you specify the AllowDeferredExecution field, // you must specify the DataAccessRoleArn field. DataAccessRoleArn *string // contains filtered or unexported fields }
Provides information about when a transcription job should be executed.
type LanguageCode ¶
type LanguageCode string
const ( LanguageCodeAfZa LanguageCode = "af-ZA" LanguageCodeArAe LanguageCode = "ar-AE" LanguageCodeArSa LanguageCode = "ar-SA" LanguageCodeCyGb LanguageCode = "cy-GB" LanguageCodeDaDk LanguageCode = "da-DK" LanguageCodeDeCh LanguageCode = "de-CH" LanguageCodeDeDe LanguageCode = "de-DE" LanguageCodeEnAb LanguageCode = "en-AB" LanguageCodeEnAu LanguageCode = "en-AU" LanguageCodeEnGb LanguageCode = "en-GB" LanguageCodeEnIe LanguageCode = "en-IE" LanguageCodeEnIn LanguageCode = "en-IN" LanguageCodeEnUs LanguageCode = "en-US" LanguageCodeEnWl LanguageCode = "en-WL" LanguageCodeEsEs LanguageCode = "es-ES" LanguageCodeEsUs LanguageCode = "es-US" LanguageCodeFaIr LanguageCode = "fa-IR" LanguageCodeFrCa LanguageCode = "fr-CA" LanguageCodeFrFr LanguageCode = "fr-FR" LanguageCodeGaIe LanguageCode = "ga-IE" LanguageCodeGdGb LanguageCode = "gd-GB" LanguageCodeHeIl LanguageCode = "he-IL" LanguageCodeHiIn LanguageCode = "hi-IN" LanguageCodeIdId LanguageCode = "id-ID" LanguageCodeItIt LanguageCode = "it-IT" LanguageCodeJaJp LanguageCode = "ja-JP" LanguageCodeKoKr LanguageCode = "ko-KR" LanguageCodeMsMy LanguageCode = "ms-MY" LanguageCodeNlNl LanguageCode = "nl-NL" LanguageCodePtBr LanguageCode = "pt-BR" LanguageCodePtPt LanguageCode = "pt-PT" LanguageCodeRuRu LanguageCode = "ru-RU" LanguageCodeTaIn LanguageCode = "ta-IN" LanguageCodeTeIn LanguageCode = "te-IN" LanguageCodeTrTr LanguageCode = "tr-TR" LanguageCodeZhCn LanguageCode = "zh-CN" LanguageCodeZhTw LanguageCode = "zh-TW" LanguageCodeThTh LanguageCode = "th-TH" LanguageCodeEnZa LanguageCode = "en-ZA" LanguageCodeEnNz LanguageCode = "en-NZ" )
Enum values for LanguageCode
func (LanguageCode) Values ¶
func (LanguageCode) Values() []LanguageCode
Values returns all known values for LanguageCode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type LanguageIdSettings ¶
type LanguageIdSettings struct { // The name of the language model you want to use when transcribing your audio. The // model you specify must have the same language code as the transcription job; if // the languages don't match, the language model won't be applied. LanguageModelName *string // The name of the vocabulary filter you want to use when transcribing your audio. // The filter you specify must have the same language code as the transcription // job; if the languages don't match, the vocabulary filter won't be applied. VocabularyFilterName *string // The name of the vocabulary you want to use when processing your transcription // job. The vocabulary you specify must have the same language code as the // transcription job; if the languages don't match, the vocabulary won't be // applied. VocabularyName *string // contains filtered or unexported fields }
Language-specific settings that can be specified when language identification is enabled.
type LanguageModel ¶
type LanguageModel struct { // The Amazon Transcribe standard language model, or base model used to create the // custom language model. BaseModelName BaseModelName // The time the custom language model was created. CreateTime *time.Time // The reason why the custom language model couldn't be created. FailureReason *string // The data access role and Amazon S3 prefixes for the input files used to train // the custom language model. InputDataConfig *InputDataConfig // The language code you used to create your custom language model. LanguageCode CLMLanguageCode // The most recent time the custom language model was modified. LastModifiedTime *time.Time // The name of the custom language model. ModelName *string // The creation status of a custom language model. When the status is COMPLETED the // model is ready for use. ModelStatus ModelStatus // Whether the base model used for the custom language model is up to date. If this // field is true then you are running the most up-to-date version of the base model // in your custom language model. UpgradeAvailability *bool // contains filtered or unexported fields }
The structure used to describe a custom language model.
type LimitExceededException ¶
type LimitExceededException struct { Message *string // contains filtered or unexported fields }
Either you have sent too many requests or your input file is too long. Wait before you resend your request, or use a smaller file and resend the request.
func (*LimitExceededException) Error ¶
func (e *LimitExceededException) Error() string
func (*LimitExceededException) ErrorCode ¶
func (e *LimitExceededException) ErrorCode() string
func (*LimitExceededException) ErrorFault ¶
func (e *LimitExceededException) ErrorFault() smithy.ErrorFault
func (*LimitExceededException) ErrorMessage ¶
func (e *LimitExceededException) ErrorMessage() string
type Media ¶
type Media struct { // The S3 object location of the input media file. The URI must be in the same // region as the API endpoint that you are calling. The general form is: For // example: For more information about S3 object names, see Object Keys // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#object-keys) // in the Amazon S3 Developer Guide. MediaFileUri *string // The S3 object location for your redacted output media file. This is only // supported for call analytics jobs. RedactedMediaFileUri *string // contains filtered or unexported fields }
Describes the input media file in a transcription request.
type MediaFormat ¶
type MediaFormat string
const ( MediaFormatMp3 MediaFormat = "mp3" MediaFormatMp4 MediaFormat = "mp4" MediaFormatWav MediaFormat = "wav" MediaFormatFlac MediaFormat = "flac" MediaFormatOgg MediaFormat = "ogg" MediaFormatAmr MediaFormat = "amr" MediaFormatWebm MediaFormat = "webm" )
Enum values for MediaFormat
func (MediaFormat) Values ¶
func (MediaFormat) Values() []MediaFormat
Values returns all known values for MediaFormat. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type MedicalContentIdentificationType ¶
type MedicalContentIdentificationType string
const ( MedicalContentIdentificationTypePhi MedicalContentIdentificationType = "PHI" )
Enum values for MedicalContentIdentificationType
func (MedicalContentIdentificationType) Values ¶
func (MedicalContentIdentificationType) Values() []MedicalContentIdentificationType
Values returns all known values for MedicalContentIdentificationType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type MedicalTranscript ¶
type MedicalTranscript struct { // The S3 object location of the medical transcript. Use this URI to access the // medical transcript. This URI points to the S3 bucket you created to store the // medical transcript. TranscriptFileUri *string // contains filtered or unexported fields }
Identifies the location of a medical transcript.
type MedicalTranscriptionJob ¶
type MedicalTranscriptionJob struct { // A timestamp that shows when the job was completed. CompletionTime *time.Time // Shows the type of content that you've configured Amazon Transcribe Medical to // identify in a transcription job. If the value is PHI, you've configured the job // to identify personal health information (PHI) in the transcription output. ContentIdentificationType MedicalContentIdentificationType // A timestamp that shows when the job was created. CreationTime *time.Time // If the TranscriptionJobStatus field is FAILED, this field contains information // about why the job failed. The FailureReason field contains one of the following // values: // // * Unsupported media format- The media format specified in the // MediaFormat field of the request isn't valid. See the description of the // MediaFormat field for a list of valid values. // // * The media format provided does // not match the detected media format- The media format of the audio file doesn't // match the format specified in the MediaFormat field in the request. Check the // media format of your media file and make sure the two values match. // // * Invalid // sample rate for audio file- The sample rate specified in the // MediaSampleRateHertz of the request isn't valid. The sample rate must be between // 8,000 and 48,000 Hertz. // // * The sample rate provided does not match the detected // sample rate- The sample rate in the audio file doesn't match the sample rate // specified in the MediaSampleRateHertz field in the request. Check the sample // rate of your media file and make sure that the two values match. // // * Invalid file // size: file size too large- The size of your audio file is larger than what // Amazon Transcribe Medical can process. 
For more information, see Guidelines and // Quotas // (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits) // in the Amazon Transcribe Medical Guide // // * Invalid number of channels: number of // channels too large- Your audio contains more channels than Amazon Transcribe // Medical is configured to process. To request additional channels, see Amazon // Transcribe Medical Endpoints and Quotas // (https://docs.aws.amazon.com/general/latest/gr/transcribe-medical.html) in the // Amazon Web Services General Reference FailureReason *string // The language code for the language spoken in the source audio file. US English // (en-US) is the only supported language for medical transcriptions. Any other // value you enter for language code results in a BadRequestException error. LanguageCode LanguageCode // Describes the input media file in a transcription request. Media *Media // The format of the input media file. MediaFormat MediaFormat // The sample rate, in Hertz, of the source audio containing medical information. // If you don't specify the sample rate, Amazon Transcribe Medical determines it // for you. If you choose to specify the sample rate, it must match the rate // detected by Amazon Transcribe Medical. In most cases, you should leave the // MedicalMediaSampleHertz blank and let Amazon Transcribe Medical determine the // sample rate. MediaSampleRateHertz *int32 // The name for a given medical transcription job. MedicalTranscriptionJobName *string // Object that contains object. Settings *MedicalTranscriptionSetting // The medical specialty of any clinicians providing a dictation or having a // conversation. Refer to Transcribing a medical conversation // (https://docs.aws.amazon.com/transcribe/latest/dg/transcribe-medical-conversation.html)for // a list of supported specialties. Specialty Specialty // A timestamp that shows when the job started processing. 
StartTime *time.Time // A key:value pair assigned to a given medical transcription job. Tags []Tag // An object that contains the MedicalTranscript. The MedicalTranscript contains // the TranscriptFileUri. Transcript *MedicalTranscript // The completion status of a medical transcription job. TranscriptionJobStatus TranscriptionJobStatus // The type of speech in the transcription job. CONVERSATION is generally used for // patient-physician dialogues. DICTATION is the setting for physicians speaking // their notes after seeing a patient. For more information, see What is Amazon // Transcribe Medical? // (https://docs.aws.amazon.com/transcribe/latest/dg/what-is-transcribe-med.html). Type Type // contains filtered or unexported fields }
The data structure that contains the information for a medical transcription job.
type MedicalTranscriptionJobSummary ¶
type MedicalTranscriptionJobSummary struct { // A timestamp that shows when the job was completed. CompletionTime *time.Time // Shows the type of information you've configured Amazon Transcribe Medical to // identify in a transcription job. If the value is PHI, you've configured the // transcription job to identify personal health information (PHI). ContentIdentificationType MedicalContentIdentificationType // A timestamp that shows when the medical transcription job was created. CreationTime *time.Time // If the TranscriptionJobStatus field is FAILED, a description of the error. FailureReason *string // The language of the transcript in the source audio file. LanguageCode LanguageCode // The name of a medical transcription job. MedicalTranscriptionJobName *string // Indicates the location of the transcription job's output. This field must be the // path of an S3 bucket; if you don't already have an S3 bucket, one is created // based on the path you add. OutputLocationType OutputLocationType // The medical specialty of the transcription job. Refer to Transcribing a medical // conversation // (https://docs.aws.amazon.com/transcribe/latest/dg/transcribe-medical-conversation.html)for // a list of supported specialties. Specialty Specialty // A timestamp that shows when the job began processing. StartTime *time.Time // The status of the medical transcription job. TranscriptionJobStatus TranscriptionJobStatus // The speech of the clinician in the input audio. Type Type // contains filtered or unexported fields }
Provides summary information about a transcription job.
type MedicalTranscriptionSetting ¶
type MedicalTranscriptionSetting struct { // Instructs Amazon Transcribe Medical to process each audio channel separately and // then merge the transcription output of each channel into a single transcription. // Amazon Transcribe Medical also produces a transcription of each item detected on // an audio channel, including the start time and end time of the item and // alternative transcriptions of the item. The alternative transcriptions also come // with confidence scores provided by Amazon Transcribe Medical. You can't set both // ShowSpeakerLabels and ChannelIdentification in the same request. If you set // both, your request returns a BadRequestException. ChannelIdentification *bool // The maximum number of alternatives that you tell the service to return. If you // specify the MaxAlternatives field, you must set the ShowAlternatives field to // true. MaxAlternatives *int32 // The maximum number of speakers to identify in the input audio. If there are more // speakers in the audio than this number, multiple speakers are identified as a // single speaker. If you specify the MaxSpeakerLabels field, you must set the // ShowSpeakerLabels field to true. MaxSpeakerLabels *int32 // Determines whether alternative transcripts are generated along with the // transcript that has the highest confidence. If you set the ShowAlternatives field to // true, you must also set the maximum number of alternatives to return in the // MaxAlternatives field. ShowAlternatives *bool // Determines whether the transcription job uses speaker recognition to identify // different speakers in the input audio. Speaker recognition labels individual // speakers in the audio file. If you set the ShowSpeakerLabels field to true, you // must also set the maximum number of speaker labels in the MaxSpeakerLabels // field. You can't set both ShowSpeakerLabels and ChannelIdentification in the // same request. If you set both, your request returns a BadRequestException. 
ShowSpeakerLabels *bool // The name of the vocabulary to use when processing a medical transcription job. VocabularyName *string // contains filtered or unexported fields }
Optional settings for the StartMedicalTranscriptionJob operation.
type ModelSettings ¶
type ModelSettings struct { // The name of your custom language model. LanguageModelName *string // contains filtered or unexported fields }
The object used to apply your custom language model to your transcription job.
type ModelStatus ¶
type ModelStatus string
const ( ModelStatusInProgress ModelStatus = "IN_PROGRESS" ModelStatusFailed ModelStatus = "FAILED" ModelStatusCompleted ModelStatus = "COMPLETED" )
Enum values for ModelStatus
func (ModelStatus) Values ¶
func (ModelStatus) Values() []ModelStatus
Values returns all known values for ModelStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type NonTalkTimeFilter ¶
type NonTalkTimeFilter struct { // An object you can use to specify a time range (in milliseconds) for when no one // is talking. For example, you could specify a time period between the 30,000 // millisecond mark and the 45,000 millisecond mark. You could also specify the // time period as the first 15,000 milliseconds or the last 15,000 milliseconds. AbsoluteTimeRange *AbsoluteTimeRange // Set to TRUE to look for a time period when people were talking. Negate *bool // An object that allows percentages to specify the proportion of the call where // there was silence. For example, you can specify the first half of the call. You // can also specify the period of time between halfway through to three-quarters of // the way through the call. Because the length of conversation can vary between // calls, you can apply relative time ranges across all calls. RelativeTimeRange *RelativeTimeRange // The duration of the period when neither the customer nor agent was talking. Threshold *int64 // contains filtered or unexported fields }
An object that enables you to configure your category to be applied to call analytics jobs that include periods of silence, when neither the customer nor the agent is talking.
type NotFoundException ¶
type NotFoundException struct { Message *string // contains filtered or unexported fields }
We can't find the requested resource. Check the name and try your request again.
func (*NotFoundException) Error ¶
func (e *NotFoundException) Error() string
func (*NotFoundException) ErrorCode ¶
func (e *NotFoundException) ErrorCode() string
func (*NotFoundException) ErrorFault ¶
func (e *NotFoundException) ErrorFault() smithy.ErrorFault
func (*NotFoundException) ErrorMessage ¶
func (e *NotFoundException) ErrorMessage() string
type OutputLocationType ¶
type OutputLocationType string
const ( OutputLocationTypeCustomerBucket OutputLocationType = "CUSTOMER_BUCKET" OutputLocationTypeServiceBucket OutputLocationType = "SERVICE_BUCKET" )
Enum values for OutputLocationType
func (OutputLocationType) Values ¶
func (OutputLocationType) Values() []OutputLocationType
Values returns all known values for OutputLocationType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type ParticipantRole ¶
type ParticipantRole string
const ( ParticipantRoleAgent ParticipantRole = "AGENT" ParticipantRoleCustomer ParticipantRole = "CUSTOMER" )
Enum values for ParticipantRole
func (ParticipantRole) Values ¶
func (ParticipantRole) Values() []ParticipantRole
Values returns all known values for ParticipantRole. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type RedactionOutput ¶
type RedactionOutput string
const ( RedactionOutputRedacted RedactionOutput = "redacted" RedactionOutputRedactedAndUnredacted RedactionOutput = "redacted_and_unredacted" )
Enum values for RedactionOutput
func (RedactionOutput) Values ¶
func (RedactionOutput) Values() []RedactionOutput
Values returns all known values for RedactionOutput. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type RedactionType ¶
type RedactionType string
const ( RedactionTypePii RedactionType = "PII" )
Enum values for RedactionType
func (RedactionType) Values ¶
func (RedactionType) Values() []RedactionType
Values returns all known values for RedactionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type RelativeTimeRange ¶
type RelativeTimeRange struct { // A value that indicates the percentage of the end of the time range. To set a // relative time range, you must specify a start percentage and an end percentage. // For example, if you specify the following values: // // * StartPercentage - 10 // // * // EndPercentage - 50 // // This looks at the time range starting from 10% of the way // into the call to 50% of the way through the call. For a call that lasts 100,000 // milliseconds, this example range would apply from the 10,000 millisecond mark to // the 50,000 millisecond mark. EndPercentage *int32 // A range that takes the portion of the call up to the time in milliseconds set by // the value that you've specified. For example, if you specify 120000, the time // range is set for the first 120,000 milliseconds of the call. First *int32 // A range that takes the portion of the call from the time in milliseconds set by // the value that you've specified to the end of the call. For example, if you // specify 120000, the time range is set for the last 120,000 milliseconds of the // call. Last *int32 // A value that indicates the percentage of the beginning of the time range. To set // a relative time range, you must specify a start percentage and an end // percentage. For example, if you specify the following values: // // * StartPercentage // - 10 // // * EndPercentage - 50 // // This looks at the time range starting from 10% of // the way into the call to 50% of the way through the call. For a call that lasts // 100,000 milliseconds, this example range would apply from the 10,000 millisecond // mark to the 50,000 millisecond mark. StartPercentage *int32 // contains filtered or unexported fields }
An object that allows percentages to specify the proportion of the call where you would like to apply a filter. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.
type Rule ¶
type Rule interface {
// contains filtered or unexported methods
}
A condition in the call between the customer and the agent that you want to filter for.
The following types satisfy this interface:
RuleMemberInterruptionFilter RuleMemberNonTalkTimeFilter RuleMemberSentimentFilter RuleMemberTranscriptFilter
Example (OutputUsage)¶
Code:play
// Code generated by smithy-go-codegen DO NOT EDIT. package main import ( "fmt" "github.com/aws/aws-sdk-go-v2/service/transcribe/types" ) func main() { var union types.Rule // type switches can be used to check the union value switch v := union.(type) { case *types.RuleMemberInterruptionFilter: _ = v.Value // Value is types.InterruptionFilter case *types.RuleMemberNonTalkTimeFilter: _ = v.Value // Value is types.NonTalkTimeFilter case *types.RuleMemberSentimentFilter: _ = v.Value // Value is types.SentimentFilter case *types.RuleMemberTranscriptFilter: _ = v.Value // Value is types.TranscriptFilter case *types.UnknownUnionMember: fmt.Println("unknown tag:", v.Tag) default: fmt.Println("union is nil or unknown type") } } var _ *types.NonTalkTimeFilter var _ *types.SentimentFilter var _ *types.TranscriptFilter var _ *types.InterruptionFilter
type RuleMemberInterruptionFilter ¶
type RuleMemberInterruptionFilter struct { Value InterruptionFilter // contains filtered or unexported fields }
A condition for a time period when either the customer or agent was interrupting the other person.
type RuleMemberNonTalkTimeFilter ¶
type RuleMemberNonTalkTimeFilter struct { Value NonTalkTimeFilter // contains filtered or unexported fields }
A condition for a time period when neither the customer nor the agent was talking.
type RuleMemberSentimentFilter ¶
type RuleMemberSentimentFilter struct { Value SentimentFilter // contains filtered or unexported fields }
A condition that is applied to a particular customer sentiment.
type RuleMemberTranscriptFilter ¶
type RuleMemberTranscriptFilter struct { Value TranscriptFilter // contains filtered or unexported fields }
A condition that catches particular words or phrases based on an exact match. For example, if you set the phrase "I want to speak to the manager", only that exact phrase will be returned.
type SentimentFilter ¶
type SentimentFilter struct { // An array that enables you to specify sentiments for the customer or agent. You // can specify one or more values. // // This member is required. Sentiments []SentimentValue // The time range, measured in seconds, of the sentiment. AbsoluteTimeRange *AbsoluteTimeRange // Set to TRUE to look for sentiments that weren't specified in the request. Negate *bool // A value that determines whether the sentiment belongs to the customer or the // agent. ParticipantRole ParticipantRole // The time range, set in percentages, that correspond to proportion of the call. RelativeTimeRange *RelativeTimeRange // contains filtered or unexported fields }
An object that enables you to specify a particular customer or agent sentiment. If at least 50 percent of the conversation turns (the back-and-forth between two speakers) in a specified time period match the specified sentiment, Amazon Transcribe will consider the sentiment a match.
type SentimentValue ¶
type SentimentValue string
const ( SentimentValuePositive SentimentValue = "POSITIVE" SentimentValueNegative SentimentValue = "NEGATIVE" SentimentValueNeutral SentimentValue = "NEUTRAL" SentimentValueMixed SentimentValue = "MIXED" )
Enum values for SentimentValue
func (SentimentValue) Values ¶
func (SentimentValue) Values() []SentimentValue
Values returns all known values for SentimentValue. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type Settings ¶
type Settings struct { // Instructs Amazon Transcribe to process each audio channel separately and then // merge the transcription output of each channel into a single transcription. // Amazon Transcribe also produces a transcription of each item detected on an // audio channel, including the start time and end time of the item and alternative // transcriptions of the item including the confidence that Amazon Transcribe has // in the transcription. You can't set both ShowSpeakerLabels and // ChannelIdentification in the same request. If you set both, your request returns // a BadRequestException. ChannelIdentification *bool // The number of alternative transcriptions that the service should return. If you // specify the MaxAlternatives field, you must set the ShowAlternatives field to // true. MaxAlternatives *int32 // The maximum number of speakers to identify in the input audio. If there are more // speakers in the audio than this number, multiple speakers are identified as a // single speaker. If you specify the MaxSpeakerLabels field, you must set the // ShowSpeakerLabels field to true. MaxSpeakerLabels *int32 // Determines whether the transcription contains alternative transcriptions. If you // set the ShowAlternatives field to true, you must also set the maximum number of // alternatives to return in the MaxAlternatives field. ShowAlternatives *bool // Determines whether the transcription job uses speaker recognition to identify // different speakers in the input audio. Speaker recognition labels individual // speakers in the audio file. If you set the ShowSpeakerLabels field to true, you // must also set the maximum number of speaker labels in the MaxSpeakerLabels field. You // can't set both ShowSpeakerLabels and ChannelIdentification in the same request. // If you set both, your request returns a BadRequestException. 
ShowSpeakerLabels *bool // Set to mask to remove filtered text from the transcript and replace it with // three asterisks ("***") as placeholder text. Set to remove to remove filtered // text from the transcript without using placeholder text. Set to tag to mark the // word in the transcription output that matches the vocabulary filter. When you // set the filter method to tag, the words matching your vocabulary filter are not // masked or removed. VocabularyFilterMethod VocabularyFilterMethod // The name of the vocabulary filter to use when transcribing the audio. The filter // that you specify must have the same language code as the transcription job. VocabularyFilterName *string // The name of a vocabulary to use when processing the transcription job. VocabularyName *string // contains filtered or unexported fields }
Provides optional settings for the StartTranscriptionJob operation.
type Specialty ¶
type Specialty string
const ( SpecialtyPrimarycare Specialty = "PRIMARYCARE" )
Enum values for Specialty
func (Specialty) Values ¶
Values returns all known values for Specialty. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type SubtitleFormat ¶
type SubtitleFormat string
const ( SubtitleFormatVtt SubtitleFormat = "vtt" SubtitleFormatSrt SubtitleFormat = "srt" )
Enum values for SubtitleFormat
func (SubtitleFormat) Values ¶
func (SubtitleFormat) Values() []SubtitleFormat
Values returns all known values for SubtitleFormat. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type Subtitles ¶
type Subtitles struct { // Specify the output format for your subtitle file. Formats []SubtitleFormat // contains filtered or unexported fields }
Generate subtitles for your batch transcription job.
type SubtitlesOutput ¶
type SubtitlesOutput struct { // Specify the output format for your subtitle file; if you select both SRT and VTT // formats, two output files are generated. Formats []SubtitleFormat // Choose the output location for your subtitle file. This location must be an S3 // bucket. SubtitleFileUris []string // contains filtered or unexported fields }
Specify the output format for your subtitle file.
type Tag ¶
type Tag struct { // The first part of a key:value pair that forms a tag associated with a given // resource. For example, in the tag ‘Department’:’Sales’, the key is 'Department'. // // This member is required. Key *string // The second part of a key:value pair that forms a tag associated with a given // resource. For example, in the tag ‘Department’:’Sales’, the value is 'Sales'. // // This member is required. Value *string // contains filtered or unexported fields }
A key:value pair that adds metadata to a resource used by Amazon Transcribe. For example, a tag with the key:value pair ‘Department’:’Sales’ might be added to a resource to indicate its use by your organization's sales department.
type Transcript ¶
type Transcript struct { // The S3 object location of the redacted transcript. Use this URI to access the // redacted transcript. If you specified an S3 bucket in the OutputBucketName field // when you created the job, this is the URI of that bucket. If you chose to store // the transcript in Amazon Transcribe, this is a shareable URL that provides // secure access to that location. RedactedTranscriptFileUri *string // The S3 object location of the transcript. Use this URI to access the transcript. // If you specified an S3 bucket in the OutputBucketName field when you created the // job, this is the URI of that bucket. If you chose to store the transcript in // Amazon Transcribe, this is a shareable URL that provides secure access to that // location. TranscriptFileUri *string // contains filtered or unexported fields }
Identifies the location of a transcription.
type TranscriptFilter ¶
type TranscriptFilter struct { // The phrases that you're specifying for the transcript filter to match. // // This member is required. Targets []string // Matches the phrase to the transcription output in a word for word fashion. For // example, if you specify the phrase "I want to speak to the manager." Amazon // Transcribe attempts to match that specific phrase to the transcription. // // This member is required. TranscriptFilterType TranscriptFilterType // A time range, set in seconds, between two points in the call. AbsoluteTimeRange *AbsoluteTimeRange // If TRUE, the rule that you specify is applied to everything except for the // phrases that you specify. Negate *bool // Determines whether the customer or the agent is speaking the phrases that you've // specified. ParticipantRole ParticipantRole // An object that allows percentages to specify the proportion of the call where // you would like to apply a filter. For example, you can specify the first half of // the call. You can also specify the period of time between halfway through to // three-quarters of the way through the call. Because the length of conversation // can vary between calls, you can apply relative time ranges across all calls. RelativeTimeRange *RelativeTimeRange // contains filtered or unexported fields }
Matches the output of the transcription to either the specific phrases that you specify, or the intent of the phrases that you specify.
type TranscriptFilterType ¶
type TranscriptFilterType string
const ( TranscriptFilterTypeExact TranscriptFilterType = "EXACT" )
Enum values for TranscriptFilterType
func (TranscriptFilterType) Values ¶
func (TranscriptFilterType) Values() []TranscriptFilterType
Values returns all known values for TranscriptFilterType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type TranscriptionJob ¶
type TranscriptionJob struct { // A timestamp that shows when the job completed. CompletionTime *time.Time // An object that describes content redaction settings for the transcription job. ContentRedaction *ContentRedaction // A timestamp that shows when the job was created. CreationTime *time.Time // If the TranscriptionJobStatus field is FAILED, this field contains information // about why the job failed. The FailureReason field can contain one of the // following values: // // * Unsupported media format - The media format specified in // the MediaFormat field of the request isn't valid. See the description of the // MediaFormat field for a list of valid values. // // * The media format provided does // not match the detected media format - The media format of the audio file doesn't // match the format specified in the MediaFormat field in the request. Check the // media format of your media file and make sure that the two values match. // // * // Invalid sample rate for audio file - The sample rate specified in the // MediaSampleRateHertz of the request isn't valid. The sample rate must be between // 8,000 and 48,000 Hertz. // // * The sample rate provided does not match the detected // sample rate - The sample rate in the audio file doesn't match the sample rate // specified in the MediaSampleRateHertz field in the request. Check the sample // rate of your media file and make sure that the two values match. // // * Invalid file // size: file size too large - The size of your audio file is larger than Amazon // Transcribe can process. For more information, see Limits // (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits) // in the Amazon Transcribe Developer Guide. // // * Invalid number of channels: number // of channels too large - Your audio contains more channels than Amazon Transcribe // is configured to process. 
To request additional channels, see Amazon Transcribe // Limits // (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits-amazon-transcribe) // in the Amazon Web Services General Reference. FailureReason *string // A value between zero and one that Amazon Transcribe assigned to the language // that it identified in the source audio. Larger values indicate that Amazon // Transcribe has higher confidence in the language it identified. IdentifiedLanguageScore *float32 // A value that shows if automatic language identification was enabled for a // transcription job. IdentifyLanguage *bool // Provides information about how a transcription job is executed. JobExecutionSettings *JobExecutionSettings // The language code for the input speech. LanguageCode LanguageCode // Language-specific settings that can be specified when language identification is // enabled for your transcription job. These settings include VocabularyName, // VocabularyFilterName, and LanguageModelName. LanguageIdSettings map[string]LanguageIdSettings // An object that shows the optional array of languages inputted for transcription // jobs with automatic language identification enabled. LanguageOptions []LanguageCode // An object that describes the input media for the transcription job. Media *Media // The format of the input media file. MediaFormat MediaFormat // The sample rate, in Hertz, of the audio track in the input media file. MediaSampleRateHertz *int32 // An object containing the details of your custom language model. ModelSettings *ModelSettings // Optional settings for the transcription job. Use these settings to turn on // speaker recognition, to set the maximum number of speakers that should be // identified and to specify a custom vocabulary to use when processing the // transcription job. Settings *Settings // A timestamp that shows when the job started processing. StartTime *time.Time // Generate subtitles for your batch transcription job. 
Subtitles *SubtitlesOutput // A key:value pair assigned to a given transcription job. Tags []Tag // An object that describes the output of the transcription job. Transcript *Transcript // The name of the transcription job. TranscriptionJobName *string // The status of the transcription job. TranscriptionJobStatus TranscriptionJobStatus // contains filtered or unexported fields }
Describes an asynchronous transcription job that was created with the StartTranscriptionJob operation.
type TranscriptionJobStatus ¶
type TranscriptionJobStatus string
const ( TranscriptionJobStatusQueued TranscriptionJobStatus = "QUEUED" TranscriptionJobStatusInProgress TranscriptionJobStatus = "IN_PROGRESS" TranscriptionJobStatusFailed TranscriptionJobStatus = "FAILED" TranscriptionJobStatusCompleted TranscriptionJobStatus = "COMPLETED" )
Enum values for TranscriptionJobStatus
func (TranscriptionJobStatus) Values ¶
func (TranscriptionJobStatus) Values() []TranscriptionJobStatus
Values returns all known values for TranscriptionJobStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type TranscriptionJobSummary ¶
type TranscriptionJobSummary struct { // A timestamp that shows when the job was completed. CompletionTime *time.Time // The content redaction settings of the transcription job. ContentRedaction *ContentRedaction // A timestamp that shows when the job was created. CreationTime *time.Time // If the TranscriptionJobStatus field is FAILED, a description of the error. FailureReason *string // A value between zero and one that Amazon Transcribe assigned to the language it // identified in the source audio. A higher score indicates that Amazon Transcribe // is more confident in the language it identified. IdentifiedLanguageScore *float32 // Whether automatic language identification was enabled for a transcription job. IdentifyLanguage *bool // The language code for the input speech. LanguageCode LanguageCode // The object used to call your custom language model to your transcription job. ModelSettings *ModelSettings // Indicates the location of the output of the transcription job. If the value is // CUSTOMER_BUCKET then the location is the S3 bucket specified in the // outputBucketName field when the transcription job was started with the // StartTranscriptionJob operation. If the value is SERVICE_BUCKET then the output // is stored by Amazon Transcribe and can be retrieved using the URI in the // GetTranscriptionJob response's TranscriptFileUri field. OutputLocationType OutputLocationType // A timestamp that shows when the job started processing. StartTime *time.Time // The name of the transcription job. TranscriptionJobName *string // The status of the transcription job. When the status is COMPLETED, use the // GetTranscriptionJob operation to get the results of the transcription. TranscriptionJobStatus TranscriptionJobStatus // contains filtered or unexported fields }
Provides a summary of information about a transcription job.
type Type ¶
type Type string
Enum values for Type
func (Type) Values ¶
Values returns all known values for Type. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type UnknownUnionMember ¶
type UnknownUnionMember struct { Tag string Value []byte // contains filtered or unexported fields }
UnknownUnionMember is returned when a union member is returned over the wire, but has an unknown tag.
type VocabularyFilterInfo ¶
type VocabularyFilterInfo struct { // The language code of the words in the vocabulary filter. LanguageCode LanguageCode // The date and time that the vocabulary was last updated. LastModifiedTime *time.Time // The name of the vocabulary filter. The name must be unique in the account that // holds the filter. VocabularyFilterName *string // contains filtered or unexported fields }
Provides information about a vocabulary filter.
type VocabularyFilterMethod ¶
type VocabularyFilterMethod string
const ( VocabularyFilterMethodRemove VocabularyFilterMethod = "remove" VocabularyFilterMethodMask VocabularyFilterMethod = "mask" VocabularyFilterMethodTag VocabularyFilterMethod = "tag" )
Enum values for VocabularyFilterMethod
func (VocabularyFilterMethod) Values ¶
func (VocabularyFilterMethod) Values() []VocabularyFilterMethod
Values returns all known values for VocabularyFilterMethod. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
type VocabularyInfo ¶
type VocabularyInfo struct { // The language code of the vocabulary entries. LanguageCode LanguageCode // The date and time that the vocabulary was last modified. LastModifiedTime *time.Time // The name of the vocabulary. VocabularyName *string // The processing state of the vocabulary. If the state is READY you can use the // vocabulary in a StartTranscriptionJob request. VocabularyState VocabularyState // contains filtered or unexported fields }
Provides information about a custom vocabulary.
type VocabularyState ¶
type VocabularyState string
const ( VocabularyStatePending VocabularyState = "PENDING" VocabularyStateReady VocabularyState = "READY" VocabularyStateFailed VocabularyState = "FAILED" )
Enum values for VocabularyState
func (VocabularyState) Values ¶
func (VocabularyState) Values() []VocabularyState
Values returns all known values for VocabularyState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
Source Files ¶
- Version
- v1.11.2
- Published
- Dec 2, 2021
- Platform
- windows/amd64
- Imports
- 4 packages
- Last checked
- 29 minutes ago –
Tools for package owners.