package types
import "github.com/aws/aws-sdk-go-v2/service/glue/types"
Index ¶
- type AccessDeniedException
- func (e *AccessDeniedException) Error() string
- func (e *AccessDeniedException) ErrorCode() string
- func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault
- func (e *AccessDeniedException) ErrorMessage() string
- func (e *AccessDeniedException) GetMessage() string
- func (e *AccessDeniedException) HasMessage() bool
- type Action
- type AlreadyExistsException
- func (e *AlreadyExistsException) Error() string
- func (e *AlreadyExistsException) ErrorCode() string
- func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault
- func (e *AlreadyExistsException) ErrorMessage() string
- func (e *AlreadyExistsException) GetMessage() string
- func (e *AlreadyExistsException) HasMessage() bool
- type BatchStopJobRunError
- type BatchStopJobRunSuccessfulSubmission
- type BinaryColumnStatisticsData
- type BooleanColumnStatisticsData
- type CatalogEncryptionMode
- type CatalogEntry
- type CatalogImportStatus
- type CatalogTarget
- type Classifier
- type CloudWatchEncryption
- type CloudWatchEncryptionMode
- type CodeGenEdge
- type CodeGenNode
- type CodeGenNodeArg
- type Column
- type ColumnError
- type ColumnStatistics
- type ColumnStatisticsData
- type ColumnStatisticsError
- type ColumnStatisticsType
- type Comparator
- type ConcurrentModificationException
- func (e *ConcurrentModificationException) Error() string
- func (e *ConcurrentModificationException) ErrorCode() string
- func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault
- func (e *ConcurrentModificationException) ErrorMessage() string
- func (e *ConcurrentModificationException) GetMessage() string
- func (e *ConcurrentModificationException) HasMessage() bool
- type ConcurrentRunsExceededException
- func (e *ConcurrentRunsExceededException) Error() string
- func (e *ConcurrentRunsExceededException) ErrorCode() string
- func (e *ConcurrentRunsExceededException) ErrorFault() smithy.ErrorFault
- func (e *ConcurrentRunsExceededException) ErrorMessage() string
- func (e *ConcurrentRunsExceededException) GetMessage() string
- func (e *ConcurrentRunsExceededException) HasMessage() bool
- type Condition
- type ConditionCheckFailureException
- func (e *ConditionCheckFailureException) Error() string
- func (e *ConditionCheckFailureException) ErrorCode() string
- func (e *ConditionCheckFailureException) ErrorFault() smithy.ErrorFault
- func (e *ConditionCheckFailureException) ErrorMessage() string
- func (e *ConditionCheckFailureException) GetMessage() string
- func (e *ConditionCheckFailureException) HasMessage() bool
- type ConfusionMatrix
- type Connection
- type ConnectionInput
- type ConnectionPasswordEncryption
- type ConnectionPropertyKey
- type ConnectionType
- type ConnectionsList
- type Crawl
- type CrawlState
- type Crawler
- type CrawlerMetrics
- type CrawlerNodeDetails
- type CrawlerNotRunningException
- func (e *CrawlerNotRunningException) Error() string
- func (e *CrawlerNotRunningException) ErrorCode() string
- func (e *CrawlerNotRunningException) ErrorFault() smithy.ErrorFault
- func (e *CrawlerNotRunningException) ErrorMessage() string
- func (e *CrawlerNotRunningException) GetMessage() string
- func (e *CrawlerNotRunningException) HasMessage() bool
- type CrawlerRunningException
- func (e *CrawlerRunningException) Error() string
- func (e *CrawlerRunningException) ErrorCode() string
- func (e *CrawlerRunningException) ErrorFault() smithy.ErrorFault
- func (e *CrawlerRunningException) ErrorMessage() string
- func (e *CrawlerRunningException) GetMessage() string
- func (e *CrawlerRunningException) HasMessage() bool
- type CrawlerState
- type CrawlerStoppingException
- func (e *CrawlerStoppingException) Error() string
- func (e *CrawlerStoppingException) ErrorCode() string
- func (e *CrawlerStoppingException) ErrorFault() smithy.ErrorFault
- func (e *CrawlerStoppingException) ErrorMessage() string
- func (e *CrawlerStoppingException) GetMessage() string
- func (e *CrawlerStoppingException) HasMessage() bool
- type CrawlerTargets
- type CreateCsvClassifierRequest
- type CreateGrokClassifierRequest
- type CreateJsonClassifierRequest
- type CreateXMLClassifierRequest
- type CsvClassifier
- type CsvHeaderOption
- type DataCatalogEncryptionSettings
- type DataLakePrincipal
- type Database
- type DatabaseIdentifier
- type DatabaseInput
- type DateColumnStatisticsData
- type DecimalColumnStatisticsData
- type DecimalNumber
- type DeleteBehavior
- type DevEndpoint
- type DevEndpointCustomLibraries
- type DoubleColumnStatisticsData
- type DynamoDBTarget
- type Edge
- type EnableHybridValues
- type EncryptionAtRest
- type EncryptionConfiguration
- type EntityNotFoundException
- func (e *EntityNotFoundException) Error() string
- func (e *EntityNotFoundException) ErrorCode() string
- func (e *EntityNotFoundException) ErrorFault() smithy.ErrorFault
- func (e *EntityNotFoundException) ErrorMessage() string
- func (e *EntityNotFoundException) GetMessage() string
- func (e *EntityNotFoundException) HasMessage() bool
- type ErrorDetail
- type EvaluationMetrics
- type ExecutionProperty
- type ExistCondition
- type ExportLabelsTaskRunProperties
- type FindMatchesMetrics
- type FindMatchesParameters
- type FindMatchesTaskRunProperties
- type GetConnectionsFilter
- type GlueEncryptionException
- func (e *GlueEncryptionException) Error() string
- func (e *GlueEncryptionException) ErrorCode() string
- func (e *GlueEncryptionException) ErrorFault() smithy.ErrorFault
- func (e *GlueEncryptionException) ErrorMessage() string
- func (e *GlueEncryptionException) GetMessage() string
- func (e *GlueEncryptionException) HasMessage() bool
- type GluePolicy
- type GlueTable
- type GrokClassifier
- type IdempotentParameterMismatchException
- func (e *IdempotentParameterMismatchException) Error() string
- func (e *IdempotentParameterMismatchException) ErrorCode() string
- func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault
- func (e *IdempotentParameterMismatchException) ErrorMessage() string
- func (e *IdempotentParameterMismatchException) GetMessage() string
- func (e *IdempotentParameterMismatchException) HasMessage() bool
- type IllegalWorkflowStateException
- func (e *IllegalWorkflowStateException) Error() string
- func (e *IllegalWorkflowStateException) ErrorCode() string
- func (e *IllegalWorkflowStateException) ErrorFault() smithy.ErrorFault
- func (e *IllegalWorkflowStateException) ErrorMessage() string
- func (e *IllegalWorkflowStateException) GetMessage() string
- func (e *IllegalWorkflowStateException) HasMessage() bool
- type ImportLabelsTaskRunProperties
- type InternalServiceException
- func (e *InternalServiceException) Error() string
- func (e *InternalServiceException) ErrorCode() string
- func (e *InternalServiceException) ErrorFault() smithy.ErrorFault
- func (e *InternalServiceException) ErrorMessage() string
- func (e *InternalServiceException) GetMessage() string
- func (e *InternalServiceException) HasMessage() bool
- type InvalidInputException
- func (e *InvalidInputException) Error() string
- func (e *InvalidInputException) ErrorCode() string
- func (e *InvalidInputException) ErrorFault() smithy.ErrorFault
- func (e *InvalidInputException) ErrorMessage() string
- func (e *InvalidInputException) GetMessage() string
- func (e *InvalidInputException) HasMessage() bool
- type JdbcTarget
- type Job
- type JobBookmarkEntry
- type JobBookmarksEncryption
- type JobBookmarksEncryptionMode
- type JobCommand
- type JobNodeDetails
- type JobRun
- type JobRunState
- type JobUpdate
- type JsonClassifier
- type LabelingSetGenerationTaskRunProperties
- type Language
- type LastCrawlInfo
- type LastCrawlStatus
- type Location
- type Logical
- type LogicalOperator
- type LongColumnStatisticsData
- type MLTransform
- type MLTransformNotReadyException
- func (e *MLTransformNotReadyException) Error() string
- func (e *MLTransformNotReadyException) ErrorCode() string
- func (e *MLTransformNotReadyException) ErrorFault() smithy.ErrorFault
- func (e *MLTransformNotReadyException) ErrorMessage() string
- func (e *MLTransformNotReadyException) GetMessage() string
- func (e *MLTransformNotReadyException) HasMessage() bool
- type MappingEntry
- type NoScheduleException
- func (e *NoScheduleException) Error() string
- func (e *NoScheduleException) ErrorCode() string
- func (e *NoScheduleException) ErrorFault() smithy.ErrorFault
- func (e *NoScheduleException) ErrorMessage() string
- func (e *NoScheduleException) GetMessage() string
- func (e *NoScheduleException) HasMessage() bool
- type Node
- type NodeType
- type NotificationProperty
- type OperationTimeoutException
- func (e *OperationTimeoutException) Error() string
- func (e *OperationTimeoutException) ErrorCode() string
- func (e *OperationTimeoutException) ErrorFault() smithy.ErrorFault
- func (e *OperationTimeoutException) ErrorMessage() string
- func (e *OperationTimeoutException) GetMessage() string
- func (e *OperationTimeoutException) HasMessage() bool
- type Order
- type Partition
- type PartitionError
- type PartitionInput
- type PartitionValueList
- type Permission
- type PhysicalConnectionRequirements
- type Predecessor
- type Predicate
- type PrincipalPermissions
- type PrincipalType
- type PropertyPredicate
- type ResourceNumberLimitExceededException
- func (e *ResourceNumberLimitExceededException) Error() string
- func (e *ResourceNumberLimitExceededException) ErrorCode() string
- func (e *ResourceNumberLimitExceededException) ErrorFault() smithy.ErrorFault
- func (e *ResourceNumberLimitExceededException) ErrorMessage() string
- func (e *ResourceNumberLimitExceededException) GetMessage() string
- func (e *ResourceNumberLimitExceededException) HasMessage() bool
- type ResourceShareType
- type ResourceType
- type ResourceUri
- type S3Encryption
- type S3EncryptionMode
- type S3Target
- type Schedule
- type ScheduleState
- type SchedulerNotRunningException
- func (e *SchedulerNotRunningException) Error() string
- func (e *SchedulerNotRunningException) ErrorCode() string
- func (e *SchedulerNotRunningException) ErrorFault() smithy.ErrorFault
- func (e *SchedulerNotRunningException) ErrorMessage() string
- func (e *SchedulerNotRunningException) GetMessage() string
- func (e *SchedulerNotRunningException) HasMessage() bool
- type SchedulerRunningException
- func (e *SchedulerRunningException) Error() string
- func (e *SchedulerRunningException) ErrorCode() string
- func (e *SchedulerRunningException) ErrorFault() smithy.ErrorFault
- func (e *SchedulerRunningException) ErrorMessage() string
- func (e *SchedulerRunningException) GetMessage() string
- func (e *SchedulerRunningException) HasMessage() bool
- type SchedulerTransitioningException
- func (e *SchedulerTransitioningException) Error() string
- func (e *SchedulerTransitioningException) ErrorCode() string
- func (e *SchedulerTransitioningException) ErrorFault() smithy.ErrorFault
- func (e *SchedulerTransitioningException) ErrorMessage() string
- func (e *SchedulerTransitioningException) GetMessage() string
- func (e *SchedulerTransitioningException) HasMessage() bool
- type SchemaChangePolicy
- type SchemaColumn
- type SecurityConfiguration
- type Segment
- type SerDeInfo
- type SkewedInfo
- type Sort
- type SortCriterion
- type SortDirectionType
- type StorageDescriptor
- type StringColumnStatisticsData
- type Table
- type TableError
- type TableIdentifier
- type TableInput
- type TableVersion
- type TableVersionError
- type TaskRun
- type TaskRunFilterCriteria
- type TaskRunProperties
- type TaskRunSortColumnType
- type TaskRunSortCriteria
- type TaskStatusType
- type TaskType
- type TransformFilterCriteria
- type TransformParameters
- type TransformSortColumnType
- type TransformSortCriteria
- type TransformStatusType
- type TransformType
- type Trigger
- type TriggerNodeDetails
- type TriggerState
- type TriggerType
- type TriggerUpdate
- type UpdateBehavior
- type UpdateCsvClassifierRequest
- type UpdateGrokClassifierRequest
- type UpdateJsonClassifierRequest
- type UpdateXMLClassifierRequest
- type UserDefinedFunction
- type UserDefinedFunctionInput
- type ValidationException
- func (e *ValidationException) Error() string
- func (e *ValidationException) ErrorCode() string
- func (e *ValidationException) ErrorFault() smithy.ErrorFault
- func (e *ValidationException) ErrorMessage() string
- func (e *ValidationException) GetMessage() string
- func (e *ValidationException) HasMessage() bool
- type VersionMismatchException
- func (e *VersionMismatchException) Error() string
- func (e *VersionMismatchException) ErrorCode() string
- func (e *VersionMismatchException) ErrorFault() smithy.ErrorFault
- func (e *VersionMismatchException) ErrorMessage() string
- func (e *VersionMismatchException) GetMessage() string
- func (e *VersionMismatchException) HasMessage() bool
- type WorkerType
- type Workflow
- type WorkflowGraph
- type WorkflowRun
- type WorkflowRunStatistics
- type WorkflowRunStatus
- type XMLClassifier
Types ¶
type AccessDeniedException ¶
type AccessDeniedException struct { Message *string }
Access to a resource was denied.
func (*AccessDeniedException) Error ¶
func (e *AccessDeniedException) Error() string
func (*AccessDeniedException) ErrorCode ¶
func (e *AccessDeniedException) ErrorCode() string
func (*AccessDeniedException) ErrorFault ¶
func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault
func (*AccessDeniedException) ErrorMessage ¶
func (e *AccessDeniedException) ErrorMessage() string
func (*AccessDeniedException) GetMessage ¶
func (e *AccessDeniedException) GetMessage() string
func (*AccessDeniedException) HasMessage ¶
func (e *AccessDeniedException) HasMessage() bool
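Each exception type in this package implements the error interface, so a caller can test for a specific failure with errors.As. A minimal sketch, assuming err was returned by some AWS Glue API call (the surrounding function and variable names are illustrative, not part of this package):

	import (
		"errors"
		"fmt"

		"github.com/aws/aws-sdk-go-v2/service/glue/types"
	)

	func reportAccessDenied(err error) {
		// errors.As walks the error chain and fills ade if the failure
		// was an AccessDeniedException returned by AWS Glue.
		var ade *types.AccessDeniedException
		if errors.As(err, &ade) {
			fmt.Println("access denied:", ade.ErrorMessage())
		}
	}

The same pattern applies to every other exception type listed on this page.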
type Action ¶
type Action struct { // The JobRun timeout in minutes. This is the maximum time that a job run can // consume resources before it is terminated and enters TIMEOUT status. The default // is 2,880 minutes (48 hours). This overrides the timeout value set in the parent // job. Timeout *int32 // Specifies configuration properties of a job run notification. NotificationProperty *NotificationProperty // The job arguments used when this trigger fires. For this job run, they replace // the default arguments set in the job definition itself. You can specify // arguments here that your own job-execution script consumes, as well as arguments // that AWS Glue itself consumes. For information about how to specify and consume // your own Job arguments, see the Calling AWS Glue APIs in Python // (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) // topic in the developer guide. For information about the key-value pairs that AWS // Glue consumes to set up your job, see the Special Parameters Used by AWS Glue // (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) // topic in the developer guide. Arguments map[string]*string // The name of the SecurityConfiguration structure to be used with this action. SecurityConfiguration *string // The name of the crawler to be used with this action. CrawlerName *string // The name of a job to be executed. JobName *string }
Defines an action to be initiated by a trigger.
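For illustration, a trigger action that starts a job with overridden arguments might be built as follows; the job name, security configuration, and argument values are hypothetical, and aws.String / aws.Int32 are the pointer helpers from the github.com/aws/aws-sdk-go-v2/aws package:

	action := types.Action{
		JobName:               aws.String("nightly-etl"),         // hypothetical job name
		Timeout:               aws.Int32(60),                     // override the job's default timeout, in minutes
		SecurityConfiguration: aws.String("etl-security-config"), // hypothetical SecurityConfiguration name
		Arguments: map[string]*string{
			// Replaces the default arguments defined on the job itself.
			"--job-bookmark-option": aws.String("job-bookmark-enable"),
		},
	}
	// action would then be attached to a trigger definition.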
type AlreadyExistsException ¶
type AlreadyExistsException struct { Message *string }
A resource to be created or added already exists.
func (*AlreadyExistsException) Error ¶
func (e *AlreadyExistsException) Error() string
func (*AlreadyExistsException) ErrorCode ¶
func (e *AlreadyExistsException) ErrorCode() string
func (*AlreadyExistsException) ErrorFault ¶
func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault
func (*AlreadyExistsException) ErrorMessage ¶
func (e *AlreadyExistsException) ErrorMessage() string
func (*AlreadyExistsException) GetMessage ¶
func (e *AlreadyExistsException) GetMessage() string
func (*AlreadyExistsException) HasMessage ¶
func (e *AlreadyExistsException) HasMessage() bool
type BatchStopJobRunError ¶
type BatchStopJobRunError struct { // The JobRunId of the job run in question. JobRunId *string // The name of the job definition that is used in the job run in question. JobName *string // Specifies details about the error that was encountered. ErrorDetail *ErrorDetail }
Records an error that occurred when attempting to stop a specified job run.
type BatchStopJobRunSuccessfulSubmission ¶
type BatchStopJobRunSuccessfulSubmission struct { // The JobRunId of the job run that was stopped. JobRunId *string // The name of the job definition used in the job run that was stopped. JobName *string }
Records a successful request to stop a specified JobRun.
type BinaryColumnStatisticsData ¶
type BinaryColumnStatisticsData struct { // Number of nulls. NumberOfNulls *int64 // Maximum length of the column. MaximumLength *int64 // Average length of the column. AverageLength *float64 }
Defines column statistics data for a binary column.
type BooleanColumnStatisticsData ¶
type BooleanColumnStatisticsData struct { // Number of true values. NumberOfTrues *int64 // Number of nulls. NumberOfNulls *int64 // Number of false values. NumberOfFalses *int64 }
Defines column statistics data for a Boolean column.
type CatalogEncryptionMode ¶
type CatalogEncryptionMode string
const ( CatalogEncryptionModeDisabled CatalogEncryptionMode = "DISABLED" CatalogEncryptionModeSsekms CatalogEncryptionMode = "SSE-KMS" )
Enum values for CatalogEncryptionMode
type CatalogEntry ¶
type CatalogEntry struct { // The database in which the table metadata resides. DatabaseName *string // The name of the table in question. TableName *string }
Specifies a table definition in the AWS Glue Data Catalog.
type CatalogImportStatus ¶
type CatalogImportStatus struct { // True if the migration has completed, or False otherwise. ImportCompleted *bool // The time that the migration was started. ImportTime *time.Time // The name of the person who initiated the migration. ImportedBy *string }
A structure containing migration status information.
type CatalogTarget ¶
type CatalogTarget struct { // The name of the database to be synchronized. DatabaseName *string // A list of the tables to be synchronized. Tables []*string }
Specifies an AWS Glue Data Catalog target.
type Classifier ¶
type Classifier struct { // A classifier for comma-separated values (CSV). CsvClassifier *CsvClassifier // A classifier that uses grok. GrokClassifier *GrokClassifier // A classifier for JSON content. JsonClassifier *JsonClassifier // A classifier for XML content. XMLClassifier *XMLClassifier }
Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format. You can use the standard classifiers that AWS Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.
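Only one of the classifier fields is populated on a returned Classifier. A minimal sketch of inspecting which kind was returned, where c is assumed to be a Classifier value obtained from an API response:

	switch {
	case c.CsvClassifier != nil:
		fmt.Println("custom CSV classifier")
	case c.GrokClassifier != nil:
		fmt.Println("grok classifier")
	case c.JsonClassifier != nil:
		fmt.Println("JSON classifier")
	case c.XMLClassifier != nil:
		fmt.Println("XML classifier")
	}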
type CloudWatchEncryption ¶
type CloudWatchEncryption struct { // The encryption mode to use for CloudWatch data. CloudWatchEncryptionMode CloudWatchEncryptionMode // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string }
Specifies how Amazon CloudWatch data should be encrypted.
type CloudWatchEncryptionMode ¶
type CloudWatchEncryptionMode string
const ( CloudWatchEncryptionModeDisabled CloudWatchEncryptionMode = "DISABLED" CloudWatchEncryptionModeSsekms CloudWatchEncryptionMode = "SSE-KMS" )
Enum values for CloudWatchEncryptionMode
type CodeGenEdge ¶
type CodeGenEdge struct { // The ID of the node at which the edge starts. Source *string // The target of the edge. TargetParameter *string // The ID of the node at which the edge ends. Target *string }
Represents a directional edge in a directed acyclic graph (DAG).
type CodeGenNode ¶
type CodeGenNode struct { // The line number of the node. LineNumber *int32 // Properties of the node, in the form of name-value pairs. Args []*CodeGenNodeArg // The type of node that this is. NodeType *string // A node identifier that is unique within the node's graph. Id *string }
Represents a node in a directed acyclic graph (DAG).
type CodeGenNodeArg ¶
type CodeGenNodeArg struct { // True if the value is used as a parameter. Param *bool // The name of the argument or property. Name *string // The value of the argument or property. Value *string }
An argument or property of a node.
type Column ¶
type Column struct { // These key-value pairs define properties associated with the column. Parameters map[string]*string // A free-form text comment. Comment *string // The name of the Column. Name *string // The data type of the Column. Type *string }
A column in a Table.
type ColumnError ¶
type ColumnError struct { // The error that occurred during the operation. Error *ErrorDetail // The name of the column. ColumnName *string }
Defines a column that contains an error.
type ColumnStatistics ¶
type ColumnStatistics struct { // The analyzed time of the column statistics. AnalyzedTime *time.Time // The statistics of the column. StatisticsData *ColumnStatisticsData // The type of the column. ColumnType *string // The name of the column. ColumnName *string }
Represents the generated statistics for a column.
type ColumnStatisticsData ¶
type ColumnStatisticsData struct { // The type of column statistics data. Type ColumnStatisticsType // String Column Statistics Data. StringColumnStatisticsData *StringColumnStatisticsData // Double Column Statistics Data. DoubleColumnStatisticsData *DoubleColumnStatisticsData // Long Column Statistics Data. LongColumnStatisticsData *LongColumnStatisticsData // Boolean Column Statistics Data. BooleanColumnStatisticsData *BooleanColumnStatisticsData // Decimal Column Statistics Data. DecimalColumnStatisticsData *DecimalColumnStatisticsData // Date Column Statistics Data. DateColumnStatisticsData *DateColumnStatisticsData // Binary Column Statistics Data. BinaryColumnStatisticsData *BinaryColumnStatisticsData }
Contains column statistics data for one of the supported column data types.
type ColumnStatisticsError ¶
type ColumnStatisticsError struct { // The ColumnStatistics of the column. ColumnStatistics *ColumnStatistics // The error that occurred during the operation. Error *ErrorDetail }
Encapsulates column statistics along with an error that occurred during the operation.
type ColumnStatisticsType ¶
type ColumnStatisticsType string
const ( ColumnStatisticsTypeBoolean ColumnStatisticsType = "BOOLEAN" ColumnStatisticsTypeDate ColumnStatisticsType = "DATE" ColumnStatisticsTypeDecimal ColumnStatisticsType = "DECIMAL" ColumnStatisticsTypeDouble ColumnStatisticsType = "DOUBLE" ColumnStatisticsTypeLong ColumnStatisticsType = "LONG" ColumnStatisticsTypeString ColumnStatisticsType = "STRING" ColumnStatisticsTypeBinary ColumnStatisticsType = "BINARY" )
Enum values for ColumnStatisticsType
type Comparator ¶
type Comparator string
const ( ComparatorEquals Comparator = "EQUALS" ComparatorGreater_than Comparator = "GREATER_THAN" ComparatorLess_than Comparator = "LESS_THAN" ComparatorGreater_than_equals Comparator = "GREATER_THAN_EQUALS" ComparatorLess_than_equals Comparator = "LESS_THAN_EQUALS" )
Enum values for Comparator
type ConcurrentModificationException ¶
type ConcurrentModificationException struct { Message *string }
Two processes are trying to modify a resource simultaneously.
func (*ConcurrentModificationException) Error ¶
func (e *ConcurrentModificationException) Error() string
func (*ConcurrentModificationException) ErrorCode ¶
func (e *ConcurrentModificationException) ErrorCode() string
func (*ConcurrentModificationException) ErrorFault ¶
func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault
func (*ConcurrentModificationException) ErrorMessage ¶
func (e *ConcurrentModificationException) ErrorMessage() string
func (*ConcurrentModificationException) GetMessage ¶
func (e *ConcurrentModificationException) GetMessage() string
func (*ConcurrentModificationException) HasMessage ¶
func (e *ConcurrentModificationException) HasMessage() bool
type ConcurrentRunsExceededException ¶
type ConcurrentRunsExceededException struct { Message *string }
Too many jobs are being run concurrently.
func (*ConcurrentRunsExceededException) Error ¶
func (e *ConcurrentRunsExceededException) Error() string
func (*ConcurrentRunsExceededException) ErrorCode ¶
func (e *ConcurrentRunsExceededException) ErrorCode() string
func (*ConcurrentRunsExceededException) ErrorFault ¶
func (e *ConcurrentRunsExceededException) ErrorFault() smithy.ErrorFault
func (*ConcurrentRunsExceededException) ErrorMessage ¶
func (e *ConcurrentRunsExceededException) ErrorMessage() string
func (*ConcurrentRunsExceededException) GetMessage ¶
func (e *ConcurrentRunsExceededException) GetMessage() string
func (*ConcurrentRunsExceededException) HasMessage ¶
func (e *ConcurrentRunsExceededException) HasMessage() bool
type Condition ¶
type Condition struct { // The name of the crawler to which this condition applies. CrawlerName *string // The state of the crawler to which this condition applies. CrawlState CrawlState // A logical operator. LogicalOperator LogicalOperator // The name of the job whose JobRuns this condition applies to, and on which this // trigger waits. JobName *string // The condition state. Currently, the values supported are SUCCEEDED, STOPPED, // TIMEOUT, and FAILED. State JobRunState }
Defines a condition under which a trigger fires.
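As a sketch, a condition that is satisfied when a particular crawler finishes successfully could be expressed as follows; the crawler name is hypothetical, and aws.String is the pointer helper from github.com/aws/aws-sdk-go-v2/aws:

	cond := types.Condition{
		CrawlerName: aws.String("raw-zone-crawler"), // hypothetical crawler name
		CrawlState:  types.CrawlStateSucceeded,      // fire when the crawl succeeds
	}
	// cond would be placed inside a trigger's Predicate.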
type ConditionCheckFailureException ¶
type ConditionCheckFailureException struct { Message *string }
A specified condition was not satisfied.
func (*ConditionCheckFailureException) Error ¶
func (e *ConditionCheckFailureException) Error() string
func (*ConditionCheckFailureException) ErrorCode ¶
func (e *ConditionCheckFailureException) ErrorCode() string
func (*ConditionCheckFailureException) ErrorFault ¶
func (e *ConditionCheckFailureException) ErrorFault() smithy.ErrorFault
func (*ConditionCheckFailureException) ErrorMessage ¶
func (e *ConditionCheckFailureException) ErrorMessage() string
func (*ConditionCheckFailureException) GetMessage ¶
func (e *ConditionCheckFailureException) GetMessage() string
func (*ConditionCheckFailureException) HasMessage ¶
func (e *ConditionCheckFailureException) HasMessage() bool
type ConfusionMatrix ¶
type ConfusionMatrix struct { // The number of matches in the data that the transform correctly found, in the // confusion matrix for your transform. NumTruePositives *int64 // The number of nonmatches in the data that the transform incorrectly classified // as a match, in the confusion matrix for your transform. NumFalsePositives *int64 // The number of matches in the data that the transform didn't find, in the // confusion matrix for your transform. NumFalseNegatives *int64 // The number of nonmatches in the data that the transform correctly rejected, in // the confusion matrix for your transform. NumTrueNegatives *int64 }
The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making. For more information, see Confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix) in Wikipedia.
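The four counts are enough to derive the usual quality measures. A small sketch computing precision and recall from a ConfusionMatrix value m; nil fields are treated as zero, and the helper closure is illustrative:

	toF := func(p *int64) float64 {
		if p == nil {
			return 0
		}
		return float64(*p)
	}
	tp := toF(m.NumTruePositives)
	fp := toF(m.NumFalsePositives)
	fn := toF(m.NumFalseNegatives)
	precision := tp / (tp + fp) // share of predicted matches that are true matches
	recall := tp / (tp + fn)    // share of true matches that the transform found
	fmt.Printf("precision=%.3f recall=%.3f\n", precision, recall)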
type Connection ¶
type Connection struct { // The name of the connection definition. Name *string // A map of physical connection requirements, such as virtual private cloud (VPC) // and SecurityGroup, that are needed to make this connection successfully. PhysicalConnectionRequirements *PhysicalConnectionRequirements // These key-value pairs define parameters for the connection: // // * HOST - The // host URI: either the fully qualified domain name (FQDN) or the IPv4 address of // the database host. // // * PORT - The port number, between 1024 and 65535, of the // port on which the database host is listening for database connections. // // * // USER_NAME - The name under which to log in to the database. The value string for // USER_NAME is "USERNAME". // // * PASSWORD - A password, if one is used, for the // user name. // // * ENCRYPTED_PASSWORD - When you enable connection password // protection by setting ConnectionPasswordEncryption in the Data Catalog // encryption settings, this field stores the encrypted password. // // * // JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the // JAR file that contains the JDBC driver to use. // // * JDBC_DRIVER_CLASS_NAME - // The class name of the JDBC driver to use. // // * JDBC_ENGINE - The name of the // JDBC engine to use. // // * JDBC_ENGINE_VERSION - The version of the JDBC engine // to use. // // * CONFIG_FILES - (Reserved for future use.) // // * INSTANCE_ID - // The instance ID to use. // // * JDBC_CONNECTION_URL - The URL for connecting to a // JDBC data source. // // * JDBC_ENFORCE_SSL - A Boolean string (true, false) // specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced // for the JDBC connection on the client. The default is false. // // * // CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root // certificate. AWS Glue uses this root certificate to validate the customer’s // certificate when connecting to the customer database. AWS Glue only handles // X.509 certificates. The certificate provided must be DER-encoded and supplied in // Base64 encoding PEM format. // // * SKIP_CUSTOM_JDBC_CERT_VALIDATION - By // default, this is false. AWS Glue validates the Signature algorithm and Subject // Public Key Algorithm for the customer certificate. The only permitted algorithms // for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. // For the Subject Public Key Algorithm, the key length must be at least 2048. You // can set the value of this property to true to skip AWS Glue’s validation of the // customer certificate. // // * CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate // string which is used for domain match or distinguished name match to prevent a // man-in-the-middle attack. In Oracle database, this is used as the // SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the // hostNameInCertificate. // // * CONNECTION_URL - The URL for connecting to a // general (non-JDBC) data source. // // * KAFKA_BOOTSTRAP_SERVERS - A // comma-separated list of host and port pairs that are the addresses of the Apache // Kafka brokers in a Kafka cluster to which a Kafka client will connect to and // bootstrap itself. // // * KAFKA_SSL_ENABLED - Whether to enable or disable SSL on // an Apache Kafka connection. Default value is "true". // // * KAFKA_CUSTOM_CERT - // The Amazon S3 URL for the private CA cert file (.pem format). The default is an // empty string. 
// // * KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the // validation of the CA cert file or not. AWS Glue validates for three algorithms: // SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is "false". ConnectionProperties map[string]*string // The type of the connection. Currently, SFTP is not supported. ConnectionType ConnectionType // A list of criteria that can be used in selecting this connection. MatchCriteria []*string // The description of the connection. Description *string // The user, group, or role that last updated this connection definition. LastUpdatedBy *string // The time that this connection definition was created. CreationTime *time.Time // The last time that this connection definition was updated. LastUpdatedTime *time.Time }
Defines a connection to a data source.
type ConnectionInput ¶
type ConnectionInput struct { // The type of the connection. Currently, these types are supported: // // * JDBC - // Designates a connection to a database through Java Database Connectivity // (JDBC). // // * KAFKA - Designates a connection to an Apache Kafka streaming // platform. // // * MONGODB - Designates a connection to a MongoDB document // database. // // SFTP is not supported. ConnectionType ConnectionType // A list of criteria that can be used in selecting this connection. MatchCriteria []*string // These key-value pairs define parameters for the connection. ConnectionProperties map[string]*string // A map of physical connection requirements, such as virtual private cloud (VPC) // and SecurityGroup, that are needed to successfully make this connection. PhysicalConnectionRequirements *PhysicalConnectionRequirements // The description of the connection. Description *string // The name of the connection. Name *string }
A structure that is used to specify a connection to create or update.
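A sketch of a JDBC connection input; the connection name, URL, and credentials are placeholders, and aws.String is the pointer helper from github.com/aws/aws-sdk-go-v2/aws:

	input := types.ConnectionInput{
		Name:           aws.String("orders-postgres"), // hypothetical connection name
		ConnectionType: types.ConnectionTypeJdbc,
		Description:    aws.String("JDBC connection to the orders database"),
		ConnectionProperties: map[string]*string{
			// The map keys are the string values of the ConnectionPropertyKey enum.
			string(types.ConnectionPropertyKeyJdbc_connection_url): aws.String("jdbc:postgresql://db.example.com:5432/orders"),
			string(types.ConnectionPropertyKeyUser_name):           aws.String("etl_user"),
			string(types.ConnectionPropertyKeyPassword):            aws.String("example-password"),
		},
	}
	// input would then be passed to a CreateConnection or UpdateConnection call.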
type ConnectionPasswordEncryption ¶
type ConnectionPasswordEncryption struct { // An AWS KMS key that is used to encrypt the connection password. If connection // password protection is enabled, the caller of CreateConnection and // UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS // key, to encrypt passwords before storing them in the Data Catalog. You can set // the decrypt permission to enable or restrict access on the password key // according to your security requirements. AwsKmsKeyId *string // When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords // remain encrypted in the responses of GetConnection and GetConnections. This // encryption takes effect independently from catalog encryption. ReturnConnectionPasswordEncrypted *bool }
The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption. When a CreateConnection request arrives containing a password, the Data Catalog first encrypts the password using your AWS KMS key. It then encrypts the whole connection object again if catalog encryption is also enabled. This encryption requires that you set AWS KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you might want only administrators to have decrypt permission on the password key.
type ConnectionPropertyKey ¶
type ConnectionPropertyKey string
const ( ConnectionPropertyKeyHost ConnectionPropertyKey = "HOST" ConnectionPropertyKeyPort ConnectionPropertyKey = "PORT" ConnectionPropertyKeyUser_name ConnectionPropertyKey = "USERNAME" ConnectionPropertyKeyPassword ConnectionPropertyKey = "PASSWORD" ConnectionPropertyKeyEncrypted_password ConnectionPropertyKey = "ENCRYPTED_PASSWORD" ConnectionPropertyKeyJdbc_driver_jar_uri ConnectionPropertyKey = "JDBC_DRIVER_JAR_URI" ConnectionPropertyKeyJdbc_driver_class_name ConnectionPropertyKey = "JDBC_DRIVER_CLASS_NAME" ConnectionPropertyKeyJdbc_engine ConnectionPropertyKey = "JDBC_ENGINE" ConnectionPropertyKeyJdbc_engine_version ConnectionPropertyKey = "JDBC_ENGINE_VERSION" ConnectionPropertyKeyConfig_files ConnectionPropertyKey = "CONFIG_FILES" ConnectionPropertyKeyInstance_id ConnectionPropertyKey = "INSTANCE_ID" ConnectionPropertyKeyJdbc_connection_url ConnectionPropertyKey = "JDBC_CONNECTION_URL" ConnectionPropertyKeyJdbc_enforce_ssl ConnectionPropertyKey = "JDBC_ENFORCE_SSL" ConnectionPropertyKeyCustom_jdbc_cert ConnectionPropertyKey = "CUSTOM_JDBC_CERT" ConnectionPropertyKeySkip_custom_jdbc_cert_validation ConnectionPropertyKey = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" ConnectionPropertyKeyCustom_jdbc_cert_string ConnectionPropertyKey = "CUSTOM_JDBC_CERT_STRING" ConnectionPropertyKeyConnection_url ConnectionPropertyKey = "CONNECTION_URL" ConnectionPropertyKeyKafka_bootstrap_servers ConnectionPropertyKey = "KAFKA_BOOTSTRAP_SERVERS" ConnectionPropertyKeyKafka_ssl_enabled ConnectionPropertyKey = "KAFKA_SSL_ENABLED" ConnectionPropertyKeyKafka_custom_cert ConnectionPropertyKey = "KAFKA_CUSTOM_CERT" ConnectionPropertyKeyKafka_skip_custom_cert_validation ConnectionPropertyKey = "KAFKA_SKIP_CUSTOM_CERT_VALIDATION" )
Enum values for ConnectionPropertyKey
type ConnectionType ¶
type ConnectionType string
const ( ConnectionTypeJdbc ConnectionType = "JDBC" ConnectionTypeSftp ConnectionType = "SFTP" ConnectionTypeMongodb ConnectionType = "MONGODB" ConnectionTypeKafka ConnectionType = "KAFKA" )
Enum values for ConnectionType
type ConnectionsList ¶
type ConnectionsList struct { // A list of connections used by the job. Connections []*string }
Specifies the connections used by a job.
type Crawl ¶
type Crawl struct { // The error message associated with the crawl. ErrorMessage *string // The date and time on which the crawl started. StartedOn *time.Time // The date and time on which the crawl completed. CompletedOn *time.Time // The log stream associated with the crawl. LogStream *string // The state of the crawler. State CrawlState // The log group associated with the crawl. LogGroup *string }
The details of a crawl in the workflow.
type CrawlState ¶
type CrawlState string
const ( CrawlStateRunning CrawlState = "RUNNING" CrawlStateCancelling CrawlState = "CANCELLING" CrawlStateCancelled CrawlState = "CANCELLED" CrawlStateSucceeded CrawlState = "SUCCEEDED" CrawlStateFailed CrawlState = "FAILED" )
Enum values for CrawlState
type Crawler ¶
type Crawler struct { // The status of the last crawl, and potentially error information if an error // occurred. LastCrawl *LastCrawlInfo // Indicates whether the crawler is running, or whether a run is pending. State CrawlerState // The Amazon Resource Name (ARN) of an IAM role that's used to access customer // resources, such as Amazon Simple Storage Service (Amazon S3) data. Role *string // The name of the crawler. Name *string // For scheduled crawlers, the schedule when the crawler runs. Schedule *Schedule // The name of the SecurityConfiguration structure to be used by this crawler. CrawlerSecurityConfiguration *string // The name of the database in which the crawler's output is stored. DatabaseName *string // If the crawler is running, contains the total time elapsed since the last crawl // began. CrawlElapsedTime *int64 // The policy that specifies update and delete behaviors for the crawler. SchemaChangePolicy *SchemaChangePolicy // A description of the crawler. Description *string // A collection of targets to crawl. Targets *CrawlerTargets // The time that the crawler was created. CreationTime *time.Time // The prefix added to the names of tables that are created. TablePrefix *string // Crawler configuration information. This versioned JSON string allows users to // specify aspects of a crawler's behavior. For more information, see Configuring a // Crawler (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). Configuration *string // The time that the crawler was last updated. LastUpdated *time.Time // A list of UTF-8 strings that specify the custom classifiers that are associated // with the crawler. Classifiers []*string // The version of the crawler. Version *int64 }
Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
type CrawlerMetrics ¶
type CrawlerMetrics struct { // The duration of the crawler's most recent run, in seconds. LastRuntimeSeconds *float64 // True if the crawler is still estimating how long it will take to complete this // run. StillEstimating *bool // The number of tables created by this crawler. TablesCreated *int32 // The name of the crawler. CrawlerName *string // The median duration of this crawler's runs, in seconds. MedianRuntimeSeconds *float64 // The number of tables deleted by this crawler. TablesDeleted *int32 // The estimated time left to complete a running crawl. TimeLeftSeconds *float64 // The number of tables updated by this crawler. TablesUpdated *int32 }
Metrics for a specified crawler.
type CrawlerNodeDetails ¶
type CrawlerNodeDetails struct { // A list of crawls represented by the crawl node. Crawls []*Crawl }
The details of a Crawler node present in the workflow.
type CrawlerNotRunningException ¶
type CrawlerNotRunningException struct { Message *string }
The specified crawler is not running.
func (*CrawlerNotRunningException) Error ¶
func (e *CrawlerNotRunningException) Error() string
func (*CrawlerNotRunningException) ErrorCode ¶
func (e *CrawlerNotRunningException) ErrorCode() string
func (*CrawlerNotRunningException) ErrorFault ¶
func (e *CrawlerNotRunningException) ErrorFault() smithy.ErrorFault
func (*CrawlerNotRunningException) ErrorMessage ¶
func (e *CrawlerNotRunningException) ErrorMessage() string
func (*CrawlerNotRunningException) GetMessage ¶
func (e *CrawlerNotRunningException) GetMessage() string
func (*CrawlerNotRunningException) HasMessage ¶
func (e *CrawlerNotRunningException) HasMessage() bool
type CrawlerRunningException ¶
type CrawlerRunningException struct { Message *string }
The operation cannot be performed because the crawler is already running.
func (*CrawlerRunningException) Error ¶
func (e *CrawlerRunningException) Error() string
func (*CrawlerRunningException) ErrorCode ¶
func (e *CrawlerRunningException) ErrorCode() string
func (*CrawlerRunningException) ErrorFault ¶
func (e *CrawlerRunningException) ErrorFault() smithy.ErrorFault
func (*CrawlerRunningException) ErrorMessage ¶
func (e *CrawlerRunningException) ErrorMessage() string
func (*CrawlerRunningException) GetMessage ¶
func (e *CrawlerRunningException) GetMessage() string
func (*CrawlerRunningException) HasMessage ¶
func (e *CrawlerRunningException) HasMessage() bool
type CrawlerState ¶
type CrawlerState string
const ( CrawlerStateReady CrawlerState = "READY" CrawlerStateRunning CrawlerState = "RUNNING" CrawlerStateStopping CrawlerState = "STOPPING" )
Enum values for CrawlerState
type CrawlerStoppingException ¶
type CrawlerStoppingException struct { Message *string }
The specified crawler is stopping.
func (*CrawlerStoppingException) Error ¶
func (e *CrawlerStoppingException) Error() string
func (*CrawlerStoppingException) ErrorCode ¶
func (e *CrawlerStoppingException) ErrorCode() string
func (*CrawlerStoppingException) ErrorFault ¶
func (e *CrawlerStoppingException) ErrorFault() smithy.ErrorFault
func (*CrawlerStoppingException) ErrorMessage ¶
func (e *CrawlerStoppingException) ErrorMessage() string
func (*CrawlerStoppingException) GetMessage ¶
func (e *CrawlerStoppingException) GetMessage() string
func (*CrawlerStoppingException) HasMessage ¶
func (e *CrawlerStoppingException) HasMessage() bool
type CrawlerTargets ¶
type CrawlerTargets struct { // Specifies JDBC targets. JdbcTargets []*JdbcTarget // Specifies Amazon Simple Storage Service (Amazon S3) targets. S3Targets []*S3Target // Specifies AWS Glue Data Catalog targets. CatalogTargets []*CatalogTarget // Specifies Amazon DynamoDB targets. DynamoDBTargets []*DynamoDBTarget }
Specifies data stores to crawl.
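For example, a targets value that points the crawler at a single DynamoDB table might look like this; the table name is hypothetical, aws.String and aws.Bool are the pointer helpers from github.com/aws/aws-sdk-go-v2/aws, and the field meanings are described under the DynamoDBTarget type below:

	targets := types.CrawlerTargets{
		DynamoDBTargets: []*types.DynamoDBTarget{
			{
				Path:    aws.String("orders"), // hypothetical DynamoDB table name
				ScanAll: aws.Bool(false),      // sample rows instead of scanning every record
			},
		},
	}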
type CreateCsvClassifierRequest ¶
type CreateCsvClassifierRequest struct { // Specifies not to trim values before identifying the type of column values. The // default value is true. DisableValueTrimming *bool // The name of the classifier. Name *string // Enables the processing of files that contain only one column. AllowSingleColumn *bool // A custom symbol to denote what separates each column entry in the row. Delimiter *string // A list of strings representing column names. Header []*string // Indicates whether the CSV file contains a header. ContainsHeader CsvHeaderOption // A custom symbol to denote what combines content into a single column value. Must // be different from the column delimiter. QuoteSymbol *string }
Specifies a custom CSV classifier for CreateClassifier to create.
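A sketch of a pipe-delimited CSV classifier request; the classifier name and column headers are placeholders, and aws.String is the pointer helper from github.com/aws/aws-sdk-go-v2/aws:

	req := types.CreateCsvClassifierRequest{
		Name:           aws.String("orders-csv"), // hypothetical classifier name
		Delimiter:      aws.String("|"),
		QuoteSymbol:    aws.String(`"`),
		ContainsHeader: types.CsvHeaderOptionPresent,
		Header:         []*string{aws.String("order_id"), aws.String("customer_id"), aws.String("total")},
	}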
type CreateGrokClassifierRequest ¶
type CreateGrokClassifierRequest struct { // Optional custom grok patterns used by this classifier. CustomPatterns *string // An identifier of the data format that the classifier matches, such as Twitter, // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. Classification *string // The name of the new classifier. Name *string // The grok pattern used by this classifier. GrokPattern *string }
Specifies a grok classifier for CreateClassifier to create.
type CreateJsonClassifierRequest ¶
type CreateJsonClassifierRequest struct { // The name of the classifier. Name *string // A JsonPath string defining the JSON data for the classifier to classify. AWS // Glue supports a subset of JsonPath, as described in Writing JsonPath Custom // Classifiers // (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json). JsonPath *string }
Specifies a JSON classifier for CreateClassifier to create.
type CreateXMLClassifierRequest ¶
type CreateXMLClassifierRequest struct { // An identifier of the data format that the classifier matches. Classification *string // The name of the classifier. Name *string // The XML tag designating the element that contains each record in an XML document // being parsed. This can't identify a self-closing element (closed by />). An // empty row element that contains only attributes can be parsed as long as it ends // with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but // <row item_a="A" item_b="B" /> is not). RowTag *string }
Specifies an XML classifier for CreateClassifier to create.
type CsvClassifier ¶
type CsvClassifier struct { // The time that this classifier was registered. CreationTime *time.Time // The version of this classifier. Version *int64 // Specifies not to trim values before identifying the type of column values. The // default value is true. DisableValueTrimming *bool // Indicates whether the CSV file contains a header. ContainsHeader CsvHeaderOption // A custom symbol to denote what separates each column entry in the row. Delimiter *string // Enables the processing of files that contain only one column. AllowSingleColumn *bool // A custom symbol to denote what combines content into a single column value. It // must be different from the column delimiter. QuoteSymbol *string // A list of strings representing column names. Header []*string // The name of the classifier. Name *string // The time that this classifier was last updated. LastUpdated *time.Time }
A classifier for custom CSV content.
type CsvHeaderOption ¶
type CsvHeaderOption string
const ( CsvHeaderOptionUnknown CsvHeaderOption = "UNKNOWN" CsvHeaderOptionPresent CsvHeaderOption = "PRESENT" CsvHeaderOptionAbsent CsvHeaderOption = "ABSENT" )
Enum values for CsvHeaderOption
type DataCatalogEncryptionSettings ¶
type DataCatalogEncryptionSettings struct { // Specifies the encryption-at-rest configuration for the Data Catalog. EncryptionAtRest *EncryptionAtRest // When connection password protection is enabled, the Data Catalog uses a // customer-provided key to encrypt the password as part of CreateConnection or // UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection // properties. You can enable catalog encryption or only password encryption. ConnectionPasswordEncryption *ConnectionPasswordEncryption }
Contains configuration information for maintaining Data Catalog security.
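A sketch of settings that enable both encryption at rest and connection password encryption; the KMS key ARN is a placeholder, and aws.String / aws.Bool are the pointer helpers from github.com/aws/aws-sdk-go-v2/aws:

	settings := types.DataCatalogEncryptionSettings{
		EncryptionAtRest: &types.EncryptionAtRest{
			CatalogEncryptionMode: types.CatalogEncryptionModeSsekms,
			SseAwsKmsKeyId:        aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder key ARN
		},
		ConnectionPasswordEncryption: &types.ConnectionPasswordEncryption{
			ReturnConnectionPasswordEncrypted: aws.Bool(true),
			AwsKmsKeyId:                       aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder key ARN
		},
	}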
type DataLakePrincipal ¶
type DataLakePrincipal struct { // An identifier for the AWS Lake Formation principal. DataLakePrincipalIdentifier *string }
The AWS Lake Formation principal.
type Database ¶
type Database struct { // The name of the database. For Hive compatibility, this is folded to lowercase // when it is stored. Name *string // The ID of the Data Catalog in which the database resides. CatalogId *string // The location of the database (for example, an HDFS path). LocationUri *string // The time at which the metadata database was created in the catalog. CreateTime *time.Time // A description of the database. Description *string // These key-value pairs define parameters and properties of the database. Parameters map[string]*string // Creates a set of default permissions on the table for principals. CreateTableDefaultPermissions []*PrincipalPermissions // A DatabaseIdentifier structure that describes a target database for resource // linking. TargetDatabase *DatabaseIdentifier }
The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.
type DatabaseIdentifier ¶
type DatabaseIdentifier struct { // The ID of the Data Catalog in which the database resides. CatalogId *string // The name of the catalog database. DatabaseName *string }
A structure that describes a target database for resource linking.
type DatabaseInput ¶
type DatabaseInput struct { // A DatabaseIdentifier structure that describes a target database for resource // linking. TargetDatabase *DatabaseIdentifier // Creates a set of default permissions on the table for principals. CreateTableDefaultPermissions []*PrincipalPermissions // The location of the database (for example, an HDFS path). LocationUri *string // These key-value pairs define parameters and properties of the database. Parameters map[string]*string // A description of the database. Description *string // The name of the database. For Hive compatibility, this is folded to lowercase // when it is stored. Name *string }
The structure used to create or update a database.
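A sketch of a database input; the database name, location, and parameters are placeholders, and aws.String is the pointer helper from github.com/aws/aws-sdk-go-v2/aws:

	db := types.DatabaseInput{
		Name:        aws.String("sales_raw"), // hypothetical name; folded to lowercase when stored
		Description: aws.String("Raw sales data discovered by the crawler"),
		LocationUri: aws.String("s3://example-bucket/sales/"),
		Parameters: map[string]*string{
			"classification": aws.String("parquet"), // illustrative property
		},
	}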
type DateColumnStatisticsData ¶
type DateColumnStatisticsData struct { // Number of distinct values. NumberOfDistinctValues *int64 // Number of nulls. NumberOfNulls *int64 // Minimum value of the column. MinimumValue *time.Time // Maximum value of the column. MaximumValue *time.Time }
Defines column statistics data for a date column.
type DecimalColumnStatisticsData ¶
type DecimalColumnStatisticsData struct { // Minimum value of the column. MinimumValue *DecimalNumber // Number of distinct values. NumberOfDistinctValues *int64 // Maximum value of the column. MaximumValue *DecimalNumber // Number of nulls. NumberOfNulls *int64 }
Defines column statistics data for a decimal column.
type DecimalNumber ¶
type DecimalNumber struct { // The unscaled numeric value. UnscaledValue []byte // The scale that determines where the decimal point falls in the unscaled value. Scale *int32 }
Contains a numeric value in decimal format.
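The decimal value is the unscaled integer divided by 10 raised to the power of Scale. A sketch of reconstructing it with math/big, under the assumption that UnscaledValue holds the big-endian bytes of a non-negative unscaled integer; that byte layout is an assumption here, not something this page specifies:

	// d is a types.DecimalNumber.
	unscaled := new(big.Int).SetBytes(d.UnscaledValue) // assumes big-endian, non-negative bytes
	scale := 0
	if d.Scale != nil {
		scale = int(*d.Scale)
	}
	denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(scale)), nil)
	value := new(big.Rat).SetFrac(unscaled, denom)
	fmt.Println(value.FloatString(scale)) // e.g. an unscaled value of 12345 with Scale 2 prints "123.45"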
type DeleteBehavior ¶
type DeleteBehavior string
const ( DeleteBehaviorLog DeleteBehavior = "LOG" DeleteBehaviorDelete_from_database DeleteBehavior = "DELETE_FROM_DATABASE" DeleteBehaviorDeprecate_in_database DeleteBehavior = "DEPRECATE_IN_DATABASE" )
Enum values for DeleteBehavior
type DevEndpoint ¶
type DevEndpoint struct { // The public key to be used by this DevEndpoint for authentication. This attribute // is provided for backward compatibility because the recommended attribute to use // is public keys. PublicKey *string // A list of security group identifiers used in this DevEndpoint. SecurityGroupIds []*string // A private IP address to access the DevEndpoint within a VPC if the DevEndpoint // is created within one. The PrivateAddress field is present only when you create // the DevEndpoint within your VPC. PrivateAddress *string // The name of the SecurityConfiguration structure to be used with this // DevEndpoint. SecurityConfiguration *string // The YARN endpoint address used by this DevEndpoint. YarnEndpointAddress *string // The path to one or more Java .jar files in an S3 bucket that should be loaded in // your DevEndpoint. You can only use pure Java/Scala libraries with a DevEndpoint. ExtraJarsS3Path *string // The number of AWS Glue Data Processing Units (DPUs) allocated to this // DevEndpoint. NumberOfNodes *int32 // The AWS Availability Zone where this DevEndpoint is located. AvailabilityZone *string // The paths to one or more Python libraries in an Amazon S3 bucket that should be // loaded in your DevEndpoint. Multiple values must be complete paths separated by // a comma. You can only use pure Python libraries with a DevEndpoint. Libraries // that rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python // data analysis library, are not currently supported. ExtraPythonLibsS3Path *string // The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint. RoleArn *string // The Apache Zeppelin port for the remote Apache Spark interpreter. ZeppelinRemoteSparkInterpreterPort *int32 // The type of predefined worker that is allocated to the development endpoint. // Accepts a value of Standard, G.1X, or G.2X. // // * For the Standard worker type, // each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors // per worker. // // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, // 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend // this worker type for memory-intensive jobs. // // * For the G.2X worker type, // each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 // executor per worker. We recommend this worker type for memory-intensive jobs. // // Known issue: when a development endpoint is created with the G.2X WorkerType // configuration, the Spark drivers for the development endpoint will run on 4 // vCPU, 16 GB of memory, and a 64 GB disk. WorkerType WorkerType // The public IP address used by this DevEndpoint. The PublicAddress field is // present only when you create a non-virtual private cloud (VPC) DevEndpoint. PublicAddress *string // The status of the last update. LastUpdateStatus *string // The subnet ID for this DevEndpoint. SubnetId *string // A list of public keys to be used by the DevEndpoints for authentication. Using // this attribute is preferred over a single public key because the public keys // allow you to have a different private key per client. If you previously created // an endpoint with a public key, you must remove that key to be able to set a list // of public keys. Call the UpdateDevEndpoint API operation with the public key // content in the deletePublicKeys attribute, and the list of new keys in the // addPublicKeys attribute. 
PublicKeys []*string // The number of workers of a defined workerType that are allocated to the // development endpoint. The maximum number of workers you can define is 299 for // G.1X, and 149 for G.2X. NumberOfWorkers *int32 // The point in time at which this DevEndpoint was created. CreatedTimestamp *time.Time // The ID of the virtual private cloud (VPC) used by this DevEndpoint. VpcId *string // The name of the DevEndpoint. EndpointName *string // Glue version determines the versions of Apache Spark and Python that AWS Glue // supports. The Python version indicates the version supported for running your // ETL scripts on development endpoints. For more information about the available // AWS Glue versions and corresponding Spark and Python versions, see Glue version // (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer // guide. Development endpoints that are created without specifying a Glue version // default to Glue 0.9. You can specify a version of Python support for development // endpoints by using the Arguments parameter in the CreateDevEndpoint or // UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to // Python 2. GlueVersion *string // A map of arguments used to configure the DevEndpoint. Valid arguments are: // // // * "--enable-glue-datacatalog": "" // // * "GLUE_PYTHON_VERSION": "3" // // * // "GLUE_PYTHON_VERSION": "2" // // You can specify a version of Python support for // development endpoints by using the Arguments parameter in the CreateDevEndpoint // or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to // Python 2. Arguments map[string]*string // The point in time at which this DevEndpoint was last modified. LastModifiedTimestamp *time.Time // The current status of this DevEndpoint. Status *string // The reason for a current failure in this DevEndpoint. FailureReason *string }
A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
type DevEndpointCustomLibraries ¶
type DevEndpointCustomLibraries struct { // The path to one or more Java .jar files in an S3 bucket that should be loaded in // your DevEndpoint. You can only use pure Java/Scala libraries with a DevEndpoint. ExtraJarsS3Path *string // The paths to one or more Python libraries in an Amazon Simple Storage Service // (Amazon S3) bucket that should be loaded in your DevEndpoint. Multiple values // must be complete paths separated by a comma. You can only use pure Python // libraries with a DevEndpoint. Libraries that rely on C extensions, such as the // pandas (http://pandas.pydata.org/) Python data analysis library, are not // currently supported. ExtraPythonLibsS3Path *string }
Custom libraries to be loaded into a development endpoint.
type DoubleColumnStatisticsData ¶
type DoubleColumnStatisticsData struct { // Minimum value of the column. MinimumValue *float64 // Number of distinct values. NumberOfDistinctValues *int64 // Number of nulls. NumberOfNulls *int64 // Maximum value of the column. MaximumValue *float64 }
Defines statistics data for a double column.
type DynamoDBTarget ¶
type DynamoDBTarget struct { // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. A value of true means to scan all records, while a value of false means to sample the records. If no value is specified, the value defaults to true. ScanAll *bool // The percentage of the configured read capacity units to use by the AWS Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as a rate limiter for the number of reads that can be performed on that table per second. The valid values are null or a value between 0.1 and 1.5. A null value is used when the user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode). ScanRate *float64 // The name of the DynamoDB table to crawl. Path *string }
Specifies an Amazon DynamoDB table to crawl.
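For illustration only, a minimal sketch of a DynamoDB crawl target that samples rows and caps the crawler at half of the table's configured read capacity. It assumes the aws.String/aws.Bool/aws.Float64 pointer helpers from github.com/aws/aws-sdk-go-v2/aws; the table name is a placeholder.
// Sketch: sample rows instead of scanning the whole table, at 50% of read capacity.
target := types.DynamoDBTarget{
    Path:     aws.String("orders"),    // placeholder table name
    ScanAll:  aws.Bool(false),         // sample rows rather than scanning every record
    ScanRate: aws.Float64(0.5),        // fraction of configured read capacity units
}
_ = target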
type Edge ¶
type Edge struct { // The unique ID of the node within the workflow where the edge starts. SourceId *string // The unique ID of the node within the workflow where the edge ends. DestinationId *string }
An edge represents a directed connection between two AWS Glue components that are part of the workflow the edge belongs to.
type EnableHybridValues ¶
type EnableHybridValues string
const ( EnableHybridValuesTrue EnableHybridValues = "TRUE" EnableHybridValuesFalse EnableHybridValues = "FALSE" )
Enum values for EnableHybridValues
type EncryptionAtRest ¶
type EncryptionAtRest struct { // The ID of the AWS KMS key to use for encryption at rest. SseAwsKmsKeyId *string // The encryption-at-rest mode for encrypting Data Catalog data. CatalogEncryptionMode CatalogEncryptionMode }
Specifies the encryption-at-rest configuration for the Data Catalog.
type EncryptionConfiguration ¶
type EncryptionConfiguration struct { // The encryption configuration for Amazon Simple Storage Service (Amazon S3) data. S3Encryption []*S3Encryption // The encryption configuration for Amazon CloudWatch. CloudWatchEncryption *CloudWatchEncryption // The encryption configuration for job bookmarks. JobBookmarksEncryption *JobBookmarksEncryption }
Specifies an encryption configuration.
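For illustration only, a minimal sketch of an encryption configuration that uses SSE-KMS for S3 data and CSE-KMS for job bookmarks (CloudWatch encryption can be set the same way). It assumes the aws.String pointer helper from github.com/aws/aws-sdk-go-v2/aws; the key ARN is a placeholder.
// Sketch: encrypt S3 output with SSE-KMS and job bookmarks with CSE-KMS.
cfg := types.EncryptionConfiguration{
    S3Encryption: []*types.S3Encryption{{
        S3EncryptionMode: types.S3EncryptionModeSsekms,
        KmsKeyArn:        aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
    }},
    JobBookmarksEncryption: &types.JobBookmarksEncryption{
        JobBookmarksEncryptionMode: types.JobBookmarksEncryptionModeCsekms,
        KmsKeyArn:                  aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
    },
}
_ = cfg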
type EntityNotFoundException ¶
type EntityNotFoundException struct { Message *string }
A specified entity does not exist.
func (*EntityNotFoundException) Error ¶
func (e *EntityNotFoundException) Error() string
func (*EntityNotFoundException) ErrorCode ¶
func (e *EntityNotFoundException) ErrorCode() string
func (*EntityNotFoundException) ErrorFault ¶
func (e *EntityNotFoundException) ErrorFault() smithy.ErrorFault
func (*EntityNotFoundException) ErrorMessage ¶
func (e *EntityNotFoundException) ErrorMessage() string
func (*EntityNotFoundException) GetMessage ¶
func (e *EntityNotFoundException) GetMessage() string
func (*EntityNotFoundException) HasMessage ¶
func (e *EntityNotFoundException) HasMessage() bool
type ErrorDetail ¶
type ErrorDetail struct { // The code associated with this error. ErrorCode *string // A message describing the error. ErrorMessage *string }
Contains details about an error.
type EvaluationMetrics ¶
type EvaluationMetrics struct { // The evaluation metrics for the find matches algorithm. FindMatchesMetrics *FindMatchesMetrics // The type of machine learning transform. TransformType TransformType }
Evaluation metrics provide an estimate of the quality of your machine learning transform.
type ExecutionProperty ¶
type ExecutionProperty struct { // The maximum number of concurrent runs allowed for the job. The default is 1. An // error is returned when this threshold is reached. The maximum value you can // specify is controlled by a service limit. MaxConcurrentRuns *int32 }
An execution property of a job.
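For illustration only, a one-line sketch that raises the concurrent-run limit above the default of 1 (assuming the aws.Int32 pointer helper from github.com/aws/aws-sdk-go-v2/aws).
// Sketch: allow up to three concurrent runs of a job.
exec := types.ExecutionProperty{MaxConcurrentRuns: aws.Int32(3)}
_ = exec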
type ExistCondition ¶
type ExistCondition string
const ( ExistConditionMust_exist ExistCondition = "MUST_EXIST" ExistConditionNot_exist ExistCondition = "NOT_EXIST" ExistConditionNone ExistCondition = "NONE" )
Enum values for ExistCondition
type ExportLabelsTaskRunProperties ¶
type ExportLabelsTaskRunProperties struct { // The Amazon Simple Storage Service (Amazon S3) path where you will export the // labels. OutputS3Path *string }
Specifies configuration properties for an exporting labels task run.
type FindMatchesMetrics ¶
type FindMatchesMetrics struct { // The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making. For more information, see Confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix) in Wikipedia. ConfusionMatrix *ConfusionMatrix // The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible. For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall) in Wikipedia. Precision *float64 // The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff. For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall) in Wikipedia. AreaUnderPRCurve *float64 // The maximum F1 metric indicates the transform's accuracy between 0 and 1, where 1 is the best accuracy. For more information, see F1 score (https://en.wikipedia.org/wiki/F1_score) in Wikipedia. F1 *float64 // The recall metric indicates, for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data. For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall) in Wikipedia. Recall *float64 }
The evaluation metrics for the find matches algorithm. The quality of your machine learning transform is measured by getting your transform to predict some matches and comparing the results to known matches from the same dataset. The quality metrics are based on a subset of your data, so they are not precise.
type FindMatchesParameters ¶
type FindMatchesParameters struct { // The name of a column that uniquely identifies rows in the source table. Used to help identify matching records. PrimaryKeyColumnName *string // The value to switch on or off to force the output to match the provided labels from users. If the value is True, the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False, the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model. Note that setting this value to true may increase the conflation execution time. EnforceProvidedLabels *bool // The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy. Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall. Cost measures how many compute resources, and thus money, are consumed to run the transform. AccuracyCostTradeoff *float64 // The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision. The precision metric indicates how often your model is correct when it predicts a match. The recall metric indicates, for an actual match, how often your model predicts the match. PrecisionRecallTradeoff *float64 }
The parameters to configure the find matches transform.
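For illustration only, a minimal sketch of tuning a find matches transform slightly toward precision while balancing accuracy and cost. It assumes the aws.String/aws.Float64/aws.Bool pointer helpers from github.com/aws/aws-sdk-go-v2/aws; the column name and values are placeholders.
// Sketch: lean toward precision, keep accuracy/cost balanced.
params := types.FindMatchesParameters{
    PrimaryKeyColumnName:    aws.String("customer_id"), // placeholder key column
    PrecisionRecallTradeoff: aws.Float64(0.7),          // > 0.5 biases toward precision
    AccuracyCostTradeoff:    aws.Float64(0.5),          // balance accuracy and cost
    EnforceProvidedLabels:   aws.Bool(false),
}
_ = params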
type FindMatchesTaskRunProperties ¶
type FindMatchesTaskRunProperties struct { // The job ID for the Find Matches task run. JobId *string // The job run ID for the Find Matches task run. JobRunId *string // The name assigned to the job for the Find Matches task run. JobName *string }
Specifies configuration properties for a Find Matches task run.
type GetConnectionsFilter ¶
type GetConnectionsFilter struct { // The type of connections to return. Currently, SFTP is not supported. ConnectionType ConnectionType // A criteria string that must match the criteria recorded in the connection // definition for that connection definition to be returned. MatchCriteria []*string }
Filters the connection definitions that are returned by the GetConnections API operation.
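For illustration only, a minimal sketch of a filter that returns only JDBC connections. The ConnectionTypeJdbc constant is assumed to be one of the ConnectionType enum values; those constants are not listed in this excerpt.
// Sketch: restrict GetConnections results to JDBC connections.
filter := types.GetConnectionsFilter{
    ConnectionType: types.ConnectionTypeJdbc, // assumed enum constant
}
_ = filter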
type GlueEncryptionException ¶
type GlueEncryptionException struct { Message *string }
An encryption operation failed.
func (*GlueEncryptionException) Error ¶
func (e *GlueEncryptionException) Error() string
func (*GlueEncryptionException) ErrorCode ¶
func (e *GlueEncryptionException) ErrorCode() string
func (*GlueEncryptionException) ErrorFault ¶
func (e *GlueEncryptionException) ErrorFault() smithy.ErrorFault
func (*GlueEncryptionException) ErrorMessage ¶
func (e *GlueEncryptionException) ErrorMessage() string
func (*GlueEncryptionException) GetMessage ¶
func (e *GlueEncryptionException) GetMessage() string
func (*GlueEncryptionException) HasMessage ¶
func (e *GlueEncryptionException) HasMessage() bool
type GluePolicy ¶
type GluePolicy struct { // The date and time at which the policy was last updated. UpdateTime *time.Time // Contains the requested policy document, in JSON format. PolicyInJson *string // Contains the hash value associated with this policy. PolicyHash *string // The date and time at which the policy was created. CreateTime *time.Time }
A structure for returning a resource policy.
type GlueTable ¶
type GlueTable struct { // A database name in the AWS Glue Data Catalog. DatabaseName *string // A unique identifier for the AWS Glue Data Catalog. CatalogId *string // A table name in the AWS Glue Data Catalog. TableName *string // The name of the connection to the AWS Glue Data Catalog. ConnectionName *string }
The database and table in the AWS Glue Data Catalog that is used for input or output data.
type GrokClassifier ¶
type GrokClassifier struct { // Optional custom grok patterns defined by this classifier. For more information, // see custom patterns in Writing Custom Classifiers // (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). CustomPatterns *string // The grok pattern applied to a data store by this classifier. For more // information, see built-in patterns in Writing Custom Classifiers // (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). GrokPattern *string // The version of this classifier. Version *int64 // The time that this classifier was last updated. LastUpdated *time.Time // An identifier of the data format that the classifier matches, such as Twitter, // JSON, Omniture logs, and so on. Classification *string // The time that this classifier was registered. CreationTime *time.Time // The name of the classifier. Name *string }
A classifier that uses grok patterns.
type IdempotentParameterMismatchException ¶
type IdempotentParameterMismatchException struct { Message *string }
The same unique identifier was associated with two different records.
func (*IdempotentParameterMismatchException) Error ¶
func (e *IdempotentParameterMismatchException) Error() string
func (*IdempotentParameterMismatchException) ErrorCode ¶
func (e *IdempotentParameterMismatchException) ErrorCode() string
func (*IdempotentParameterMismatchException) ErrorFault ¶
func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault
func (*IdempotentParameterMismatchException) ErrorMessage ¶
func (e *IdempotentParameterMismatchException) ErrorMessage() string
func (*IdempotentParameterMismatchException) GetMessage ¶
func (e *IdempotentParameterMismatchException) GetMessage() string
func (*IdempotentParameterMismatchException) HasMessage ¶
func (e *IdempotentParameterMismatchException) HasMessage() bool
type IllegalWorkflowStateException ¶
type IllegalWorkflowStateException struct { Message *string }
The workflow is in an invalid state to perform a requested operation.
func (*IllegalWorkflowStateException) Error ¶
func (e *IllegalWorkflowStateException) Error() string
func (*IllegalWorkflowStateException) ErrorCode ¶
func (e *IllegalWorkflowStateException) ErrorCode() string
func (*IllegalWorkflowStateException) ErrorFault ¶
func (e *IllegalWorkflowStateException) ErrorFault() smithy.ErrorFault
func (*IllegalWorkflowStateException) ErrorMessage ¶
func (e *IllegalWorkflowStateException) ErrorMessage() string
func (*IllegalWorkflowStateException) GetMessage ¶
func (e *IllegalWorkflowStateException) GetMessage() string
func (*IllegalWorkflowStateException) HasMessage ¶
func (e *IllegalWorkflowStateException) HasMessage() bool
type ImportLabelsTaskRunProperties ¶
type ImportLabelsTaskRunProperties struct { // The Amazon Simple Storage Service (Amazon S3) path from where you will import // the labels. InputS3Path *string // Indicates whether to overwrite your existing labels. Replace *bool }
Specifies configuration properties for an importing labels task run.
type InternalServiceException ¶
type InternalServiceException struct { Message *string }
An internal service error occurred.
func (*InternalServiceException) Error ¶
func (e *InternalServiceException) Error() string
func (*InternalServiceException) ErrorCode ¶
func (e *InternalServiceException) ErrorCode() string
func (*InternalServiceException) ErrorFault ¶
func (e *InternalServiceException) ErrorFault() smithy.ErrorFault
func (*InternalServiceException) ErrorMessage ¶
func (e *InternalServiceException) ErrorMessage() string
func (*InternalServiceException) GetMessage ¶
func (e *InternalServiceException) GetMessage() string
func (*InternalServiceException) HasMessage ¶
func (e *InternalServiceException) HasMessage() bool
type InvalidInputException ¶
type InvalidInputException struct { Message *string }
The input provided was not valid.
func (*InvalidInputException) Error ¶
func (e *InvalidInputException) Error() string
func (*InvalidInputException) ErrorCode ¶
func (e *InvalidInputException) ErrorCode() string
func (*InvalidInputException) ErrorFault ¶
func (e *InvalidInputException) ErrorFault() smithy.ErrorFault
func (*InvalidInputException) ErrorMessage ¶
func (e *InvalidInputException) ErrorMessage() string
func (*InvalidInputException) GetMessage ¶
func (e *InvalidInputException) GetMessage() string
func (*InvalidInputException) HasMessage ¶
func (e *InvalidInputException) HasMessage() bool
type JdbcTarget ¶
type JdbcTarget struct { // The name of the connection to use to connect to the JDBC target. ConnectionName *string // The path of the JDBC target. Path *string // A list of glob patterns used to exclude from the crawl. For more information, // see Catalog Tables with a Crawler // (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). Exclusions []*string }
Specifies a JDBC data store to crawl.
type Job ¶
type Job struct { // This field is reserved for future use. LogUri *string // The JobCommand that executes this job. Command *JobCommand // The number of workers of a defined workerType that are allocated when a job runs. The maximum number of workers you can define is 299 for G.1X and 149 for G.2X. NumberOfWorkers *int32 // Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. GlueVersion *string // The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X. * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. WorkerType WorkerType // Specifies configuration properties of a job notification. NotificationProperty *NotificationProperty // The name of the SecurityConfiguration structure to be used with this job. SecurityConfiguration *string // The time and date that this job definition was created. CreatedOn *time.Time // The name you assign to this job definition. Name *string // The connections used for this job. Connections *ConnectionsList // The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). Timeout *int32 // This field is deprecated. Use MaxCapacity instead. The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int32 // The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: * When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. MaxCapacity *float64 // Non-overridable arguments for this job, specified as name-value pairs. NonOverridableArguments map[string]*string // The maximum number of times to retry this job after a JobRun fails. MaxRetries *int32 // An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. ExecutionProperty *ExecutionProperty // The last point in time when this job definition was modified. LastModifiedOn *time.Time // The default arguments for this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) topic in the developer guide. DefaultArguments map[string]*string // The name or Amazon Resource Name (ARN) of the IAM role associated with this job. Role *string // A description of the job. Description *string }
Specifies a job definition.
type JobBookmarkEntry ¶
type JobBookmarkEntry struct { // The bookmark itself. JobBookmark *string // The run ID number. Run *int32 // The run ID number. RunId *string // The name of the job in question. JobName *string // The version of the job. Version *int32 // The unique run identifier associated with the previous job run. PreviousRunId *string // The attempt ID number. Attempt *int32 }
Defines a point that a job can resume processing.
type JobBookmarksEncryption ¶
type JobBookmarksEncryption struct { // The encryption mode to use for job bookmarks data. JobBookmarksEncryptionMode JobBookmarksEncryptionMode // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string }
Specifies how job bookmark data should be encrypted.
type JobBookmarksEncryptionMode ¶
type JobBookmarksEncryptionMode string
const ( JobBookmarksEncryptionModeDisabled JobBookmarksEncryptionMode = "DISABLED" JobBookmarksEncryptionModeCsekms JobBookmarksEncryptionMode = "CSE-KMS" )
Enum values for JobBookmarksEncryptionMode
type JobCommand ¶
type JobCommand struct { // The name of the job command. For an Apache Spark ETL job, this must be glueetl. // For a Python shell job, it must be pythonshell. For an Apache Spark streaming // ETL job, this must be gluestreaming. Name *string // Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that // executes a job. ScriptLocation *string // The Python version being used to execute a Python shell job. Allowed values are // 2 or 3. PythonVersion *string }
Specifies code executed when a job is run.
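For illustration only, a minimal sketch of a Spark ETL command that points at a script in S3 and runs on Python 3, using the command names documented above. It assumes the aws.String pointer helper from github.com/aws/aws-sdk-go-v2/aws; the bucket and key are placeholders.
// Sketch: an Apache Spark ETL job command.
cmd := types.JobCommand{
    Name:           aws.String("glueetl"),
    ScriptLocation: aws.String("s3://example-bucket/scripts/etl_job.py"), // placeholder path
    PythonVersion:  aws.String("3"),
}
_ = cmd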
type JobNodeDetails ¶
type JobNodeDetails struct { // The information for the job runs represented by the job node. JobRuns []*JobRun }
The details of a Job node present in the workflow.
type JobRun ¶
type JobRun struct { // The number of the attempt to run this job. Attempt *int32 // Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. GlueVersion *string // The date and time at which this job run was started. StartedOn *time.Time // The number of workers of a defined workerType that are allocated when a job runs. The maximum number of workers you can define is 299 for G.1X and 149 for G.2X. NumberOfWorkers *int32 // The amount of time (in seconds) that the job run consumed resources. ExecutionTime *int32 // This field is deprecated. Use MaxCapacity instead. The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int32 // A list of predecessors to this job run. PredecessorRuns []*Predecessor // The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job. Timeout *int32 // The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job: * When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. MaxCapacity *float64 // The current state of the job run. JobRunState JobRunState // The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X. * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker. * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. WorkerType WorkerType // An error message associated with this job run.
ErrorMessage *string // The name of the log group for secure logging that can be server-side encrypted // in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/, in which // case the default encryption is NONE. If you add a role name and // SecurityConfiguration name (in other words, // /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security // configuration is used to encrypt the log group. LogGroupName *string // The name of the SecurityConfiguration structure to be used with this job run. SecurityConfiguration *string // The date and time that this job run completed. CompletedOn *time.Time // The name of the job definition being used in this run. JobName *string // The ID of this job run. Id *string // The ID of the previous run of this job. For example, the JobRunId specified in // the StartJobRun action. PreviousRunId *string // Specifies configuration properties of a job run notification. NotificationProperty *NotificationProperty // The last time that this job run was modified. LastModifiedOn *time.Time // The job arguments associated with this run. For this job run, they replace the // default arguments set in the job definition itself. You can specify arguments // here that your own job-execution script consumes, as well as arguments that AWS // Glue itself consumes. For information about how to specify and consume your own // job arguments, see the Calling AWS Glue APIs in Python // (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) // topic in the developer guide. For information about the key-value pairs that AWS // Glue consumes to set up your job, see the Special Parameters Used by AWS Glue // (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) // topic in the developer guide. Arguments map[string]*string // The name of the trigger that started this job run. TriggerName *string }
Contains information about a job run.
type JobRunState ¶
type JobRunState string
const ( JobRunStateStarting JobRunState = "STARTING" JobRunStateRunning JobRunState = "RUNNING" JobRunStateStopping JobRunState = "STOPPING" JobRunStateStopped JobRunState = "STOPPED" JobRunStateSucceeded JobRunState = "SUCCEEDED" JobRunStateFailed JobRunState = "FAILED" JobRunStateTimeout JobRunState = "TIMEOUT" )
Enum values for JobRunState
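For illustration only, a small helper that treats the states listed above as terminal when polling a job run; it uses only the constants shown in this package.
// Sketch: report whether a job run has reached a terminal state.
func jobRunFinished(s types.JobRunState) bool {
    switch s {
    case types.JobRunStateSucceeded, types.JobRunStateFailed,
        types.JobRunStateStopped, types.JobRunStateTimeout:
        return true
    }
    return false
}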
type JobUpdate ¶
type JobUpdate struct { // An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. ExecutionProperty *ExecutionProperty // This field is deprecated. Use MaxCapacity instead. The number of AWS Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int32 // The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). Timeout *int32 // The connections used for this job. Connections *ConnectionsList // Specifies the configuration properties of a job notification. NotificationProperty *NotificationProperty // The JobCommand that executes this job (required). Command *JobCommand // The name of the SecurityConfiguration structure to be used with this job. SecurityConfiguration *string // This field is reserved for future use. LogUri *string // Non-overridable arguments for this job, specified as name-value pairs. NonOverridableArguments map[string]*string // The default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) topic in the developer guide. DefaultArguments map[string]*string // The maximum number of times to retry this job if it fails. MaxRetries *int32 // Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer guide. GlueVersion *string // The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X. * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. WorkerType WorkerType // Description of the job being defined. Description *string // The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job: * When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. MaxCapacity *float64 // The number of workers of a defined workerType that are allocated when a job runs. The maximum number of workers you can define is 299 for G.1X and 149 for G.2X. NumberOfWorkers *int32 // The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). Role *string }
Specifies information used to update an existing job definition. The previous job definition is completely overwritten by this information.
type JsonClassifier ¶
type JsonClassifier struct { // A JsonPath string defining the JSON data for the classifier to classify. AWS // Glue supports a subset of JsonPath, as described in Writing JsonPath Custom // Classifiers // (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json). JsonPath *string // The time that this classifier was registered. CreationTime *time.Time // The name of the classifier. Name *string // The time that this classifier was last updated. LastUpdated *time.Time // The version of this classifier. Version *int64 }
A classifier for JSON content.
type LabelingSetGenerationTaskRunProperties ¶
type LabelingSetGenerationTaskRunProperties struct { // The Amazon Simple Storage Service (Amazon S3) path where you will generate the // labeling set. OutputS3Path *string }
Specifies configuration properties for a labeling set generation task run.
type Language ¶
type Language string
Enum values for Language
type LastCrawlInfo ¶
type LastCrawlInfo struct { // The time at which the crawl started. StartTime *time.Time // The log group for the last crawl. LogGroup *string // The prefix for a message about this crawl. MessagePrefix *string // The log stream for the last crawl. LogStream *string // Status of the last crawl. Status LastCrawlStatus // If an error occurred, the error information about the last crawl. ErrorMessage *string }
Status and error information about the most recent crawl.
type LastCrawlStatus ¶
type LastCrawlStatus string
const ( LastCrawlStatusSucceeded LastCrawlStatus = "SUCCEEDED" LastCrawlStatusCancelled LastCrawlStatus = "CANCELLED" LastCrawlStatusFailed LastCrawlStatus = "FAILED" )
Enum values for LastCrawlStatus
type Location ¶
type Location struct { // An Amazon Simple Storage Service (Amazon S3) location. S3 []*CodeGenNodeArg // An Amazon DynamoDB table location. DynamoDB []*CodeGenNodeArg // A JDBC location. Jdbc []*CodeGenNodeArg }
The location of resources.
type Logical ¶
type Logical string
Enum values for Logical
type LogicalOperator ¶
type LogicalOperator string
const ( LogicalOperatorEquals LogicalOperator = "EQUALS" )
Enum values for LogicalOperator
type LongColumnStatisticsData ¶
type LongColumnStatisticsData struct { // Number of nulls. NumberOfNulls *int64 // Maximum value of the column. MaximumValue *int64 // Number of distinct values. NumberOfDistinctValues *int64 // Minimum value of the column. MinimumValue *int64 }
Defines statistics data for a long column.
type MLTransform ¶
type MLTransform struct { // A count identifier for the labeling files generated by AWS Glue for this transform. As you create a better transform, you can iteratively download, label, and upload the labeling file. LabelCount *int32 // A timestamp. The last point in time when this machine learning transform was modified. LastModifiedOn *time.Time // A TransformParameters object. You can use parameters to tune (customize) the behavior of the machine learning transform by specifying what data it learns from and your preference on various tradeoffs (such as precision vs. recall, or accuracy vs. cost). Parameters *TransformParameters // This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions) in the developer guide. GlueVersion *string // A user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time. Name *string // An EvaluationMetrics object. Evaluation metrics provide an estimate of the quality of your machine learning transform. EvaluationMetrics *EvaluationMetrics // A map of key-value pairs representing the columns and data types that this transform can run against. Has an upper bound of 100 columns. Schema []*SchemaColumn // A user-defined, long-form description text for the machine learning transform. Descriptions are not guaranteed to be unique and can be changed at any time. Description *string // The timeout in minutes of the machine learning transform. Timeout *int32 // A list of AWS Glue table definitions used by the transform. InputRecordTables []*GlueTable // The maximum number of times to retry after an MLTaskRun of the machine learning transform fails. MaxRetries *int32 // The number of workers of a defined workerType that are allocated when a task of the transform runs. If WorkerType is set, then NumberOfWorkers is required (and vice versa). NumberOfWorkers *int32 // The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform. * This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html). * This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform. Role *string // The current status of the machine learning transform. Status TransformStatusType // The unique transform ID that is generated for the machine learning transform. The ID is guaranteed to be unique and does not change. TransformId *string // A timestamp. The time and date that this machine learning transform was created. CreatedOn *time.Time // The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page (http://aws.amazon.com/glue/pricing/). MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType. * If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set. * If MaxCapacity is set, then neither NumberOfWorkers nor WorkerType can be set. * If WorkerType is set, then NumberOfWorkers is required (and vice versa). * MaxCapacity and NumberOfWorkers must both be at least 1. When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only. MaxCapacity *float64 // The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X. * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker. * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType. * If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set. * If MaxCapacity is set, then neither NumberOfWorkers nor WorkerType can be set. * If WorkerType is set, then NumberOfWorkers is required (and vice versa). * MaxCapacity and NumberOfWorkers must both be at least 1. WorkerType WorkerType }
A structure for a machine learning transform.
type MLTransformNotReadyException ¶
type MLTransformNotReadyException struct { Message *string }
The machine learning transform is not ready to run.
func (*MLTransformNotReadyException) Error ¶
func (e *MLTransformNotReadyException) Error() string
func (*MLTransformNotReadyException) ErrorCode ¶
func (e *MLTransformNotReadyException) ErrorCode() string
func (*MLTransformNotReadyException) ErrorFault ¶
func (e *MLTransformNotReadyException) ErrorFault() smithy.ErrorFault
func (*MLTransformNotReadyException) ErrorMessage ¶
func (e *MLTransformNotReadyException) ErrorMessage() string
func (*MLTransformNotReadyException) GetMessage ¶
func (e *MLTransformNotReadyException) GetMessage() string
func (*MLTransformNotReadyException) HasMessage ¶
func (e *MLTransformNotReadyException) HasMessage() bool
type MappingEntry ¶
type MappingEntry struct { // The name of the source table. SourceTable *string // The target type. TargetType *string // The target table. TargetTable *string // The source path. SourcePath *string // The target path. TargetPath *string // The source type. SourceType *string }
Defines a mapping.
type NoScheduleException ¶
type NoScheduleException struct { Message *string }
There is no applicable schedule.
func (*NoScheduleException) Error ¶
func (e *NoScheduleException) Error() string
func (*NoScheduleException) ErrorCode ¶
func (e *NoScheduleException) ErrorCode() string
func (*NoScheduleException) ErrorFault ¶
func (e *NoScheduleException) ErrorFault() smithy.ErrorFault
func (*NoScheduleException) ErrorMessage ¶
func (e *NoScheduleException) ErrorMessage() string
func (*NoScheduleException) GetMessage ¶
func (e *NoScheduleException) GetMessage() string
func (*NoScheduleException) HasMessage ¶
func (e *NoScheduleException) HasMessage() bool
type Node ¶
type Node struct { // Details of the Job when the node represents a Job. JobDetails *JobNodeDetails // Details of the Trigger when the node represents a Trigger. TriggerDetails *TriggerNodeDetails // The unique Id assigned to the node within the workflow. UniqueId *string // The name of the AWS Glue component represented by the node. Name *string // Details of the crawler when the node represents a crawler. CrawlerDetails *CrawlerNodeDetails // The type of AWS Glue component represented by the node. Type NodeType }
A node represents an AWS Glue component, such as a trigger, crawler, or job, that is part of a workflow.
type NodeType ¶
type NodeType string
const ( NodeTypeCrawler NodeType = "CRAWLER" NodeTypeJob NodeType = "JOB" NodeTypeTrigger NodeType = "TRIGGER" )
Enum values for NodeType
type NotificationProperty ¶
type NotificationProperty struct { // After a job run starts, the number of minutes to wait before sending a job run // delay notification. NotifyDelayAfter *int32 }
Specifies configuration properties of a notification.
type OperationTimeoutException ¶
type OperationTimeoutException struct { Message *string }
The operation timed out.
func (*OperationTimeoutException) Error ¶
func (e *OperationTimeoutException) Error() string
func (*OperationTimeoutException) ErrorCode ¶
func (e *OperationTimeoutException) ErrorCode() string
func (*OperationTimeoutException) ErrorFault ¶
func (e *OperationTimeoutException) ErrorFault() smithy.ErrorFault
func (*OperationTimeoutException) ErrorMessage ¶
func (e *OperationTimeoutException) ErrorMessage() string
func (*OperationTimeoutException) GetMessage ¶
func (e *OperationTimeoutException) GetMessage() string
func (*OperationTimeoutException) HasMessage ¶
func (e *OperationTimeoutException) HasMessage() bool
type Order ¶
type Order struct { // The name of the column. Column *string // Indicates whether the column is sorted in ascending order (== 1) or in descending order (== 0). SortOrder *int32 }
Specifies the sort order of a sorted column.
type Partition ¶
type Partition struct { // The last time at which the partition was accessed. LastAccessTime *time.Time // The name of the catalog database in which to create the partition. DatabaseName *string // The last time at which column statistics were computed for this partition. LastAnalyzedTime *time.Time // These key-value pairs define partition parameters. Parameters map[string]*string // The time at which the partition was created. CreationTime *time.Time // Provides information about the physical location where the partition is stored. StorageDescriptor *StorageDescriptor // The values of the partition. Values []*string // The name of the database table in which to create the partition. TableName *string // The ID of the Data Catalog in which the partition resides. CatalogId *string }
Represents a slice of table data.
type PartitionError ¶
type PartitionError struct { // The values that define the partition. PartitionValues []*string // The details about the partition error. ErrorDetail *ErrorDetail }
Contains information about a partition error.
type PartitionInput ¶
type PartitionInput struct { // The last time at which column statistics were computed for this partition. LastAnalyzedTime *time.Time // The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input. The values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys. Values []*string // Provides information about the physical location where the partition is stored. StorageDescriptor *StorageDescriptor // These key-value pairs define partition parameters. Parameters map[string]*string // The last time at which the partition was accessed. LastAccessTime *time.Time }
The structure used to create and update a partition.
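For illustration only, a minimal sketch of a partition input whose Values are ordered to match the table's partition keys (for example year, month, day). It assumes the aws.String pointer helper from github.com/aws/aws-sdk-go-v2/aws; the key order, bucket, and values are placeholders.
// Sketch: values must follow the same order as the partition keys in the S3 prefix.
part := types.PartitionInput{
    Values: []*string{aws.String("2020"), aws.String("07"), aws.String("14")},
    StorageDescriptor: &types.StorageDescriptor{
        Location: aws.String("s3://example-bucket/table/year=2020/month=07/day=14/"),
    },
}
_ = part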
type PartitionValueList ¶
type PartitionValueList struct { // The list of values. Values []*string }
Contains a list of values defining partitions.
type Permission ¶
type Permission string
const ( PermissionAll Permission = "ALL" PermissionSelect Permission = "SELECT" PermissionAlter Permission = "ALTER" PermissionDrop Permission = "DROP" PermissionDelete Permission = "DELETE" PermissionInsert Permission = "INSERT" PermissionCreate_database Permission = "CREATE_DATABASE" PermissionCreate_table Permission = "CREATE_TABLE" PermissionData_location_access Permission = "DATA_LOCATION_ACCESS" )
Enum values for Permission
type PhysicalConnectionRequirements ¶
type PhysicalConnectionRequirements struct { // The security group ID list used by the connection. SecurityGroupIdList []*string // The subnet ID used by the connection. SubnetId *string // The connection's Availability Zone. This field is redundant because the // specified subnet implies the Availability Zone to be used. Currently the field // must be populated, but it will be deprecated in the future. AvailabilityZone *string }
Specifies the physical requirements for a connection.
type Predecessor ¶
type Predecessor struct { // The name of the job definition used by the predecessor job run. JobName *string // The job-run ID of the predecessor job run. RunId *string }
A job run that was used in the predicate of a conditional trigger that triggered this job run.
type Predicate ¶
type Predicate struct { // An optional field if only one condition is listed. If multiple conditions are // listed, then this field is required. Logical Logical // A list of the conditions that determine when the trigger will fire. Conditions []*Condition }
Defines the predicate of the trigger, which determines when it fires.
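For illustration only, a minimal sketch of a predicate that fires when a specific job run succeeds. The Condition fields used here (JobName, State, LogicalOperator) are assumed from the Condition structure, which is not expanded in this excerpt; with a single condition the Logical field can be left unset.
// Sketch: trigger when the named job run reaches SUCCEEDED.
pred := types.Predicate{
    Conditions: []*types.Condition{{
        JobName:         aws.String("nightly-etl"), // placeholder job name
        State:           types.JobRunStateSucceeded,
        LogicalOperator: types.LogicalOperatorEquals,
    }},
}
_ = pred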
type PrincipalPermissions ¶
type PrincipalPermissions struct { // The permissions that are granted to the principal. Permissions []Permission // The principal who is granted permissions. Principal *DataLakePrincipal }
Permissions granted to a principal.
type PrincipalType ¶
type PrincipalType string
const ( PrincipalTypeUser PrincipalType = "USER" PrincipalTypeRole PrincipalType = "ROLE" PrincipalTypeGroup PrincipalType = "GROUP" )
Enum values for PrincipalType
type PropertyPredicate ¶
type PropertyPredicate struct { // The value of the property. Value *string // The comparator used to compare this property to others. Comparator Comparator // The key of the property. Key *string }
Defines a property predicate.
type ResourceNumberLimitExceededException ¶
type ResourceNumberLimitExceededException struct { Message *string }
A resource numerical limit was exceeded.
func (*ResourceNumberLimitExceededException) Error ¶
func (e *ResourceNumberLimitExceededException) Error() string
func (*ResourceNumberLimitExceededException) ErrorCode ¶
func (e *ResourceNumberLimitExceededException) ErrorCode() string
func (*ResourceNumberLimitExceededException) ErrorFault ¶
func (e *ResourceNumberLimitExceededException) ErrorFault() smithy.ErrorFault
func (*ResourceNumberLimitExceededException) ErrorMessage ¶
func (e *ResourceNumberLimitExceededException) ErrorMessage() string
func (*ResourceNumberLimitExceededException) GetMessage ¶
func (e *ResourceNumberLimitExceededException) GetMessage() string
func (*ResourceNumberLimitExceededException) HasMessage ¶
func (e *ResourceNumberLimitExceededException) HasMessage() bool
type ResourceShareType ¶
type ResourceShareType string
const ( ResourceShareTypeForeign ResourceShareType = "FOREIGN" ResourceShareTypeAll ResourceShareType = "ALL" )
Enum values for ResourceShareType
type ResourceType ¶
type ResourceType string
const ( ResourceTypeJar ResourceType = "JAR" ResourceTypeFile ResourceType = "FILE" ResourceTypeArchive ResourceType = "ARCHIVE" )
Enum values for ResourceType
type ResourceUri ¶
type ResourceUri struct { // The URI for accessing the resource. Uri *string // The type of the resource. ResourceType ResourceType }
The URIs for function resources.
type S3Encryption ¶
type S3Encryption struct { // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string // The encryption mode to use for Amazon S3 data. S3EncryptionMode S3EncryptionMode }
Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
type S3EncryptionMode ¶
type S3EncryptionMode string
const ( S3EncryptionModeDisabled S3EncryptionMode = "DISABLED" S3EncryptionModeSsekms S3EncryptionMode = "SSE-KMS" S3EncryptionModeSses3 S3EncryptionMode = "SSE-S3" )
Enum values for S3EncryptionMode
type S3Target ¶
type S3Target struct { // A list of glob patterns used to exclude from the crawl. For more information, // see Catalog Tables with a Crawler // (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). Exclusions []*string // The path to the Amazon S3 target. Path *string }
Specifies a data store in Amazon Simple Storage Service (Amazon S3).
type Schedule ¶
type Schedule struct { // The state of the schedule. State ScheduleState // A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *). ScheduleExpression *string }
A scheduling object using a cron statement to schedule an event.
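For illustration only, a minimal sketch of a schedule carrying the cron expression from the example above (assuming the aws.String pointer helper from github.com/aws/aws-sdk-go-v2/aws). In practice the expression string is usually what you pass to the scheduling APIs; the literal here just shows the format.
// Sketch: run every day at 12:15 UTC.
sched := types.Schedule{
    ScheduleExpression: aws.String("cron(15 12 * * ? *)"),
}
_ = sched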
type ScheduleState ¶
type ScheduleState string
const ( ScheduleStateScheduled ScheduleState = "SCHEDULED" ScheduleStateNot_scheduled ScheduleState = "NOT_SCHEDULED" ScheduleStateTransitioning ScheduleState = "TRANSITIONING" )
Enum values for ScheduleState
type SchedulerNotRunningException ¶
type SchedulerNotRunningException struct { Message *string }
The specified scheduler is not running.
func (*SchedulerNotRunningException) Error ¶
func (e *SchedulerNotRunningException) Error() string
func (*SchedulerNotRunningException) ErrorCode ¶
func (e *SchedulerNotRunningException) ErrorCode() string
func (*SchedulerNotRunningException) ErrorFault ¶
func (e *SchedulerNotRunningException) ErrorFault() smithy.ErrorFault
func (*SchedulerNotRunningException) ErrorMessage ¶
func (e *SchedulerNotRunningException) ErrorMessage() string
func (*SchedulerNotRunningException) GetMessage ¶
func (e *SchedulerNotRunningException) GetMessage() string
func (*SchedulerNotRunningException) HasMessage ¶
func (e *SchedulerNotRunningException) HasMessage() bool
type SchedulerRunningException ¶
type SchedulerRunningException struct { Message *string }
The specified scheduler is already running.
func (*SchedulerRunningException) Error ¶
func (e *SchedulerRunningException) Error() string
func (*SchedulerRunningException) ErrorCode ¶
func (e *SchedulerRunningException) ErrorCode() string
func (*SchedulerRunningException) ErrorFault ¶
func (e *SchedulerRunningException) ErrorFault() smithy.ErrorFault
func (*SchedulerRunningException) ErrorMessage ¶
func (e *SchedulerRunningException) ErrorMessage() string
func (*SchedulerRunningException) GetMessage ¶
func (e *SchedulerRunningException) GetMessage() string
func (*SchedulerRunningException) HasMessage ¶
func (e *SchedulerRunningException) HasMessage() bool
type SchedulerTransitioningException ¶
type SchedulerTransitioningException struct { Message *string }
The specified scheduler is transitioning.
func (*SchedulerTransitioningException) Error ¶
func (e *SchedulerTransitioningException) Error() string
func (*SchedulerTransitioningException) ErrorCode ¶
func (e *SchedulerTransitioningException) ErrorCode() string
func (*SchedulerTransitioningException) ErrorFault ¶
func (e *SchedulerTransitioningException) ErrorFault() smithy.ErrorFault
func (*SchedulerTransitioningException) ErrorMessage ¶
func (e *SchedulerTransitioningException) ErrorMessage() string
func (*SchedulerTransitioningException) GetMessage ¶
func (e *SchedulerTransitioningException) GetMessage() string
func (*SchedulerTransitioningException) HasMessage ¶
func (e *SchedulerTransitioningException) HasMessage() bool
type SchemaChangePolicy ¶
type SchemaChangePolicy struct { // The deletion behavior when the crawler finds a deleted object. DeleteBehavior DeleteBehavior // The update behavior when the crawler finds a changed schema. UpdateBehavior UpdateBehavior }
A policy that specifies update and deletion behaviors for the crawler.
type SchemaColumn ¶
type SchemaColumn struct { // The name of the column. Name *string // The type of data in the column. DataType *string }
A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
type SecurityConfiguration ¶
type SecurityConfiguration struct { // The encryption configuration associated with this security configuration. EncryptionConfiguration *EncryptionConfiguration // The name of the security configuration. Name *string // The time at which this security configuration was created. CreatedTimeStamp *time.Time }
Specifies a security configuration.
type Segment ¶
type Segment struct { // The zero-based index number of the segment. For example, if the total number of // segments is 4, SegmentNumber values range from 0 through 3. SegmentNumber *int32 // The total number of segments. TotalSegments *int32 }
Defines a non-overlapping region of a table's partitions, allowing multiple requests to be executed in parallel.
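As a hedged sketch, the snippet below builds one Segment per worker so that four workers could each scan a quarter of a table's partitions; each Segment would then be passed to a partition-listing call such as GetPartitions in the parent glue package.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/service/glue/types"
    )

    func i32(v int32) *int32 { return &v }

    func main() {
        const total int32 = 4
        for n := int32(0); n < total; n++ {
            seg := types.Segment{
                SegmentNumber: i32(n),     // zero-based index of this segment
                TotalSegments: i32(total), // total number of parallel segments
            }
            fmt.Printf("worker %d handles segment %d of %d\n", n, *seg.SegmentNumber, *seg.TotalSegments)
        }
    }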
type SerDeInfo ¶
type SerDeInfo struct { // Usually the class that implements the SerDe. An example is // org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. SerializationLibrary *string // Name of the SerDe. Name *string // These key-value pairs define initialization parameters for the SerDe. Parameters map[string]*string }
Information about a serialization/deserialization program (SerDe) that serves as an extractor and loader.
type SkewedInfo ¶
type SkewedInfo struct { // A list of names of columns that contain skewed values. SkewedColumnNames []*string // A mapping of skewed values to the columns that contain them. SkewedColumnValueLocationMaps map[string]*string // A list of values that appear so frequently as to be considered skewed. SkewedColumnValues []*string }
Specifies skewed values in a table. Skewed values are those that occur with very high frequency.
type Sort ¶
type Sort string
Enum values for Sort
type SortCriterion ¶
type SortCriterion struct { // An ascending or descending sort. Sort Sort // The name of the field on which to sort. FieldName *string }
Specifies a field to sort by and a sort order.
type SortDirectionType ¶
type SortDirectionType string
const ( SortDirectionTypeDescending SortDirectionType = "DESCENDING" SortDirectionTypeAscending SortDirectionType = "ASCENDING" )
Enum values for SortDirectionType
type StorageDescriptor ¶
type StorageDescriptor struct { // The input format: SequenceFileInputFormat (binary), or TextInputFormat, or a // custom format. InputFormat *string // A list of the Columns in the table. Columns []*Column // Must be specified if the table contains any dimension columns. NumberOfBuckets *int32 // The physical location of the table. By default, this takes the form of the // warehouse location, followed by the database location in the warehouse, followed // by the table name. Location *string // The information about values that appear frequently in a column (skewed values). SkewedInfo *SkewedInfo // The output format: SequenceFileOutputFormat (binary), or // IgnoreKeyTextOutputFormat, or a custom format. OutputFormat *string // A list of reducer grouping columns, clustering columns, and bucketing columns in // the table. BucketColumns []*string // The user-supplied properties in key-value form. Parameters map[string]*string // True if the data in the table is compressed, or False if not. Compressed *bool // A list specifying the sort order of each bucket in the table. SortColumns []*Order // The serialization/deserialization (SerDe) information. SerdeInfo *SerDeInfo // True if the table data is stored in subdirectories, or False if not. StoredAsSubDirectories *bool }
Describes the physical storage of table data.
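A hedged sketch of assembling a StorageDescriptor of the kind embedded in a Table or TableInput. The bucket path and SerDe names are placeholders, and the Column field names (Name, Type) are taken from the Column type defined earlier in this package.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string { return &s }
    func boolp(v bool) *bool   { return &v }

    func main() {
        sd := types.StorageDescriptor{
            Location:     str("s3://example-bucket/warehouse/events/"), // placeholder path
            InputFormat:  str("org.apache.hadoop.mapred.TextInputFormat"),
            OutputFormat: str("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"),
            Compressed:   boolp(false),
            Columns: []*types.Column{
                {Name: str("event_id"), Type: str("string")},
                {Name: str("event_time"), Type: str("timestamp")},
            },
            SerdeInfo: &types.SerDeInfo{
                Name:                 str("events-serde"),
                SerializationLibrary: str("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
                Parameters:           map[string]*string{"field.delim": str(",")},
            },
        }
        _ = sd
    }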
type StringColumnStatisticsData ¶
type StringColumnStatisticsData struct { // The average string length in the column. AverageLength *float64 // Number of distinct values. NumberOfDistinctValues *int64 // The maximum string length in the column. MaximumLength *int64 // Number of nulls. NumberOfNulls *int64 }
Defines column statistics data for character sequence (string) values.
type Table ¶
type Table struct { // The last time that the table was accessed. This is usually taken from HDFS, and // might not be reliable. LastAccessTime *time.Time // The ID of the Data Catalog in which the table resides. CatalogId *string // A list of columns by which the table is partitioned. Only primitive types are // supported as partition keys. When you create a table used by Amazon Athena, and // you do not specify any partitionKeys, you must at least set the value of // partitionKeys to an empty list. For example: "PartitionKeys": [] PartitionKeys []*Column // A description of the table. Description *string // The name of the database where the table metadata resides. For Hive // compatibility, this must be all lowercase. DatabaseName *string // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). TableType *string // A TableIdentifier structure that describes a target table for resource linking. TargetTable *TableIdentifier // The person or entity who created the table. CreatedBy *string // These key-value pairs define properties associated with the table. Parameters map[string]*string // The time when the table definition was created in the Data Catalog. CreateTime *time.Time // If the table is a view, the expanded text of the view; otherwise null. ViewExpandedText *string // The retention time for this table. Retention *int32 // A storage descriptor containing information about the physical storage of this // table. StorageDescriptor *StorageDescriptor // The table name. For Hive compatibility, this must be entirely lowercase. Name *string // Indicates whether the table has been registered with AWS Lake Formation. IsRegisteredWithLakeFormation *bool // If the table is a view, the original text of the view; otherwise null. ViewOriginalText *string // The owner of the table. Owner *string // The last time that the table was updated. UpdateTime *time.Time // The last time that column statistics were computed for this table. LastAnalyzedTime *time.Time }
Represents a collection of related data organized in columns and rows.
type TableError ¶
type TableError struct { // The details about the error. ErrorDetail *ErrorDetail // The name of the table. For Hive compatibility, this must be entirely lowercase. TableName *string }
An error record for table operations.
type TableIdentifier ¶
type TableIdentifier struct { // The ID of the Data Catalog in which the table resides. CatalogId *string // The name of the catalog database that contains the target table. DatabaseName *string // The name of the target table. Name *string }
A structure that describes a target table for resource linking.
type TableInput ¶
type TableInput struct { // If the table is a view, the expanded text of the view; otherwise null. ViewExpandedText *string // The retention time for this table. Retention *int32 // The last time that column statistics were computed for this table. LastAnalyzedTime *time.Time // The table name. For Hive compatibility, this is folded to lowercase when it is // stored. Name *string // A storage descriptor containing information about the physical storage of this // table. StorageDescriptor *StorageDescriptor // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). TableType *string // These key-value pairs define properties associated with the table. Parameters map[string]*string // If the table is a view, the original text of the view; otherwise null. ViewOriginalText *string // The table owner. Owner *string // A list of columns by which the table is partitioned. Only primitive types are // supported as partition keys. When you create a table used by Amazon Athena, and // you do not specify any partitionKeys, you must at least set the value of // partitionKeys to an empty list. For example: "PartitionKeys": [] PartitionKeys []*Column // A TableIdentifier structure that describes a target table for resource linking. TargetTable *TableIdentifier // The last time that the table was accessed. LastAccessTime *time.Time // A description of the table. Description *string }
A structure used to define a table.
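A minimal sketch of a TableInput of the sort passed to a CreateTable or UpdateTable call in the parent glue package. The table, bucket, and property values are placeholders.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string { return &s }

    func main() {
        input := types.TableInput{
            Name:        str("events"), // folded to lowercase when stored
            Description: str("Raw click events"),
            TableType:   str("EXTERNAL_TABLE"),
            // Partition keys must be primitive types; Athena tables without
            // partitions should set this to an empty list.
            PartitionKeys: []*types.Column{
                {Name: str("dt"), Type: str("string")},
            },
            Parameters: map[string]*string{
                "classification": str("csv"), // placeholder table property
            },
            StorageDescriptor: &types.StorageDescriptor{
                Location: str("s3://example-bucket/warehouse/events/"), // placeholder path
                Columns: []*types.Column{
                    {Name: str("event_id"), Type: str("string")},
                },
            },
        }
        _ = input
    }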
type TableVersion ¶
type TableVersion struct { // The table in question. Table *Table // The ID value that identifies this table version. A VersionId is a string // representation of an integer. Each version is incremented by 1. VersionId *string }
Specifies a version of a table.
type TableVersionError ¶
type TableVersionError struct { // The details about the error. ErrorDetail *ErrorDetail // The ID value of the version in question. A VersionID is a string representation // of an integer. Each version is incremented by 1. VersionId *string // The name of the table in question. TableName *string }
An error record for table-version operations.
type TaskRun ¶
type TaskRun struct { // The date and time that this task run started. StartedOn *time.Time // The amount of time (in seconds) that the task run consumed resources. ExecutionTime *int32 // The current status of the requested task run. Status TaskStatusType // Specifies configuration properties associated with this task run. Properties *TaskRunProperties // The unique identifier for the transform. TransformId *string // The last point in time that the requested task run was completed. CompletedOn *time.Time // The error string associated with this task run. ErrorString *string // The name of the log group for secure logging, associated with this task run. LogGroupName *string // The unique identifier for this task run. TaskRunId *string // The last point in time that the requested task run was updated. LastModifiedOn *time.Time }
Represents a single run of a machine learning transform task.
type TaskRunFilterCriteria ¶
type TaskRunFilterCriteria struct { // Filter on task runs started before this date. StartedBefore *time.Time // Filter on task runs started after this date. StartedAfter *time.Time // The type of task run. TaskRunType TaskType // The current status of the task run. Status TaskStatusType }
The criteria that are used to filter the task runs for the machine learning transform.
type TaskRunProperties ¶
type TaskRunProperties struct { // The type of task run. TaskType TaskType // The configuration properties for a labeling set generation task run. LabelingSetGenerationTaskRunProperties *LabelingSetGenerationTaskRunProperties // The configuration properties for an importing labels task run. ImportLabelsTaskRunProperties *ImportLabelsTaskRunProperties // The configuration properties for a find matches task run. FindMatchesTaskRunProperties *FindMatchesTaskRunProperties // The configuration properties for an exporting labels task run. ExportLabelsTaskRunProperties *ExportLabelsTaskRunProperties }
The configuration properties for the task run.
type TaskRunSortColumnType ¶
type TaskRunSortColumnType string
const ( TaskRunSortColumnTypeTask_run_type TaskRunSortColumnType = "TASK_RUN_TYPE" TaskRunSortColumnTypeStatus TaskRunSortColumnType = "STATUS" TaskRunSortColumnTypeStarted TaskRunSortColumnType = "STARTED" )
Enum values for TaskRunSortColumnType
type TaskRunSortCriteria ¶
type TaskRunSortCriteria struct { // The sort direction to be used to sort the list of task runs for the machine // learning transform. SortDirection SortDirectionType // The column to be used to sort the list of task runs for the machine learning // transform. Column TaskRunSortColumnType }
The sorting criteria that are used to sort the list of task runs for the machine learning transform.
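A short sketch combining this sorting structure with the TaskRunFilterCriteria above, in the shape they might take when listing task runs (for example with GetMLTaskRuns in the parent glue package). The seven-day window is illustrative.

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go-v2/service/glue/types"
    )

    func main() {
        after := time.Now().AddDate(0, 0, -7) // illustrative: only the last seven days

        filter := types.TaskRunFilterCriteria{
            TaskRunType:  types.TaskTypeEvaluation,
            Status:       types.TaskStatusTypeFailed,
            StartedAfter: &after,
        }
        sortCrit := types.TaskRunSortCriteria{
            Column:        types.TaskRunSortColumnTypeStarted,
            SortDirection: types.SortDirectionTypeDescending,
        }
        _, _ = filter, sortCrit
    }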
type TaskStatusType ¶
type TaskStatusType string
const ( TaskStatusTypeStarting TaskStatusType = "STARTING" TaskStatusTypeRunning TaskStatusType = "RUNNING" TaskStatusTypeStopping TaskStatusType = "STOPPING" TaskStatusTypeStopped TaskStatusType = "STOPPED" TaskStatusTypeSucceeded TaskStatusType = "SUCCEEDED" TaskStatusTypeFailed TaskStatusType = "FAILED" TaskStatusTypeTimeout TaskStatusType = "TIMEOUT" )
Enum values for TaskStatusType
type TaskType ¶
type TaskType string
const ( TaskTypeEvaluation TaskType = "EVALUATION" TaskTypeLabeling_set_generation TaskType = "LABELING_SET_GENERATION" TaskTypeImport_labels TaskType = "IMPORT_LABELS" TaskTypeExport_labels TaskType = "EXPORT_LABELS" TaskTypeFind_matches TaskType = "FIND_MATCHES" )
Enum values for TaskType
type TransformFilterCriteria ¶
type TransformFilterCriteria struct { // Filters on datasets with a specific schema. The Map object is an array of // key-value pairs representing the schema this transform accepts, where Column is // the name of a column, and Type is the type of the data such as an integer or // string. Has an upper bound of 100 columns. Schema []*SchemaColumn // A unique transform name that is used to filter the machine learning transforms. Name *string // The type of machine learning transform that is used to filter the machine // learning transforms. TransformType TransformType // Filter on transforms last modified before this date. LastModifiedBefore *time.Time // The time and date after which the transforms were created. CreatedAfter *time.Time // This value determines which version of AWS Glue this machine learning transform // is compatible with. Glue 1.0 is recommended for most customers. If the value is // not set, the Glue compatibility defaults to Glue 0.9. For more information, see // AWS Glue Versions // (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions) // in the developer guide. GlueVersion *string // The time and date before which the transforms were created. CreatedBefore *time.Time // Filters the list of machine learning transforms by the last known status of the // transforms (to indicate whether a transform can be used or not). One of // "NOT_READY", "READY", or "DELETING". Status TransformStatusType // Filter on transforms last modified after this date. LastModifiedAfter *time.Time }
The criteria used to filter the machine learning transforms.
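A hedged sketch of a filter that narrows a transform listing to ready FIND_MATCHES transforms compatible with a two-column schema; the constants come from this package, while the column names are placeholders.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string { return &s }

    func main() {
        filter := types.TransformFilterCriteria{
            TransformType: types.TransformTypeFind_matches,
            Status:        types.TransformStatusTypeReady,
            GlueVersion:   str("1.0"),
            // The transform must accept (at least) these columns.
            Schema: []*types.SchemaColumn{
                {Name: str("customer_id"), DataType: str("long")},
                {Name: str("email"), DataType: str("string")},
            },
        }
        _ = filter
    }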
type TransformParameters ¶
type TransformParameters struct { // The parameters for the find matches algorithm. FindMatchesParameters *FindMatchesParameters // The type of machine learning transform. For information about the types of // machine learning transforms, see Creating Machine Learning Transforms // (http://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html). TransformType TransformType }
The algorithm-specific parameters that are associated with the machine learning transform.
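A minimal sketch pairing the FIND_MATCHES transform type with its algorithm parameters. The FindMatchesParameters field names used here (PrimaryKeyColumnName, PrecisionRecallTradeoff) are assumed from that type's definition earlier in this package; they are not shown in this section.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string   { return &s }
    func f64(v float64) *float64 { return &v }

    func main() {
        params := types.TransformParameters{
            TransformType: types.TransformTypeFind_matches,
            FindMatchesParameters: &types.FindMatchesParameters{
                // Assumed field names; see the FindMatchesParameters type for the full set.
                PrimaryKeyColumnName:    str("customer_id"),
                PrecisionRecallTradeoff: f64(0.5), // 0.0 favors recall, 1.0 favors precision
            },
        }
        _ = params
    }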
type TransformSortColumnType ¶
type TransformSortColumnType string
const ( TransformSortColumnTypeName TransformSortColumnType = "NAME" TransformSortColumnTypeTransform_type TransformSortColumnType = "TRANSFORM_TYPE" TransformSortColumnTypeStatus TransformSortColumnType = "STATUS" TransformSortColumnTypeCreated TransformSortColumnType = "CREATED" TransformSortColumnTypeLast_modified TransformSortColumnType = "LAST_MODIFIED" )
Enum values for TransformSortColumnType
type TransformSortCriteria ¶
type TransformSortCriteria struct { // The column to be used in the sorting criteria that are associated with the // machine learning transform. Column TransformSortColumnType // The sort direction to be used in the sorting criteria that are associated with // the machine learning transform. SortDirection SortDirectionType }
The sorting criteria that are associated with the machine learning transform.
type TransformStatusType ¶
type TransformStatusType string
const ( TransformStatusTypeNot_ready TransformStatusType = "NOT_READY" TransformStatusTypeReady TransformStatusType = "READY" TransformStatusTypeDeleting TransformStatusType = "DELETING" )
Enum values for TransformStatusType
type TransformType ¶
type TransformType string
const ( TransformTypeFind_matches TransformType = "FIND_MATCHES" )
Enum values for TransformType
type Trigger ¶
type Trigger struct { // The predicate of this trigger, which defines when it will fire. Predicate *Predicate // The type of trigger that this is. Type TriggerType // The name of the trigger. Name *string // The name of the workflow associated with the trigger. WorkflowName *string // The actions initiated by this trigger. Actions []*Action // Reserved for future use. Id *string // A description of this trigger. Description *string // A cron expression used to specify the schedule (see Time-Based Schedules for // Jobs and Crawlers // (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)). // For example, to run something every day at 12:15 UTC, you would specify: cron(15 // 12 * * ? *). Schedule *string // The current state of the trigger. State TriggerState }
Information about a specific trigger.
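A small sketch of inspecting a Trigger value, for example one returned by a GetTrigger call in the parent glue package, using the TriggerState and TriggerType constants listed below.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/service/glue/types"
    )

    func str(s string) *string { return &s }

    // describeTrigger summarizes a trigger's state and actions.
    func describeTrigger(t *types.Trigger) string {
        name := "<unnamed>"
        if t.Name != nil {
            name = *t.Name
        }
        switch t.State {
        case types.TriggerStateActivated:
            return fmt.Sprintf("trigger %q (%s) is active with %d action(s)", name, t.Type, len(t.Actions))
        case types.TriggerStateDeactivated:
            return fmt.Sprintf("trigger %q is deactivated", name)
        default:
            return fmt.Sprintf("trigger %q is in state %s", name, t.State)
        }
    }

    func main() {
        t := &types.Trigger{Name: str("nightly"), Type: types.TriggerTypeScheduled, State: types.TriggerStateActivated}
        fmt.Println(describeTrigger(t))
    }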
type TriggerNodeDetails ¶
type TriggerNodeDetails struct { // The information of the trigger represented by the trigger node. Trigger *Trigger }
The details of a Trigger node present in the workflow.
type TriggerState ¶
type TriggerState string
const ( TriggerStateCreating TriggerState = "CREATING" TriggerStateCreated TriggerState = "CREATED" TriggerStateActivating TriggerState = "ACTIVATING" TriggerStateActivated TriggerState = "ACTIVATED" TriggerStateDeactivating TriggerState = "DEACTIVATING" TriggerStateDeactivated TriggerState = "DEACTIVATED" TriggerStateDeleting TriggerState = "DELETING" TriggerStateUpdating TriggerState = "UPDATING" )
Enum values for TriggerState
type TriggerType ¶
type TriggerType string
const ( TriggerTypeScheduled TriggerType = "SCHEDULED" TriggerTypeConditional TriggerType = "CONDITIONAL" TriggerTypeOn_demand TriggerType = "ON_DEMAND" )
Enum values for TriggerType
type TriggerUpdate ¶
type TriggerUpdate struct { // The predicate of this trigger, which defines when it will fire. Predicate *Predicate // A description of this trigger. Description *string // The actions initiated by this trigger. Actions []*Action // Reserved for future use. Name *string // A cron expression used to specify the schedule (see Time-Based Schedules for // Jobs and Crawlers // (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)). // For example, to run something every day at 12:15 UTC, you would specify: cron(15 // 12 * * ? *). Schedule *string }
A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.
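A hedged sketch of a TriggerUpdate that rewrites a scheduled trigger's full definition, as the note above requires. The cron expression and job name are placeholders, and the Action field name JobName is assumed from the Action type defined earlier in this package.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string { return &s }

    func main() {
        update := types.TriggerUpdate{
            Description: str("Nightly ETL at 02:30 UTC"),
            Schedule:    str("cron(30 2 * * ? *)"), // time-based schedule syntax, as documented above
            Actions: []*types.Action{
                {JobName: str("nightly-etl-job")}, // assumed field name on the Action type
            },
        }
        _ = update
    }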
type UpdateBehavior ¶
type UpdateBehavior string
const ( UpdateBehaviorLog UpdateBehavior = "LOG" UpdateBehaviorUpdate_in_database UpdateBehavior = "UPDATE_IN_DATABASE" )
Enum values for UpdateBehavior
type UpdateCsvClassifierRequest ¶
type UpdateCsvClassifierRequest struct { // Specifies not to trim values before identifying the type of column values. The // default value is true. DisableValueTrimming *bool // A custom symbol to denote what separates each column entry in the row. Delimiter *string // A list of strings representing column names. Header []*string // Enables the processing of files that contain only one column. AllowSingleColumn *bool // The name of the classifier. Name *string // A custom symbol to denote what combines content into a single column value. It // must be different from the column delimiter. QuoteSymbol *string // Indicates whether the CSV file contains a header. ContainsHeader CsvHeaderOption }
Specifies a custom CSV classifier to be updated.
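A minimal sketch of a CSV classifier update as it might be passed to UpdateClassifier. The ContainsHeader value is written as a raw string cast because the generated CsvHeaderOption constant names are not shown in this section and are therefore an assumption.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string { return &s }
    func boolp(v bool) *bool   { return &v }

    func main() {
        req := types.UpdateCsvClassifierRequest{
            Name:                 str("pipe-delimited-csv"),
            Delimiter:            str("|"),
            QuoteSymbol:          str("\""),
            ContainsHeader:       types.CsvHeaderOption("PRESENT"), // assumed service value
            Header:               []*string{str("id"), str("name"), str("amount")},
            AllowSingleColumn:    boolp(false),
            DisableValueTrimming: boolp(false),
        }
        _ = req
    }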
type UpdateGrokClassifierRequest ¶
type UpdateGrokClassifierRequest struct { // Optional custom grok patterns used by this classifier. CustomPatterns *string // The grok pattern used by this classifier. GrokPattern *string // An identifier of the data format that the classifier matches, such as Twitter, // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. Classification *string // The name of the GrokClassifier. Name *string }
Specifies a grok classifier to update when passed to UpdateClassifier.
type UpdateJsonClassifierRequest ¶
type UpdateJsonClassifierRequest struct { // A JsonPath string defining the JSON data for the classifier to classify. AWS // Glue supports a subset of JsonPath, as described in Writing JsonPath Custom // Classifiers // (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json). JsonPath *string // The name of the classifier. Name *string }
Specifies a JSON classifier to be updated.
type UpdateXMLClassifierRequest ¶
type UpdateXMLClassifierRequest struct { // An identifier of the data format that the classifier matches. Classification *string // The XML tag designating the element that contains each record in an XML document // being parsed. This cannot identify a self-closing element (closed by />). An // empty row element that contains only attributes can be parsed as long as it ends // with a closing tag. RowTag *string // The name of the classifier. Name *string }
Specifies an XML classifier to be updated.
type UserDefinedFunction ¶
type UserDefinedFunction struct { // The name of the catalog database that contains the function. DatabaseName *string // The Java class that contains the function code. ClassName *string // The ID of the Data Catalog in which the function resides. CatalogId *string // The time at which the function was created. CreateTime *time.Time // The name of the function. FunctionName *string // The resource URIs for the function. ResourceUris []*ResourceUri // The owner of the function. OwnerName *string // The owner type. OwnerType PrincipalType }
Represents the equivalent of a Hive user-defined function (UDF) definition.
type UserDefinedFunctionInput ¶
type UserDefinedFunctionInput struct { // The owner type. OwnerType PrincipalType // The name of the function. FunctionName *string // The resource URIs for the function. ResourceUris []*ResourceUri // The Java class that contains the function code. ClassName *string // The owner of the function. OwnerName *string }
A structure used to create or update a user-defined function.
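A hedged sketch of a UDF definition of the kind passed to CreateUserDefinedFunction. The Java class and S3 path are placeholders; the ResourceUri field names and the PrincipalType/ResourceType string values are assumptions based on types defined earlier in this package.

    package main

    import "github.com/aws/aws-sdk-go-v2/service/glue/types"

    func str(s string) *string { return &s }

    func main() {
        fn := types.UserDefinedFunctionInput{
            FunctionName: str("to_upper"),
            ClassName:    str("com.example.hive.udf.ToUpper"), // placeholder Java class
            OwnerName:    str("analytics-team"),
            OwnerType:    types.PrincipalType("GROUP"), // assumed service value; a PrincipalType* constant likely exists
            ResourceUris: []*types.ResourceUri{
                {
                    // Field names assumed from the ResourceUri type defined earlier in this package.
                    Uri:          str("s3://example-bucket/udfs/to-upper.jar"),
                    ResourceType: types.ResourceType("JAR"),
                },
            },
        }
        _ = fn
    }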
type ValidationException ¶
type ValidationException struct { Message *string }
A value could not be validated.
func (*ValidationException) Error ¶
func (e *ValidationException) Error() string
func (*ValidationException) ErrorCode ¶
func (e *ValidationException) ErrorCode() string
func (*ValidationException) ErrorFault ¶
func (e *ValidationException) ErrorFault() smithy.ErrorFault
func (*ValidationException) ErrorMessage ¶
func (e *ValidationException) ErrorMessage() string
func (*ValidationException) GetMessage ¶
func (e *ValidationException) GetMessage() string
func (*ValidationException) HasMessage ¶
func (e *ValidationException) HasMessage() bool
type VersionMismatchException ¶
type VersionMismatchException struct { Message *string }
There was a version conflict.
func (*VersionMismatchException) Error ¶
func (e *VersionMismatchException) Error() string
func (*VersionMismatchException) ErrorCode ¶
func (e *VersionMismatchException) ErrorCode() string
func (*VersionMismatchException) ErrorFault ¶
func (e *VersionMismatchException) ErrorFault() smithy.ErrorFault
func (*VersionMismatchException) ErrorMessage ¶
func (e *VersionMismatchException) ErrorMessage() string
func (*VersionMismatchException) GetMessage ¶
func (e *VersionMismatchException) GetMessage() string
func (*VersionMismatchException) HasMessage ¶
func (e *VersionMismatchException) HasMessage() bool
type WorkerType ¶
type WorkerType string
const ( WorkerTypeStandard WorkerType = "Standard" WorkerTypeG1x WorkerType = "G.1X" WorkerTypeG2x WorkerType = "G.2X" )
Enum values for WorkerType
type Workflow ¶
type Workflow struct { // The graph representing all the AWS Glue components that belong to the workflow // as nodes and directed connections between them as edges. Graph *WorkflowGraph // A collection of properties to be used as part of each execution of the workflow. DefaultRunProperties map[string]*string // A description of the workflow. Description *string // The name of the workflow representing the flow. Name *string // The date and time when the workflow was last modified. LastModifiedOn *time.Time // The information about the last execution of the workflow. LastRun *WorkflowRun // The date and time when the workflow was created. CreatedOn *time.Time }
A workflow represents a flow in which AWS Glue components should be executed to complete a logical task.
type WorkflowGraph ¶
type WorkflowGraph struct { // A list of all the directed connections between the nodes belonging to the // workflow. Edges []*Edge // A list of the AWS Glue components that belong to the workflow, represented as // nodes. Nodes []*Node }
A workflow graph represents the complete workflow containing all the AWS Glue components present in the workflow and all the directed connections between them.
type WorkflowRun ¶
type WorkflowRun struct { // The statistics of the run. Statistics *WorkflowRunStatistics // Name of the workflow that was executed. Name *string // The date and time when the workflow run completed. CompletedOn *time.Time // The graph representing all the AWS Glue components that belong to the workflow // as nodes and directed connections between them as edges. Graph *WorkflowGraph // The date and time when the workflow run was started. StartedOn *time.Time // The workflow run properties which were set during the run. WorkflowRunProperties map[string]*string // The ID of this workflow run. WorkflowRunId *string // The ID of the previous workflow run. PreviousRunId *string // The status of the workflow run. Status WorkflowRunStatus }
A workflow run is an execution of a workflow providing all the runtime information.
type WorkflowRunStatistics ¶
type WorkflowRunStatistics struct { // Total number of Actions that timed out. TimeoutActions *int32 // Total number of Actions in the running state. RunningActions *int32 // Total number of Actions that have succeeded. SucceededActions *int32 // Total number of Actions that have failed. FailedActions *int32 // Total number of Actions that have stopped. StoppedActions *int32 // Total number of Actions in the workflow run. TotalActions *int32 }
Provides statistics about a workflow run.
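A small sketch that summarizes a run from these counters; every field is a pointer, so nil checks stand in for counters the service did not populate.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/service/glue/types"
    )

    func count(p *int32) int32 {
        if p == nil {
            return 0
        }
        return *p
    }

    func main() {
        i32 := func(v int32) *int32 { return &v }
        stats := types.WorkflowRunStatistics{
            TotalActions:     i32(10),
            SucceededActions: i32(7),
            FailedActions:    i32(1),
            RunningActions:   i32(2),
        }
        finished := count(stats.SucceededActions) + count(stats.FailedActions) +
            count(stats.TimeoutActions) + count(stats.StoppedActions)
        fmt.Printf("%d of %d actions finished, %d still running\n",
            finished, count(stats.TotalActions), count(stats.RunningActions))
    }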
type WorkflowRunStatus ¶
type WorkflowRunStatus string
const ( WorkflowRunStatusRunning WorkflowRunStatus = "RUNNING" WorkflowRunStatusCompleted WorkflowRunStatus = "COMPLETED" WorkflowRunStatusStopping WorkflowRunStatus = "STOPPING" WorkflowRunStatusStopped WorkflowRunStatus = "STOPPED" )
Enum values for WorkflowRunStatus
type XMLClassifier ¶
type XMLClassifier struct { // The XML tag designating the element that contains each record in an XML document // being parsed. This can't identify a self-closing element (closed by />). An // empty row element that contains only attributes can be parsed as long as it ends // with a closing tag. RowTag *string // The version of this classifier. Version *int64 // The time that this classifier was last updated. LastUpdated *time.Time // The name of the classifier. Name *string // The time that this classifier was registered. CreationTime *time.Time // An identifier of the data format that the classifier matches. Classification *string }
A classifier for XML content.
Source Files ¶
- Version: v0.1.0
- Published: Sep 29, 2020
- Platform: js/wasm
- Imports: 4 packages