package storage
import "cloud.google.com/go/bigquery/storage/apiv1alpha2"
Package storage is an auto-generated package for the BigQuery Storage API.
NOTE: This package is in alpha. It is not stable, and is likely to change.
Use of Context
The ctx passed to NewClient is used for authentication requests and for creating the underlying connection, but is not used for subsequent calls. Individual methods on the client use the ctx given to them.
To close the open connection, use the Close() method.
For information about setting deadlines, reusing contexts, and more please visit godoc.org/cloud.google.com/go.
Index ¶
- func DefaultAuthScopes() []string
- type BigQueryWriteCallOptions
- type BigQueryWriteClient
- func NewBigQueryWriteClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryWriteClient, error)
- func (c *BigQueryWriteClient) AppendRows(ctx context.Context, opts ...gax.CallOption) (storagepb.BigQueryWrite_AppendRowsClient, error)
- func (c *BigQueryWriteClient) BatchCommitWriteStreams(ctx context.Context, req *storagepb.BatchCommitWriteStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCommitWriteStreamsResponse, error)
- func (c *BigQueryWriteClient) Close() error
- func (c *BigQueryWriteClient) Connection() *grpc.ClientConn
- func (c *BigQueryWriteClient) CreateWriteStream(ctx context.Context, req *storagepb.CreateWriteStreamRequest, opts ...gax.CallOption) (*storagepb.WriteStream, error)
- func (c *BigQueryWriteClient) FinalizeWriteStream(ctx context.Context, req *storagepb.FinalizeWriteStreamRequest, opts ...gax.CallOption) (*storagepb.FinalizeWriteStreamResponse, error)
- func (c *BigQueryWriteClient) GetWriteStream(ctx context.Context, req *storagepb.GetWriteStreamRequest, opts ...gax.CallOption) (*storagepb.WriteStream, error)
Examples ¶
- BigQueryWriteClient.AppendRows
- BigQueryWriteClient.BatchCommitWriteStreams
- BigQueryWriteClient.CreateWriteStream
- BigQueryWriteClient.FinalizeWriteStream
- BigQueryWriteClient.GetWriteStream
- NewBigQueryWriteClient
Functions ¶
func DefaultAuthScopes ¶
func DefaultAuthScopes() []string
DefaultAuthScopes reports the default set of authentication scopes to use with this package.
Types ¶
type BigQueryWriteCallOptions ¶
type BigQueryWriteCallOptions struct { CreateWriteStream []gax.CallOption AppendRows []gax.CallOption GetWriteStream []gax.CallOption FinalizeWriteStream []gax.CallOption BatchCommitWriteStreams []gax.CallOption }
BigQueryWriteCallOptions contains the retry settings for each method of BigQueryWriteClient.
type BigQueryWriteClient ¶
type BigQueryWriteClient struct { // The call options for this service. CallOptions *BigQueryWriteCallOptions // contains filtered or unexported fields }
BigQueryWriteClient is a client for interacting with BigQuery Storage API.
Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
func NewBigQueryWriteClient ¶
func NewBigQueryWriteClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryWriteClient, error)
NewBigQueryWriteClient creates a new BigQuery write client.
BigQuery Write API.
The Write API can be used to write data to BigQuery.
func (*BigQueryWriteClient) AppendRows ¶
func (c *BigQueryWriteClient) AppendRows(ctx context.Context, opts ...gax.CallOption) (storagepb.BigQueryWrite_AppendRowsClient, error)
AppendRows appends data to the given stream.
If offset is specified, the offset is checked against the end of stream. The server returns OUT_OF_RANGE in AppendRowsResponse if an attempt is made to append to an offset beyond the current end of the stream or ALREADY_EXISTS if the user provides an offset that has already been written to. The user can retry with an adjusted offset within the same RPC stream. If offset is not specified, append happens at the end of the stream.
The response contains the offset at which the append happened. Responses are received in the same order in which requests are sent. There will be one response for each successful request. If the offset is not set in response, it means append didn’t happen due to some errors. If one request fails, all the subsequent requests will also fail until a success request is made again.
If the stream is of PENDING type, data will only be available for read
operations after the stream is committed.
Code:play
Example¶
package main
import (
"context"
"io"
storage "cloud.google.com/go/bigquery/storage/apiv1alpha2"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
)
func main() {
	// import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
	ctx := context.Background()
	client, err := storage.NewBigQueryWriteClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	stream, err := client.AppendRows(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// Send requests from a separate goroutine so that sending and
	// receiving can proceed concurrently on the bidirectional stream.
	go func() {
		requests := []*storagepb.AppendRowsRequest{
			// TODO: Create requests.
		}
		for _, request := range requests {
			if err := stream.Send(request); err != nil {
				// TODO: Handle error.
			}
		}
		stream.CloseSend()
	}()
	// Drain responses until the server closes the stream.
	for {
		response, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use response.
		_ = response
	}
}
func (*BigQueryWriteClient) BatchCommitWriteStreams ¶
func (c *BigQueryWriteClient) BatchCommitWriteStreams(ctx context.Context, req *storagepb.BatchCommitWriteStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCommitWriteStreamsResponse, error)
BatchCommitWriteStreams atomically commits a group of PENDING streams that belong to the same
parent table.
Streams must be finalized before commit and cannot be committed multiple
times. Once a stream is committed, data in the stream becomes available
for read operations.
Code:play
Example¶
package main
import (
"context"
storage "cloud.google.com/go/bigquery/storage/apiv1alpha2"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
)
func main() {
	// import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
	ctx := context.Background()
	client, err := storage.NewBigQueryWriteClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	request := &storagepb.BatchCommitWriteStreamsRequest{
		// TODO: Fill request struct fields.
	}
	response, err := client.BatchCommitWriteStreams(ctx, request)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use response.
	_ = response
}
func (*BigQueryWriteClient) Close ¶
func (c *BigQueryWriteClient) Close() error
Close closes the connection to the API service. The user should invoke this when the client is no longer required.
func (*BigQueryWriteClient) Connection ¶
func (c *BigQueryWriteClient) Connection() *grpc.ClientConn
Connection returns a connection to the API service.
Deprecated.
func (*BigQueryWriteClient) CreateWriteStream ¶
func (c *BigQueryWriteClient) CreateWriteStream(ctx context.Context, req *storagepb.CreateWriteStreamRequest, opts ...gax.CallOption) (*storagepb.WriteStream, error)
CreateWriteStream creates a write stream to the given table.
Code:play
Example¶
package main
import (
"context"
storage "cloud.google.com/go/bigquery/storage/apiv1alpha2"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
)
func main() {
	// import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
	ctx := context.Background()
	client, err := storage.NewBigQueryWriteClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	request := &storagepb.CreateWriteStreamRequest{
		// TODO: Fill request struct fields.
	}
	response, err := client.CreateWriteStream(ctx, request)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use response.
	_ = response
}
func (*BigQueryWriteClient) FinalizeWriteStream ¶
func (c *BigQueryWriteClient) FinalizeWriteStream(ctx context.Context, req *storagepb.FinalizeWriteStreamRequest, opts ...gax.CallOption) (*storagepb.FinalizeWriteStreamResponse, error)
FinalizeWriteStream finalizes a write stream so that no new data can be appended to the
stream.
Code:play
Example¶
package main
import (
"context"
storage "cloud.google.com/go/bigquery/storage/apiv1alpha2"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
)
func main() {
	// import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
	ctx := context.Background()
	client, err := storage.NewBigQueryWriteClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	request := &storagepb.FinalizeWriteStreamRequest{
		// TODO: Fill request struct fields.
	}
	response, err := client.FinalizeWriteStream(ctx, request)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use response.
	_ = response
}
func (*BigQueryWriteClient) GetWriteStream ¶
func (c *BigQueryWriteClient) GetWriteStream(ctx context.Context, req *storagepb.GetWriteStreamRequest, opts ...gax.CallOption) (*storagepb.WriteStream, error)
GetWriteStream gets a write stream.
Code:play
Example¶
package main
import (
"context"
storage "cloud.google.com/go/bigquery/storage/apiv1alpha2"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
)
func main() {
	// import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2"
	ctx := context.Background()
	client, err := storage.NewBigQueryWriteClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	request := &storagepb.GetWriteStreamRequest{
		// TODO: Fill request struct fields.
	}
	response, err := client.GetWriteStream(ctx, request)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use response.
	_ = response
}
Source Files ¶
big_query_write_client.go doc.go
- Version
- v1.7.0
- Published
- May 6, 2020
- Platform
- linux/amd64
- Imports
- 15 packages
- Last checked
- 1 hour ago
Tools for package owners.