Mirror of https://github.com/restic/restic.git, synced 2025-08-23 12:57:56 +00:00.
Vendor dependencies for GCS
vendor/cloud.google.com/go/bigquery/bigquery.go (generated, vendored, new normal file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

// TODO(mcgreevy): support dry-run mode when creating jobs.

import (
    "fmt"

    "google.golang.org/api/option"
    htransport "google.golang.org/api/transport/http"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

const prodAddr = "https://www.googleapis.com/bigquery/v2/"

// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference.
type ExternalData interface {
    externalDataConfig() bq.ExternalDataConfiguration
}

const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"

// Client may be used to perform BigQuery operations.
type Client struct {
    service   service
    projectID string
}

// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
    o := []option.ClientOption{
        option.WithEndpoint(prodAddr),
        option.WithScopes(Scope),
        option.WithUserAgent(userAgent),
    }
    o = append(o, opts...)
    httpClient, endpoint, err := htransport.NewClient(ctx, o...)
    if err != nil {
        return nil, fmt.Errorf("dialing: %v", err)
    }

    s, err := newBigqueryService(httpClient, endpoint)
    if err != nil {
        return nil, fmt.Errorf("constructing bigquery client: %v", err)
    }

    c := &Client{
        service:   s,
        projectID: projectID,
    }
    return c, nil
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
    return nil
}

func (c *Client) insertJob(ctx context.Context, conf *insertJobConf) (*Job, error) {
    job, err := c.service.insertJob(ctx, c.projectID, conf)
    if err != nil {
        return nil, err
    }
    job.c = c
    return job, nil
}
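Aside (not part of the vendored file): the option ordering in NewClient above is deliberate. Options are applied in order, so later options win, and appending the caller's opts after the package defaults lets a caller override the endpoint, scopes, or user agent. A minimal runnable sketch of the same merge pattern, where mergeOptions is a hypothetical helper and the endpoint/scope strings are copied from bigquery.go:

    package main

    import (
        "fmt"

        "google.golang.org/api/option"
    )

    // mergeOptions lists package defaults first, then appends caller
    // options, so a later caller option (e.g. a custom endpoint for a
    // local emulator) takes precedence when the transport resolves them.
    func mergeOptions(userOpts ...option.ClientOption) []option.ClientOption {
        defaults := []option.ClientOption{
            option.WithEndpoint("https://www.googleapis.com/bigquery/v2/"),
            option.WithScopes("https://www.googleapis.com/auth/bigquery"),
        }
        return append(defaults, userOpts...)
    }

    func main() {
        opts := mergeOptions(option.WithEndpoint("http://localhost:9050/"))
        fmt.Println(len(opts)) // 3: two defaults plus one caller override that wins
    }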
vendor/cloud.google.com/go/bigquery/copy.go (generated, vendored, new normal file, 74 lines)
@@ -0,0 +1,74 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
    // JobID is the ID to use for the copy job. If unset, a job ID will be automatically created.
    JobID string

    // Srcs are the tables from which data will be copied.
    Srcs []*Table

    // Dst is the table into which the data will be copied.
    Dst *Table

    // CreateDisposition specifies the circumstances under which the destination table will be created.
    // The default is CreateIfNeeded.
    CreateDisposition TableCreateDisposition

    // WriteDisposition specifies how existing data in the destination table is treated.
    // The default is WriteAppend.
    WriteDisposition TableWriteDisposition
}

// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
    CopyConfig
    c *Client
}

// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
    return &Copier{
        c: t.c,
        CopyConfig: CopyConfig{
            Srcs: srcs,
            Dst:  t,
        },
    }
}

// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
    conf := &bq.JobConfigurationTableCopy{
        CreateDisposition: string(c.CreateDisposition),
        WriteDisposition:  string(c.WriteDisposition),
        DestinationTable:  c.Dst.tableRefProto(),
    }
    for _, t := range c.Srcs {
        conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
    }
    job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}}
    setJobRef(job, c.JobID, c.c.projectID)
    return c.c.insertJob(ctx, &insertJobConf{job: job})
}
vendor/cloud.google.com/go/bigquery/copy_test.go (generated, vendored, new normal file, 137 lines)
@@ -0,0 +1,137 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"

    "cloud.google.com/go/internal/testutil"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
    return &bq.Job{
        Configuration: &bq.JobConfiguration{
            Copy: &bq.JobConfigurationTableCopy{
                DestinationTable: &bq.TableReference{
                    ProjectId: "d-project-id",
                    DatasetId: "d-dataset-id",
                    TableId:   "d-table-id",
                },
                SourceTables: []*bq.TableReference{
                    {
                        ProjectId: "s-project-id",
                        DatasetId: "s-dataset-id",
                        TableId:   "s-table-id",
                    },
                },
            },
        },
    }
}

func TestCopy(t *testing.T) {
    testCases := []struct {
        dst    *Table
        srcs   []*Table
        config CopyConfig
        want   *bq.Job
    }{
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            srcs: []*Table{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            want: defaultCopyJob(),
        },
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            srcs: []*Table{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            config: CopyConfig{
                CreateDisposition: CreateNever,
                WriteDisposition:  WriteTruncate,
            },
            want: func() *bq.Job {
                j := defaultCopyJob()
                j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
                j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
                return j
            }(),
        },
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            srcs: []*Table{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            config: CopyConfig{JobID: "job-id"},
            want: func() *bq.Job {
                j := defaultCopyJob()
                j.JobReference = &bq.JobReference{
                    JobId:     "job-id",
                    ProjectId: "client-project-id",
                }
                return j
            }(),
        },
    }

    for _, tc := range testCases {
        s := &testService{}
        c := &Client{
            service:   s,
            projectID: "client-project-id",
        }
        tc.dst.c = c
        copier := tc.dst.CopierFrom(tc.srcs...)
        tc.config.Srcs = tc.srcs
        tc.config.Dst = tc.dst
        copier.CopyConfig = tc.config
        if _, err := copier.Run(context.Background()); err != nil {
            t.Errorf("err calling Run: %v", err)
            continue
        }
        if !testutil.Equal(s.Job, tc.want) {
            t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
        }
    }
}
vendor/cloud.google.com/go/bigquery/create_table_test.go (generated, vendored, new normal file, 110 lines)
@@ -0,0 +1,110 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"
    "time"

    "github.com/google/go-cmp/cmp"

    "cloud.google.com/go/internal/testutil"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

type createTableRecorder struct {
    conf *createTableConf
    service
}

func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
    rec.conf = conf
    return nil
}

func TestCreateTableOptions(t *testing.T) {
    s := &createTableRecorder{}
    c := &Client{
        projectID: "p",
        service:   s,
    }
    ds := c.Dataset("d")
    table := ds.Table("t")
    exp := time.Now()
    q := "query"
    if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil {
        t.Fatalf("err calling Table.Create: %v", err)
    }
    want := createTableConf{
        projectID:      "p",
        datasetID:      "d",
        tableID:        "t",
        expiration:     exp,
        viewQuery:      q,
        useStandardSQL: true,
    }
    equal := func(x, y createTableConf) bool {
        return testutil.Equal(x, y, cmp.AllowUnexported(createTableConf{}))
    }

    if !equal(*s.conf, want) {
        t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
    }

    sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
    if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
        t.Fatalf("err calling Table.Create: %v", err)
    }
    want = createTableConf{
        projectID:  "p",
        datasetID:  "d",
        tableID:    "t",
        expiration: exp,
        // No need for an elaborate schema, that is tested in schema_test.go.
        schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
            },
        },
    }
    if !equal(*s.conf, want) {
        t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
    }

    partitionCases := []struct {
        timePartitioning   TimePartitioning
        expectedExpiration time.Duration
    }{
        {TimePartitioning{}, time.Duration(0)},
        {TimePartitioning{time.Second}, time.Second},
    }

    for _, c := range partitionCases {
        if err := table.Create(context.Background(), c.timePartitioning); err != nil {
            t.Fatalf("err calling Table.Create: %v", err)
        }
        want = createTableConf{
            projectID:        "p",
            datasetID:        "d",
            tableID:          "t",
            timePartitioning: &TimePartitioning{c.expectedExpiration},
        }
        if !equal(*s.conf, want) {
            t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
        }
    }
}
vendor/cloud.google.com/go/bigquery/dataset.go (generated, vendored, new normal file, 188 lines)
@@ -0,0 +1,188 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "time"

    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
    ProjectID string
    DatasetID string
    c         *Client
}

type DatasetMetadata struct {
    CreationTime           time.Time
    LastModifiedTime       time.Time // When the dataset or any of its tables were modified.
    DefaultTableExpiration time.Duration
    Description            string            // The user-friendly description of this dataset.
    Name                   string            // The user-friendly name for this dataset.
    ID                     string
    Location               string            // The geo location of the dataset.
    Labels                 map[string]string // User-provided labels.
    // TODO(jba): access rules
}

// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
    return c.DatasetInProject(c.projectID, id)
}

// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
    return &Dataset{
        ProjectID: projectID,
        DatasetID: datasetID,
        c:         c,
    }
}

// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
    return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID)
}

// Delete deletes the dataset.
func (d *Dataset) Delete(ctx context.Context) error {
    return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID)
}

// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
    return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
}

// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
    return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}

// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
    it := &TableIterator{
        ctx:     ctx,
        dataset: d,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.tables) },
        func() interface{} { b := it.tables; it.tables = nil; return b })
    return it
}

// A TableIterator is an iterator over Tables.
type TableIterator struct {
    ctx      context.Context
    dataset  *Dataset
    tables   []*Table
    pageInfo *iterator.PageInfo
    nextFunc func() error
}

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
    if err := it.nextFunc(); err != nil {
        return nil, err
    }
    t := it.tables[0]
    it.tables = it.tables[1:]
    return t, nil
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
    tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken)
    if err != nil {
        return "", err
    }
    for _, t := range tables {
        t.c = it.dataset.c
        it.tables = append(it.tables, t)
    }
    return tok, nil
}

// Datasets returns an iterator over the datasets in the Client's project.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
    return c.DatasetsInProject(ctx, c.projectID)
}

// DatasetsInProject returns an iterator over the datasets in the provided project.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
    it := &DatasetIterator{
        ctx:       ctx,
        c:         c,
        projectID: projectID,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.items) },
        func() interface{} { b := it.items; it.items = nil; return b })
    return it
}

// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
    // ListHidden causes hidden datasets to be listed when set to true.
    ListHidden bool

    // Filter restricts the datasets returned by label. The filter syntax is described in
    // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
    Filter string

    ctx       context.Context
    projectID string
    c         *Client
    pageInfo  *iterator.PageInfo
    nextFunc  func() error
    items     []*Dataset
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *DatasetIterator) Next() (*Dataset, error) {
    if err := it.nextFunc(); err != nil {
        return nil, err
    }
    item := it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
    datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID,
        pageSize, pageToken, it.ListHidden, it.Filter)
    if err != nil {
        return "", err
    }
    for _, d := range datasets {
        d.c = it.c
        it.items = append(it.items, d)
    }
    return nextPageToken, nil
}
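Aside (not part of the vendored file): both Tables and Datasets above delegate paging to iterator.NewPageInfo, which calls fetch to refill an internal buffer and then pops items from the front on each Next. A self-contained sketch of that buffer-and-refetch shape, with hypothetical names (pager, next):

    package main

    import "fmt"

    // pager is a stripped-down version of the pattern dataset.go builds on:
    // fetch fills a buffer one page at a time, keyed by an opaque page token,
    // and next pops from the front, refetching only when the buffer is empty.
    type pager struct {
        buf   []string
        token string
        done  bool
        fetch func(token string) (items []string, next string)
    }

    func (p *pager) next() (string, bool) {
        for len(p.buf) == 0 && !p.done {
            items, next := p.fetch(p.token)
            p.buf, p.token = items, next
            p.done = next == "" // an empty next token means the last page
        }
        if len(p.buf) == 0 {
            return "", false
        }
        v := p.buf[0]
        p.buf = p.buf[1:]
        return v, true
    }

    func main() {
        // Two fake pages: token "" yields t1,t2 and points at token "2";
        // token "2" yields t3 and ends the listing.
        pages := map[string][]string{"": {"t1", "t2"}, "2": {"t3"}}
        tokens := map[string]string{"": "2", "2": ""}
        p := &pager{fetch: func(tok string) ([]string, string) { return pages[tok], tokens[tok] }}
        for v, ok := p.next(); ok; v, ok = p.next() {
            fmt.Println(v) // t1, t2, t3
        }
    }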
vendor/cloud.google.com/go/bigquery/dataset_test.go (generated, vendored, new normal file, 156 lines)
@@ -0,0 +1,156 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "strconv"
    "testing"

    "golang.org/x/net/context"
    itest "google.golang.org/api/iterator/testing"
)

// listTablesServiceStub services listTables requests by returning data from an in-memory list of tables.
type listTablesServiceStub struct {
    expectedProject, expectedDataset string
    tables                           []*Table
    service
}

func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
    if projectID != s.expectedProject {
        return nil, "", errors.New("wrong project id")
    }
    if datasetID != s.expectedDataset {
        return nil, "", errors.New("wrong dataset id")
    }
    const maxPageSize = 2
    if pageSize <= 0 || pageSize > maxPageSize {
        pageSize = maxPageSize
    }
    start := 0
    if pageToken != "" {
        var err error
        start, err = strconv.Atoi(pageToken)
        if err != nil {
            return nil, "", err
        }
    }
    end := start + pageSize
    if end > len(s.tables) {
        end = len(s.tables)
    }
    nextPageToken := ""
    if end < len(s.tables) {
        nextPageToken = strconv.Itoa(end)
    }
    return s.tables[start:end], nextPageToken, nil
}

func TestTables(t *testing.T) {
    t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
    t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
    t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
    allTables := []*Table{t1, t2, t3}
    c := &Client{
        service: &listTablesServiceStub{
            expectedProject: "x",
            expectedDataset: "y",
            tables:          allTables,
        },
        projectID: "x",
    }
    msg, ok := itest.TestIterator(allTables,
        func() interface{} { return c.Dataset("y").Tables(context.Background()) },
        func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
    if !ok {
        t.Error(msg)
    }
}

type listDatasetsFake struct {
    service

    projectID string
    datasets  []*Dataset
    hidden    map[*Dataset]bool
}

func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) {
    const maxPageSize = 2
    if pageSize <= 0 || pageSize > maxPageSize {
        pageSize = maxPageSize
    }
    if filter != "" {
        return nil, "", errors.New("filter not supported")
    }
    if projectID != df.projectID {
        return nil, "", errors.New("bad project ID")
    }
    start := 0
    if pageToken != "" {
        var err error
        start, err = strconv.Atoi(pageToken)
        if err != nil {
            return nil, "", err
        }
    }
    var (
        i             int
        result        []*Dataset
        nextPageToken string
    )
    for i = start; len(result) < pageSize && i < len(df.datasets); i++ {
        if df.hidden[df.datasets[i]] && !listHidden {
            continue
        }
        result = append(result, df.datasets[i])
    }
    if i < len(df.datasets) {
        nextPageToken = strconv.Itoa(i)
    }
    return result, nextPageToken, nil
}

func TestDatasets(t *testing.T) {
    service := &listDatasetsFake{projectID: "p"}
    client := &Client{service: service}
    datasets := []*Dataset{
        {"p", "a", client},
        {"p", "b", client},
        {"p", "hidden", client},
        {"p", "c", client},
    }
    service.datasets = datasets
    service.hidden = map[*Dataset]bool{datasets[2]: true}
    c := &Client{
        projectID: "p",
        service:   service,
    }
    msg, ok := itest.TestIterator(datasets,
        func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it },
        func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
    if !ok {
        t.Fatalf("ListHidden=true: %s", msg)
    }

    msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]},
        func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it },
        func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
    if !ok {
        t.Fatalf("ListHidden=false: %s", msg)
    }
}
vendor/cloud.google.com/go/bigquery/doc.go (generated, vendored, new normal file, 295 lines)
@@ -0,0 +1,295 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package bigquery provides a client for the BigQuery service.

Note: This package is in beta. Some backwards-incompatible changes may occur.

The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.

Creating a Client

To start working with this package, create a client:

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, projectID)
    if err != nil {
        // TODO: Handle error.
    }

Querying

To query existing tables, create a Query and call its Read method:

    q := client.Query(`
        SELECT year, SUM(number) as num
        FROM [bigquery-public-data:usa_names.usa_1910_2013]
        WHERE name = "William"
        GROUP BY year
        ORDER BY year
    `)
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:

    for {
        var values []bigquery.Value
        err := it.Next(&values)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(values)
    }

You can also use a struct whose exported fields match the query:

    type Count struct {
        Year int
        Num  int
    }
    for {
        var c Count
        err := it.Next(&c)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(c)
    }

You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.

    job, err := q.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.

    jobID := job.ID()
    fmt.Printf("The job ID is %s\n", jobID)

To retrieve the job's results from the ID, first look up the Job:

    job, err = client.JobFromID(ctx, jobID)
    if err != nil {
        // TODO: Handle error.
    }

Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.

    it, err = job.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // Proceed with iteration as above.

Datasets and Tables

You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:

    myDataset := client.Dataset("my_dataset")
    yourDataset := client.DatasetInProject("your-project-id", "your_dataset")

These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

    if err := myDataset.Create(ctx); err != nil {
        // TODO: Handle error.
    }

You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.

    table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
Table.Create supports a few options. For instance, you could create a temporary table with:

    err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour)))
    if err != nil {
        // TODO: Handle error.
    }

We'll see how to create a table with a schema in the next section.

Schemas

There are two ways to construct schemas with this package.
You can build a schema by hand, like so:

    schema1 := bigquery.Schema{
        &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType},
        &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
    }

Or you can infer the schema from a struct:

    type student struct {
        Name   string
        Grades []int
    }
    schema2, err := bigquery.InferSchema(student{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema1 and schema2 are identical.

Struct inference supports tags like those of the encoding/json package,
so you can change names or ignore fields:

    type student2 struct {
        Name   string `bigquery:"full_name"`
        Grades []int
        Secret string `bigquery:"-"`
    }
    schema3, err := bigquery.InferSchema(student2{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema3 has fields "full_name" and "Grades".

Having constructed a schema, you can pass it to Table.Create as an option:

    if err := table.Create(ctx, schema1); err != nil {
        // TODO: Handle error.
    }

Copying

You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:

    copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
    copier.WriteDisposition = bigquery.WriteTruncate
    job, err = copier.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can chain the call to Run if you don't want to set options:

    job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can wait for your job to complete:

    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:

    for {
        status, err := job.Status(ctx)
        if err != nil {
            // TODO: Handle error.
        }
        if status.Done() {
            if status.Err() != nil {
                log.Fatalf("Job failed with error %v", status.Err())
            }
            break
        }
        time.Sleep(pollInterval)
    }

Loading and Uploading

There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.

For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    gcsRef.AllowJaggedRows = true
    loader := myDataset.Table("dest").LoaderFrom(gcsRef)
    loader.CreateDisposition = bigquery.CreateNever
    job, err = loader.Run(ctx)
    // Poll the job for completion if desired, as above.

To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.

    u := table.Uploader()
    // Item implements the ValueSaver interface.
    items := []*Item{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items); err != nil {
        // TODO: Handle error.
    }

You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:

    type Item2 struct {
        Name  string
        Size  float64
        Count int
    }
    // Item2 does not implement the ValueSaver interface; the schema is inferred.
    items2 := []*Item2{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items2); err != nil {
        // TODO: Handle error.
    }

Extracting

If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.

    extractor := table.ExtractorTo(gcsRef)
    extractor.DisableHeader = true
    job, err = extractor.Run(ctx)
    // Poll the job for completion if desired, as above.

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package bigquery // import "cloud.google.com/go/bigquery"
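A supplementary sketch, not part of the vendored doc.go: the StructSaver path mentioned under "Loading and Uploading" lets you pin the schema and insert IDs by hand instead of having them inferred per call. The dataset and table names are placeholders, and putWithStructSaver is a hypothetical helper; it assumes the StructSaver fields Struct, Schema, and InsertID as documented in this package:

    package bigqueryexamples

    import (
        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    type score struct {
        Name string
        Num  int
    }

    // putWithStructSaver infers the schema once, then reuses it for every
    // row while also assigning explicit insert IDs for deduplication.
    func putWithStructSaver(ctx context.Context, client *bigquery.Client) error {
        schema, err := bigquery.InferSchema(score{})
        if err != nil {
            return err
        }
        u := client.Dataset("my_dataset").Table("scores").Uploader()
        savers := []*bigquery.StructSaver{
            {Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "row-1"},
            {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "row-2"},
        }
        return u.Put(ctx, savers)
    }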
vendor/cloud.google.com/go/bigquery/error.go (generated, vendored, new normal file, 82 lines)
@@ -0,0 +1,82 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"

    bq "google.golang.org/api/bigquery/v2"
)

// An Error contains detailed information about a failed bigquery operation.
type Error struct {
    // Mirrors bq.ErrorProto, but drops DebugInfo
    Location, Message, Reason string
}

func (e Error) Error() string {
    return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func errorFromErrorProto(ep *bq.ErrorProto) *Error {
    if ep == nil {
        return nil
    }
    return &Error{
        Location: ep.Location,
        Message:  ep.Message,
        Reason:   ep.Reason,
    }
}

// A MultiError contains multiple related errors.
type MultiError []error

func (m MultiError) Error() string {
    switch len(m) {
    case 0:
        return "(0 errors)"
    case 1:
        return m[0].Error()
    case 2:
        return m[0].Error() + " (and 1 other error)"
    }
    return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
    InsertID string // The InsertID associated with the affected row.
    RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
    Errors   MultiError
}

func (e *RowInsertionError) Error() string {
    errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
    return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

func (pme PutMultiError) Error() string {
    plural := "s"
    if len(pme) == 1 {
        plural = ""
    }

    return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}
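For context, not part of the vendored file: a sketch of how these error types are meant to be consumed. An error returned by Uploader.Put can be type-asserted to PutMultiError, and each RowInsertionError carries the row index plus its own MultiError; the dataset and table names, the row type, and the insertRows helper are placeholders:

    package bigqueryexamples

    import (
        "fmt"

        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    type row struct {
        Name string
    }

    // insertRows reports per-row failures individually while still
    // returning the aggregate error to the caller. The type assertion
    // reflects PutMultiError's documented role as the per-row error
    // container for inserts.
    func insertRows(ctx context.Context, client *bigquery.Client, rows []*row) error {
        u := client.Dataset("my_dataset").Table("my_table").Uploader()
        err := u.Put(ctx, rows)
        if pme, ok := err.(bigquery.PutMultiError); ok {
            for _, rie := range pme {
                fmt.Printf("row %d (insert ID %q): %v\n", rie.RowIndex, rie.InsertID, rie.Errors)
            }
        }
        return err
    }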
vendor/cloud.google.com/go/bigquery/error_test.go (generated, vendored, new normal file, 110 lines)
@@ -0,0 +1,110 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "strings"
    "testing"

    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
)

func rowInsertionError(msg string) RowInsertionError {
    return RowInsertionError{Errors: []error{errors.New(msg)}}
}

func TestPutMultiErrorString(t *testing.T) {
    testCases := []struct {
        errs PutMultiError
        want string
    }{
        {
            errs: PutMultiError{},
            want: "0 row insertions failed",
        },
        {
            errs: PutMultiError{rowInsertionError("a")},
            want: "1 row insertion failed",
        },
        {
            errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
            want: "2 row insertions failed",
        },
    }

    for _, tc := range testCases {
        if tc.errs.Error() != tc.want {
            t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
        }
    }
}

func TestMultiErrorString(t *testing.T) {
    testCases := []struct {
        errs MultiError
        want string
    }{
        {
            errs: MultiError{},
            want: "(0 errors)",
        },
        {
            errs: MultiError{errors.New("a")},
            want: "a",
        },
        {
            errs: MultiError{errors.New("a"), errors.New("b")},
            want: "a (and 1 other error)",
        },
        {
            errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
            want: "a (and 2 other errors)",
        },
    }

    for _, tc := range testCases {
        if tc.errs.Error() != tc.want {
            t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
        }
    }
}

func TestErrorFromErrorProto(t *testing.T) {
    for _, test := range []struct {
        in   *bq.ErrorProto
        want *Error
    }{
        {nil, nil},
        {
            in:   &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
            want: &Error{Location: "L", Message: "M", Reason: "R"},
        },
    } {
        if got := errorFromErrorProto(test.in); !testutil.Equal(got, test.want) {
            t.Errorf("%v: got %v, want %v", test.in, got, test.want)
        }
    }
}

func TestErrorString(t *testing.T) {
    e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
    got := e.Error()
    if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
        t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
    }
}
vendor/cloud.google.com/go/bigquery/examples_test.go (generated, vendored, new normal file, 652 lines)
@@ -0,0 +1,652 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery_test

import (
    "fmt"
    "os"

    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

func ExampleNewClient() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    _ = client // TODO: Use client.
}

func ExampleClient_Dataset() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    ds := client.Dataset("my_dataset")
    fmt.Println(ds)
}

func ExampleClient_DatasetInProject() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    ds := client.DatasetInProject("their-project-id", "their-dataset")
    fmt.Println(ds)
}

func ExampleClient_Datasets() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    it := client.Datasets(ctx)
    _ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleClient_DatasetsInProject() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    it := client.DatasetsInProject(ctx, "their-project-id")
    _ = it // TODO: iterate using Next or iterator.Pager.
}

func getJobID() string { return "" }

func ExampleClient_JobFromID() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
    job, err := client.JobFromID(ctx, jobID)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(job)
}

func ExampleNewGCSReference() {
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    fmt.Println(gcsRef)
}

func ExampleClient_Query() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    q := client.Query("select name, num from t1")
    q.DefaultProjectID = "project-id"
    // TODO: set other options on the Query.
    // TODO: Call Query.Run or Query.Read.
}

func ExampleClient_Query_parameters() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    q := client.Query("select num from t1 where name = @user")
    q.Parameters = []bigquery.QueryParameter{
        {Name: "user", Value: "Elizabeth"},
    }
    // TODO: set other options on the Query.
    // TODO: Call Query.Run or Query.Read.
}

func ExampleQuery_Read() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    q := client.Query("select name, num from t1")
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    _ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleRowIterator_Next() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    q := client.Query("select name, num from t1")
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    for {
        var row []bigquery.Value
        err := it.Next(&row)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(row)
    }
}

func ExampleRowIterator_Next_struct() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }

    type score struct {
        Name string
        Num  int
    }

    q := client.Query("select name, num from t1")
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    for {
        var s score
        err := it.Next(&s)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(s)
    }
}

func ExampleJob_Read() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    q := client.Query("select name, num from t1")
    // Call Query.Run to get a Job, then call Read on the job.
    // Note: Query.Read is a shorthand for this.
    job, err := q.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    it, err := job.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    _ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleJob_Wait() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    ds := client.Dataset("my_dataset")
    job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    if status.Err() != nil {
        // TODO: Handle error.
    }
}

func ExampleDataset_Create() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    if err := client.Dataset("my_dataset").Create(ctx); err != nil {
        // TODO: Handle error.
    }
}

func ExampleDataset_Delete() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
        // TODO: Handle error.
    }
}

func ExampleDataset_Metadata() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    md, err := client.Dataset("my_dataset").Metadata(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(md)
}

func ExampleDataset_Table() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    // Table creates a reference to the table. It does not create the actual
    // table in BigQuery; to do so, use Table.Create.
    t := client.Dataset("my_dataset").Table("my_table")
    fmt.Println(t)
}

func ExampleDataset_Tables() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    it := client.Dataset("my_dataset").Tables(ctx)
    _ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleDatasetIterator_Next() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    it := client.Datasets(ctx)
    for {
        ds, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(ds)
    }
}

func ExampleInferSchema() {
    type Item struct {
        Name  string
        Size  float64
        Count int
    }
    schema, err := bigquery.InferSchema(Item{})
    if err != nil {
        fmt.Println(err)
        // TODO: Handle error.
    }
    for _, fs := range schema {
        fmt.Println(fs.Name, fs.Type)
    }
    // Output:
    // Name STRING
    // Size FLOAT
    // Count INTEGER
}

func ExampleInferSchema_tags() {
    type Item struct {
        Name   string
        Size   float64
        Count  int    `bigquery:"number"`
        Secret []byte `bigquery:"-"`
    }
    schema, err := bigquery.InferSchema(Item{})
    if err != nil {
        fmt.Println(err)
        // TODO: Handle error.
    }
    for _, fs := range schema {
        fmt.Println(fs.Name, fs.Type)
    }
    // Output:
    // Name STRING
    // Size FLOAT
    // number INTEGER
}

func ExampleTable_Create() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    t := client.Dataset("my_dataset").Table("new-table")
    if err := t.Create(ctx); err != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_Create_schema() {
    ctx := context.Background()
    // Infer table schema from a Go type.
    schema, err := bigquery.InferSchema(Item{})
    if err != nil {
        // TODO: Handle error.
    }
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    t := client.Dataset("my_dataset").Table("new-table")
    if err := t.Create(ctx, schema); err != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_Delete() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_Metadata() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(md)
}

func ExampleTable_Uploader() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    u := client.Dataset("my_dataset").Table("my_table").Uploader()
    _ = u // TODO: Use u.
}

func ExampleTable_Uploader_options() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    u := client.Dataset("my_dataset").Table("my_table").Uploader()
    u.SkipInvalidRows = true
    u.IgnoreUnknownValues = true
    _ = u // TODO: Use u.
}

func ExampleTable_CopierFrom() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    ds := client.Dataset("my_dataset")
    c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
    c.WriteDisposition = bigquery.WriteTruncate
    // TODO: set other options on the Copier.
    job, err := c.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    if status.Err() != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_ExtractorTo() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    gcsRef.FieldDelimiter = ":"
    // TODO: set other options on the GCSReference.
    ds := client.Dataset("my_dataset")
    extractor := ds.Table("my_table").ExtractorTo(gcsRef)
    extractor.DisableHeader = true
    // TODO: set other options on the Extractor.
    job, err := extractor.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    if status.Err() != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_LoaderFrom() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    gcsRef.AllowJaggedRows = true
    // TODO: set other options on the GCSReference.
    ds := client.Dataset("my_dataset")
    loader := ds.Table("my_table").LoaderFrom(gcsRef)
    loader.CreateDisposition = bigquery.CreateNever
    // TODO: set other options on the Loader.
    job, err := loader.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    if status.Err() != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_LoaderFrom_reader() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    f, err := os.Open("data.csv")
    if err != nil {
        // TODO: Handle error.
    }
    rs := bigquery.NewReaderSource(f)
    rs.AllowJaggedRows = true
    // TODO: set other options on the ReaderSource.
    ds := client.Dataset("my_dataset")
    loader := ds.Table("my_table").LoaderFrom(rs)
    loader.CreateDisposition = bigquery.CreateNever
    // TODO: set other options on the Loader.
    job, err := loader.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    if status.Err() != nil {
        // TODO: Handle error.
    }
}

func ExampleTable_Read() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
    _ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleTable_Update() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    t := client.Dataset("my_dataset").Table("my_table")
    tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
        Description: "my favorite table",
    })
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(tm)
}

func ExampleTableIterator_Next() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    it := client.Dataset("my_dataset").Tables(ctx)
    for {
        t, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(t)
    }
}

type Item struct {
    Name  string
    Size  float64
    Count int
}
||||
// Save implements the ValueSaver interface.
|
||||
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
|
||||
return map[string]bigquery.Value{
|
||||
"Name": i.Name,
|
||||
"Size": i.Size,
|
||||
"Count": i.Count,
|
||||
}, "", nil
|
||||
}
|
||||
|
||||
func ExampleUploader_Put() {
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
u := client.Dataset("my_dataset").Table("my_table").Uploader()
|
||||
// Item implements the ValueSaver interface.
|
||||
items := []*Item{
|
||||
{Name: "n1", Size: 32.6, Count: 7},
|
||||
{Name: "n2", Size: 4, Count: 2},
|
||||
{Name: "n3", Size: 101.5, Count: 1},
|
||||
}
|
||||
if err := u.Put(ctx, items); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
var schema bigquery.Schema
|
||||
|
||||
func ExampleUploader_Put_structSaver() {
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
u := client.Dataset("my_dataset").Table("my_table").Uploader()
|
||||
|
||||
type score struct {
|
||||
Name string
|
||||
Num int
|
||||
}
|
||||
|
||||
// Assume schema holds the table's schema.
|
||||
savers := []*bigquery.StructSaver{
|
||||
{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
|
||||
{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
|
||||
{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
|
||||
}
|
||||
if err := u.Put(ctx, savers); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleUploader_Put_struct() {
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
u := client.Dataset("my_dataset").Table("my_table").Uploader()
|
||||
|
||||
type score struct {
|
||||
Name string
|
||||
Num int
|
||||
}
|
||||
scores := []score{
|
||||
{Name: "n1", Num: 12},
|
||||
{Name: "n2", Num: 31},
|
||||
{Name: "n3", Num: 7},
|
||||
}
|
||||
// Schema is inferred from the score type.
|
||||
if err := u.Put(ctx, scores); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
76
vendor/cloud.google.com/go/bigquery/extract.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
	// JobID is the ID to use for the extract job. If empty, a job ID will be automatically created.
	JobID string

	// Src is the table from which data will be extracted.
	Src *Table

	// Dst is the destination into which the data will be extracted.
	Dst *GCSReference

	// DisableHeader disables the printing of a header row in exported data.
	DisableHeader bool
}

// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
	ExtractConfig
	c *Client
}

// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
	return &Extractor{
		c: t.c,
		ExtractConfig: ExtractConfig{
			Src: t,
			Dst: dst,
		},
	}
}

// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
	conf := &bq.JobConfigurationExtract{}
	job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}}

	setJobRef(job, e.JobID, e.c.projectID)

	conf.DestinationUris = append([]string{}, e.Dst.uris...)
	conf.Compression = string(e.Dst.Compression)
	conf.DestinationFormat = string(e.Dst.DestinationFormat)
	conf.FieldDelimiter = e.Dst.FieldDelimiter

	conf.SourceTable = e.Src.tableRefProto()

	if e.DisableHeader {
		f := false
		conf.PrintHeader = &f
	}

	return e.c.insertJob(ctx, &insertJobConf{job: job})
}
103
vendor/cloud.google.com/go/bigquery/extract_test.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"testing"

	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultExtractJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Extract: &bq.JobConfigurationExtract{
				SourceTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				DestinationUris: []string{"uri"},
			},
		},
	}
}

func TestExtract(t *testing.T) {
	s := &testService{}
	c := &Client{
		service:   s,
		projectID: "project-id",
	}

	testCases := []struct {
		dst    *GCSReference
		src    *Table
		config ExtractConfig
		want   *bq.Job
	}{
		{
			dst:  defaultGCS(),
			src:  c.Dataset("dataset-id").Table("table-id"),
			want: defaultExtractJob(),
		},
		{
			dst:    defaultGCS(),
			src:    c.Dataset("dataset-id").Table("table-id"),
			config: ExtractConfig{DisableHeader: true},
			want: func() *bq.Job {
				j := defaultExtractJob()
				f := false
				j.Configuration.Extract.PrintHeader = &f
				return j
			}(),
		},
		{
			dst: func() *GCSReference {
				g := NewGCSReference("uri")
				g.Compression = Gzip
				g.DestinationFormat = JSON
				g.FieldDelimiter = "\t"
				return g
			}(),
			src: c.Dataset("dataset-id").Table("table-id"),
			want: func() *bq.Job {
				j := defaultExtractJob()
				j.Configuration.Extract.Compression = "GZIP"
				j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Extract.FieldDelimiter = "\t"
				return j
			}(),
		},
	}

	for _, tc := range testCases {
		ext := tc.src.ExtractorTo(tc.dst)
		tc.config.Src = ext.Src
		tc.config.Dst = ext.Dst
		ext.ExtractConfig = tc.config
		if _, err := ext.Run(context.Background()); err != nil {
			t.Errorf("err calling extract: %v", err)
			continue
		}
		if !testutil.Equal(s.Job, tc.want) {
			t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
		}
	}
}
172
vendor/cloud.google.com/go/bigquery/file.go
generated
vendored
Normal file
@@ -0,0 +1,172 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"io"

	bq "google.golang.org/api/bigquery/v2"
)

// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
type ReaderSource struct {
	r io.Reader
	FileConfig
}

// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
	return &ReaderSource{r: r}
}

func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) {
	conf.media = r.r
	r.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
}

// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via a ReaderSource passed to Table.LoaderFrom.
type FileConfig struct {
	// SourceFormat is the format of the GCS data to be read.
	// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
	SourceFormat DataFormat

	// FieldDelimiter is the separator for fields in a CSV file, used when
	// reading or exporting data. The default is ",".
	FieldDelimiter string

	// The number of rows at the top of a CSV file that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64

	// AllowJaggedRows causes missing trailing optional columns to be tolerated
	// when reading CSV data. Missing values are treated as nulls.
	AllowJaggedRows bool

	// AllowQuotedNewlines sets whether quoted data sections containing
	// newlines are allowed when reading CSV data.
	AllowQuotedNewlines bool

	// AutoDetect indicates whether BigQuery should automatically infer the
	// options and schema for CSV and JSON sources.
	AutoDetect bool

	// Encoding is the character encoding of data to be read.
	Encoding Encoding

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// Schema describes the data. It is required when reading CSV or JSON data,
	// unless the data is being loaded into a table that already exists.
	Schema Schema

	// Quote is the value used to quote data sections in a CSV file. The
	// default quotation character is the double quote ("), which is used if
	// both Quote and ForceZeroQuote are unset.
	// To specify that no character should be interpreted as a quotation
	// character, set ForceZeroQuote to true.
	// Only used when reading data.
	Quote          string
	ForceZeroQuote bool
}

// quote returns the CSV quote character, or nil if unset.
func (fc *FileConfig) quote() *string {
	if fc.ForceZeroQuote {
		quote := ""
		return &quote
	}
	if fc.Quote == "" {
		return nil
	}
	return &fc.Quote
}
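
// Illustrative sketch (not part of the vendored source): the three states
// that the quote method above distinguishes.
//
//	var fc FileConfig
//	_ = fc.quote() // nil: let the server use its default quote character (")
//	fc.Quote = "'"
//	_ = fc.quote() // *string pointing at "'": quote fields with '
//	fc.ForceZeroQuote = true
//	_ = fc.quote() // *string pointing at "": disable quoting entirely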

func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
	conf.SkipLeadingRows = fc.SkipLeadingRows
	conf.SourceFormat = string(fc.SourceFormat)
	conf.Autodetect = fc.AutoDetect
	conf.AllowJaggedRows = fc.AllowJaggedRows
	conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
	conf.Encoding = string(fc.Encoding)
	conf.FieldDelimiter = fc.FieldDelimiter
	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
	conf.MaxBadRecords = fc.MaxBadRecords
	if fc.Schema != nil {
		conf.Schema = fc.Schema.asTableSchema()
	}
	conf.Quote = fc.quote()
}

func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
	format := fc.SourceFormat
	if format == "" {
		// Format must be explicitly set for external data sources.
		format = CSV
	}
	// TODO(jba): support AutoDetect.
	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
	conf.MaxBadRecords = fc.MaxBadRecords
	conf.SourceFormat = string(format)
	if fc.Schema != nil {
		conf.Schema = fc.Schema.asTableSchema()
	}
	if format == CSV {
		conf.CsvOptions = &bq.CsvOptions{
			AllowJaggedRows:     fc.AllowJaggedRows,
			AllowQuotedNewlines: fc.AllowQuotedNewlines,
			Encoding:            string(fc.Encoding),
			FieldDelimiter:      fc.FieldDelimiter,
			SkipLeadingRows:     fc.SkipLeadingRows,
			Quote:               fc.quote(),
		}
	}
}

// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
	CSV             DataFormat = "CSV"
	Avro            DataFormat = "AVRO"
	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)

// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
	UTF_8      Encoding = "UTF-8"
	ISO_8859_1 Encoding = "ISO-8859-1"
)
90
vendor/cloud.google.com/go/bigquery/file_test.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"testing"

	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"
	bq "google.golang.org/api/bigquery/v2"
)

func TestQuote(t *testing.T) {
	ptr := func(s string) *string { return &s }

	for _, test := range []struct {
		quote string
		force bool
		want  *string
	}{
		{"", false, nil},
		{"", true, ptr("")},
		{"-", false, ptr("-")},
		{"-", true, ptr("")},
	} {
		fc := FileConfig{
			Quote:          test.quote,
			ForceZeroQuote: test.force,
		}
		got := fc.quote()
		if (got == nil) != (test.want == nil) {
			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
		}
		if got != nil && test.want != nil && *got != *test.want {
			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
		}
	}
}

func TestPopulateLoadConfig(t *testing.T) {
	hyphen := "-"
	fc := FileConfig{
		SourceFormat:        CSV,
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Encoding:            UTF_8,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: Schema{
			stringFieldSchema(),
			nestedFieldSchema(),
		},
		Quote: hyphen,
	}
	want := &bq.JobConfigurationLoad{
		SourceFormat:        "CSV",
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Encoding:            "UTF-8",
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		Quote: &hyphen,
	}
	got := &bq.JobConfigurationLoad{}
	fc.populateLoadConfig(got)
	if !testutil.Equal(got, want) {
		t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
	}
}
68
vendor/cloud.google.com/go/bigquery/gcs.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import bq "google.golang.org/api/bigquery/v2"

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
	// TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user.
	uris []string

	FileConfig

	// DestinationFormat is the format to use when writing exported files.
	// Allowed values are: CSV, Avro, JSON. The default is CSV.
	// CSV is not supported for tables with nested or repeated fields.
	DestinationFormat DataFormat

	// Compression specifies the type of compression to apply when writing data
	// to Google Cloud Storage, or using this GCSReference as an ExternalData
	// source with CSV or JSON SourceFormat. Default is None.
	Compression Compression
}

// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
	return &GCSReference{uris: uri}
}
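
// Illustrative sketch (not part of the vendored source): one wildcard URI
// can stand for many sharded objects, per the doc comment above. The bucket
// and object names here are hypothetical.
//
//	gcs := NewGCSReference("gs://my-bucket/shard-*.csv")
//	gcs.SkipLeadingRows = 1 // each shard carries the same header row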

// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
	None Compression = "NONE"
	Gzip Compression = "GZIP"
)

func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) {
	conf.job.Configuration.Load.SourceUris = gcs.uris
	gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
}

func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration {
	conf := bq.ExternalDataConfiguration{
		Compression: string(gcs.Compression),
		SourceUris:  append([]string{}, gcs.uris...),
	}
	gcs.FileConfig.populateExternalDataConfig(&conf)
	return conf
}
1037
vendor/cloud.google.com/go/bigquery/integration_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
154
vendor/cloud.google.com/go/bigquery/iterator.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"reflect"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// A pageFetcher returns a page of rows, starting from the row specified by token.
type pageFetcher interface {
	fetch(ctx context.Context, s service, token string) (*readDataResult, error)
	setPaging(*pagingConf)
}

func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator {
	it := &RowIterator{
		ctx:     ctx,
		service: s,
		pf:      pf,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.rows) },
		func() interface{} { r := it.rows; it.rows = nil; return r })
	return it
}

// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
	ctx      context.Context
	service  service
	pf       pageFetcher
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// StartIndex can be set before the first call to Next. If PageInfo().Token
	// is also set, StartIndex is ignored.
	StartIndex uint64

	rows [][]Value

	schema       Schema       // populated on first call to fetch
	structLoader structLoader // used to populate a pointer to a struct
}

// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to a new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value.
//
// If dst is a pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
//   STRING      string
//   BOOL        bool
//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
//   FLOAT       float32, float64
//   BYTES       []byte
//   TIMESTAMP   time.Time
//   DATE        civil.Date
//   TIME        civil.Time
//   DATETIME    civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type.
// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field.
// If your table contains NULLs, use a *[]Value or *map[string]Value.
func (it *RowIterator) Next(dst interface{}) error {
	var vl ValueLoader
	switch dst := dst.(type) {
	case ValueLoader:
		vl = dst
	case *[]Value:
		vl = (*valueList)(dst)
	case *map[string]Value:
		vl = (*valueMap)(dst)
	default:
		if !isStructPtr(dst) {
			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
		}
	}
	if err := it.nextFunc(); err != nil {
		return err
	}
	row := it.rows[0]
	it.rows = it.rows[1:]

	if vl == nil {
		// This can only happen if dst is a pointer to a struct. We couldn't
		// set vl above because we need the schema.
		if err := it.structLoader.set(dst, it.schema); err != nil {
			return err
		}
		vl = &it.structLoader
	}
	return vl.Load(row, it.schema)
}
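
// Illustrative sketch (not part of the vendored source): draining a
// RowIterator into a struct type, as the doc comment for Next describes.
// The iterator it and the row type here are hypothetical and must match the
// table's schema.
//
//	type row struct {
//		Name  string
//		Count int
//	}
//	for {
//		var r row
//		err := it.Next(&r)
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(r.Name, r.Count)
//	}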

func isStructPtr(x interface{}) bool {
	t := reflect.TypeOf(x)
	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
	pc := &pagingConf{}
	if pageSize > 0 {
		pc.recordsPerRequest = int64(pageSize)
		pc.setRecordsPerRequest = true
	}
	if pageToken == "" {
		pc.startIndex = it.StartIndex
	}
	it.pf.setPaging(pc)
	res, err := it.pf.fetch(it.ctx, it.service, pageToken)
	if err != nil {
		return "", err
	}
	it.rows = append(it.rows, res.rows...)
	it.schema = res.schema
	return res.pageToken, nil
}
364
vendor/cloud.google.com/go/bigquery/iterator_test.go
generated
vendored
Normal file
@@ -0,0 +1,364 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"testing"

	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

type fetchResponse struct {
	result *readDataResult // The result to return.
	err    error           // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
	fetchResponses map[string]fetchResponse

	err error
}

func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	call, ok := pf.fetchResponses[token]
	if !ok {
		pf.err = fmt.Errorf("Unexpected page token: %q", token)
	}
	return call.result, call.err
}

func (pf *pageFetcherStub) setPaging(pc *pagingConf) {}

func TestIterator(t *testing.T) {
	var (
		iiSchema = Schema{
			{Type: IntegerFieldType},
			{Type: IntegerFieldType},
		}
		siSchema = Schema{
			{Type: StringFieldType},
			{Type: IntegerFieldType},
		}
	)
	fetchFailure := errors.New("fetch failure")

	testCases := []struct {
		desc           string
		pageToken      string
		fetchResponses map[string]fetchResponse
		want           [][]Value
		wantErr        error
		wantSchema     Schema
	}{
		{
			desc: "Iteration over single empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{},
						schema:    Schema{},
					},
				},
			},
			want:       [][]Value{},
			wantSchema: Schema{},
		},
		{
			desc: "Iteration over single page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{1, 2}, {11, 12}},
			wantSchema: iiSchema,
		},
		{
			desc: "Iteration over single page with different schema",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{"1", 2}, {"11", 12}},
						schema:    siSchema,
					},
				},
			},
			want:       [][]Value{{"1", 2}, {"11", 12}},
			wantSchema: siSchema,
		},
		{
			desc: "Iteration over two pages",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},
		{
			desc: "Server response includes empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "b",
						rows:      [][]Value{},
						schema:    iiSchema,
					},
				},
				"b": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},
		{
			desc: "Fetch error",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					// We return some data from this fetch, but also an error.
					// So the end result should include only data from the previous fetch.
					err: fetchFailure,
					result: &readDataResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{1, 2}, {11, 12}},
			wantErr:    fetchFailure,
			wantSchema: iiSchema,
		},

		{
			desc:      "Skip over an entire page",
			pageToken: "a",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},

		{
			desc:      "Skip beyond all data",
			pageToken: "b",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
				"b": {
					result: &readDataResult{},
				},
			},
			// In this test case, Next returns iterator.Done on its first call,
			// so we never read any values.
			want:       [][]Value{},
			wantSchema: Schema{},
		},
	}

	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf)
		it.PageInfo().Token = tc.pageToken
		values, schema, err := consumeRowIterator(it)
		if err != tc.wantErr {
			t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
		}
		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
			t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
		}
		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
			t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
		}
	}
}

type valueListWithSchema struct {
	vals   valueList
	schema Schema
}

func (v *valueListWithSchema) Load(vs []Value, s Schema) error {
	v.vals.Load(vs, s)
	v.schema = s
	return nil
}

// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
	var got [][]Value
	var schema Schema
	for {
		var vls valueListWithSchema
		err := it.Next(&vls)
		if err == iterator.Done {
			return got, schema, nil
		}
		if err != nil {
			return got, schema, err
		}
		got = append(got, vls.vals)
		schema = vls.schema
	}
}

func TestNextDuringErrorState(t *testing.T) {
	pf := &pageFetcherStub{
		fetchResponses: map[string]fetchResponse{
			"": {err: errors.New("bang")},
		},
	}
	it := newRowIterator(context.Background(), nil, pf)
	var vals []Value
	if err := it.Next(&vals); err == nil {
		t.Errorf("Expected error after calling Next")
	}
	if err := it.Next(&vals); err == nil {
		t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
	}
}

func TestNextAfterFinished(t *testing.T) {
	testCases := []struct {
		fetchResponses map[string]fetchResponse
		want           [][]Value
	}{
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}},
		},
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{},
					},
				},
			},
			want: [][]Value{},
		},
	}

	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf)

		values, _, err := consumeRowIterator(it)
		if err != nil {
			t.Fatal(err)
		}
		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
			t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
		}
		// Try calling Next again.
		var vals []Value
		if err := it.Next(&vals); err != iterator.Done {
			t.Errorf("Expected Done calling Next when there are no more values")
		}
	}
}

func TestIteratorNextTypes(t *testing.T) {
	it := newRowIterator(context.Background(), nil, nil)
	for _, v := range []interface{}{3, "s", []int{}, &[]int{},
		map[string]Value{}, &map[string]interface{}{},
		struct{}{},
	} {
		if err := it.Next(v); err == nil {
			t.Errorf("%v: want error, got nil", v)
		}
	}
}
326
vendor/cloud.google.com/go/bigquery/job.go
generated
vendored
Normal file
@@ -0,0 +1,326 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"time"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
	c         *Client
	projectID string
	jobID     string

	isQuery          bool
	destinationTable *bq.TableReference // table to read query results from
}

// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
	job, err := c.service.getJob(ctx, c.projectID, id)
	if err != nil {
		return nil, err
	}
	job.c = c
	return job, nil
}

func (j *Job) ID() string {
	return j.jobID
}

// State is one of a sequence of states that a Job progresses through as it is processed.
type State int

const (
	Pending State = iota
	Running
	Done
)

// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
	State State

	err error

	// All errors encountered during the running of the job.
	// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
	Errors []*Error

	// Statistics about the job.
	Statistics *JobStatistics
}

// setJobRef initializes job's JobReference if given a non-empty jobID.
// projectID must be non-empty.
func setJobRef(job *bq.Job, jobID, projectID string) {
	if jobID == "" {
		return
	}
	// We don't check whether projectID is empty; the server will return an
	// error when it encounters the resulting JobReference.

	job.JobReference = &bq.JobReference{
		JobId:     jobID,
		ProjectId: projectID,
	}
}

// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
	return s.State == Done
}

// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
	return s.err
}

// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
	js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID)
	if err != nil {
		return nil, err
	}
	// Fill in the client field of Tables in the statistics.
	if js.Statistics != nil {
		if qs, ok := js.Statistics.Details.(*QueryStatistics); ok {
			for _, t := range qs.ReferencedTables {
				t.c = j.c
			}
		}
	}
	return js, nil
}

// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
	return j.c.service.jobCancel(ctx, j.projectID, j.jobID)
}

// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
	if j.isQuery {
		// We can avoid polling for query jobs.
		if _, err := j.c.service.waitForQuery(ctx, j.projectID, j.jobID); err != nil {
			return nil, err
		}
		// Note: extra RPC even if you just want to wait for the query to finish.
		js, err := j.Status(ctx)
		if err != nil {
			return nil, err
		}
		return js, nil
	}
	// Non-query jobs must poll.
	var js *JobStatus
	err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		js, err = j.Status(ctx)
		if err != nil {
			return true, err
		}
		if js.Done() {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	return js, nil
}
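
// Illustrative sketch (not part of the vendored source): the two distinct
// error checks that the Wait doc comment requires of callers.
//
//	status, err := job.Wait(ctx)
//	if err != nil {
//		// The status could not be retrieved at all.
//	}
//	if err := status.Err(); err != nil {
//		// The job completed, but unsuccessfully.
//	}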

// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
	if !j.isQuery {
		return nil, errors.New("bigquery: cannot read from a non-query job")
	}
	var projectID string
	if j.destinationTable != nil {
		projectID = j.destinationTable.ProjectId
	} else {
		projectID = j.c.projectID
	}

	schema, err := j.c.service.waitForQuery(ctx, projectID, j.jobID)
	if err != nil {
		return nil, err
	}
	// The destination table should only be nil if there was a query error.
	if j.destinationTable == nil {
		return nil, errors.New("bigquery: query job missing destination table")
	}
	return newRowIterator(ctx, j.c.service, &readTableConf{
		projectID: j.destinationTable.ProjectId,
		datasetID: j.destinationTable.DatasetId,
		tableID:   j.destinationTable.TableId,
		schema:    schema,
	}), nil
}

// JobStatistics contains statistics about a job.
type JobStatistics struct {
	CreationTime        time.Time
	StartTime           time.Time
	EndTime             time.Time
	TotalBytesProcessed int64

	Details Statistics
}

// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
	implementsStatistics()
}

// ExtractStatistics contains statistics about an extract job.
type ExtractStatistics struct {
	// The number of files per destination URI or URI pattern specified in the
	// extract configuration. These values will be in the same order as the
	// URIs specified in the 'destinationUris' field.
	DestinationURIFileCounts []int64
}

// LoadStatistics contains statistics about a load job.
type LoadStatistics struct {
	// The number of bytes of source data in a load job.
	InputFileBytes int64

	// The number of source files in a load job.
	InputFiles int64

	// Size of the loaded data in bytes. Note that while a load job is in the
	// running state, this value may change.
	OutputBytes int64

	// The number of rows imported in a load job. Note that while an import job is
	// in the running state, this value may change.
	OutputRows int64
}

// QueryStatistics contains statistics about a query job.
type QueryStatistics struct {
	// Billing tier for the job.
	BillingTier int64

	// Whether the query result was fetched from the query cache.
	CacheHit bool

	// The type of query statement, if valid.
	StatementType string

	// Total bytes billed for the job.
	TotalBytesBilled int64

	// Total bytes processed for the job.
	TotalBytesProcessed int64

	// Describes execution plan for the query.
	QueryPlan []*ExplainQueryStage

	// The number of rows affected by a DML statement. Present only for DML
	// statements INSERT, UPDATE or DELETE.
	NumDMLAffectedRows int64

	// ReferencedTables: [Output-only, Experimental] Referenced tables for
	// the job. Queries that reference more than 50 tables will not have a
	// complete list.
	ReferencedTables []*Table

	// The schema of the results. Present only for successful dry run of
	// non-legacy SQL queries.
	Schema Schema

	// Standard SQL: list of undeclared query parameter names detected during a
	// dry run validation.
	UndeclaredQueryParameterNames []string
}

// ExplainQueryStage describes one stage of a query.
type ExplainQueryStage struct {
	// Relative amount of the total time the average shard spent on CPU-bound tasks.
	ComputeRatioAvg float64

	// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
	ComputeRatioMax float64

	// Unique ID for stage within plan.
	ID int64

	// Human-readable name for stage.
	Name string

	// Relative amount of the total time the average shard spent reading input.
	ReadRatioAvg float64

	// Relative amount of the total time the slowest shard spent reading input.
	ReadRatioMax float64

	// Number of records read into the stage.
	RecordsRead int64

	// Number of records written by the stage.
	RecordsWritten int64

	// Current status for the stage.
	Status string

	// List of operations within the stage in dependency order (approximately
	// chronological).
	Steps []*ExplainQueryStep

	// Relative amount of the total time the average shard spent waiting to be scheduled.
	WaitRatioAvg float64

	// Relative amount of the total time the slowest shard spent waiting to be scheduled.
	WaitRatioMax float64

	// Relative amount of the total time the average shard spent on writing output.
	WriteRatioAvg float64

	// Relative amount of the total time the slowest shard spent on writing output.
	WriteRatioMax float64
}

// ExplainQueryStep describes one step of a query stage.
type ExplainQueryStep struct {
	// Machine-readable operation type.
	Kind string

	// Human-readable stage descriptions.
	Substeps []string
}

func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics()    {}
func (*QueryStatistics) implementsStatistics()   {}
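
// Illustrative sketch (not part of the vendored source): recovering the
// concrete statistics type from JobStatistics.Details with a type switch,
// given a hypothetical status obtained from Job.Status or Job.Wait.
//
//	if status.Statistics != nil {
//		switch d := status.Statistics.Details.(type) {
//		case *QueryStatistics:
//			fmt.Println("bytes billed:", d.TotalBytesBilled)
//		case *LoadStatistics:
//			fmt.Println("rows loaded:", d.OutputRows)
//		case *ExtractStatistics:
//			fmt.Println("files per URI:", d.DestinationURIFileCounts)
//		}
//	}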
86
vendor/cloud.google.com/go/bigquery/load.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
    // JobID is the ID to use for the load job. If unset, a job ID will be automatically created.
    JobID string

    // Src is the source from which data will be loaded.
    Src LoadSource

    // Dst is the table into which the data will be loaded.
    Dst *Table

    // CreateDisposition specifies the circumstances under which the destination table will be created.
    // The default is CreateIfNeeded.
    CreateDisposition TableCreateDisposition

    // WriteDisposition specifies how existing data in the destination table is treated.
    // The default is WriteAppend.
    WriteDisposition TableWriteDisposition
}

// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
    LoadConfig
    c *Client
}

// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
    populateInsertJobConfForLoad(conf *insertJobConf)
}

// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
    return &Loader{
        c: t.c,
        LoadConfig: LoadConfig{
            Src: src,
            Dst: t,
        },
    }
}

// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
    job := &bq.Job{
        Configuration: &bq.JobConfiguration{
            Load: &bq.JobConfigurationLoad{
                CreateDisposition: string(l.CreateDisposition),
                WriteDisposition:  string(l.WriteDisposition),
            },
        },
    }
    conf := &insertJobConf{job: job}
    l.Src.populateInsertJobConfForLoad(conf)
    setJobRef(job, l.JobID, l.c.projectID)

    job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()

    return l.c.insertJob(ctx, conf)
}
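The following usage sketch is editorial and not part of the vendored file: it shows how a caller might drive the Loader API above, assuming a hypothetical project, bucket, dataset, and table. Only identifiers defined by this package (NewClient, NewGCSReference, LoaderFrom, WriteTruncate) are used.

package main

import (
    "log"

    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/data.csv") // hypothetical URI
    gcsRef.SkipLeadingRows = 1 // skip a CSV header row

    loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
    loader.WriteDisposition = bigquery.WriteTruncate // replace any existing rows

    if _, err := loader.Run(ctx); err != nil { // Run only initiates the job
        log.Fatal(err)
    }
}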
229
vendor/cloud.google.com/go/bigquery/load_test.go
generated
vendored
Normal file
@@ -0,0 +1,229 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func defaultLoadJob() *bq.Job {
|
||||
return &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Load: &bq.JobConfigurationLoad{
|
||||
DestinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
SourceUris: []string{"uri"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func stringFieldSchema() *FieldSchema {
|
||||
return &FieldSchema{Name: "fieldname", Type: StringFieldType}
|
||||
}
|
||||
|
||||
func nestedFieldSchema() *FieldSchema {
|
||||
return &FieldSchema{
|
||||
Name: "nested",
|
||||
Type: RecordFieldType,
|
||||
Schema: Schema{stringFieldSchema()},
|
||||
}
|
||||
}
|
||||
|
||||
func bqStringFieldSchema() *bq.TableFieldSchema {
|
||||
return &bq.TableFieldSchema{
|
||||
Name: "fieldname",
|
||||
Type: "STRING",
|
||||
}
|
||||
}
|
||||
|
||||
func bqNestedFieldSchema() *bq.TableFieldSchema {
|
||||
return &bq.TableFieldSchema{
|
||||
Name: "nested",
|
||||
Type: "RECORD",
|
||||
Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoad(t *testing.T) {
|
||||
c := &Client{projectID: "project-id"}
|
||||
|
||||
testCases := []struct {
|
||||
dst *Table
|
||||
src LoadSource
|
||||
config LoadConfig
|
||||
want *bq.Job
|
||||
}{
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: NewGCSReference("uri"),
|
||||
want: defaultLoadJob(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
config: LoadConfig{
|
||||
CreateDisposition: CreateNever,
|
||||
WriteDisposition: WriteTruncate,
|
||||
JobID: "ajob",
|
||||
},
|
||||
src: NewGCSReference("uri"),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
|
||||
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
|
||||
j.JobReference = &bq.JobReference{
|
||||
JobId: "ajob",
|
||||
ProjectId: "project-id",
|
||||
}
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.MaxBadRecords = 1
|
||||
g.AllowJaggedRows = true
|
||||
g.AllowQuotedNewlines = true
|
||||
g.IgnoreUnknownValues = true
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.MaxBadRecords = 1
|
||||
j.Configuration.Load.AllowJaggedRows = true
|
||||
j.Configuration.Load.AllowQuotedNewlines = true
|
||||
j.Configuration.Load.IgnoreUnknownValues = true
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.Schema = Schema{
|
||||
stringFieldSchema(),
|
||||
nestedFieldSchema(),
|
||||
}
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.Schema = &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqStringFieldSchema(),
|
||||
bqNestedFieldSchema(),
|
||||
}}
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.SkipLeadingRows = 1
|
||||
g.SourceFormat = JSON
|
||||
g.Encoding = UTF_8
|
||||
g.FieldDelimiter = "\t"
|
||||
g.Quote = "-"
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.SkipLeadingRows = 1
|
||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
|
||||
j.Configuration.Load.Encoding = "UTF-8"
|
||||
j.Configuration.Load.FieldDelimiter = "\t"
|
||||
hyphen := "-"
|
||||
j.Configuration.Load.Quote = &hyphen
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: NewGCSReference("uri"),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
// Quote is left unset in GCSReference, so should be nil here.
|
||||
j.Configuration.Load.Quote = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.ForceZeroQuote = true
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
empty := ""
|
||||
j.Configuration.Load.Quote = &empty
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *ReaderSource {
|
||||
r := NewReaderSource(strings.NewReader("foo"))
|
||||
r.SkipLeadingRows = 1
|
||||
r.SourceFormat = JSON
|
||||
r.Encoding = UTF_8
|
||||
r.FieldDelimiter = "\t"
|
||||
r.Quote = "-"
|
||||
return r
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.SourceUris = nil
|
||||
j.Configuration.Load.SkipLeadingRows = 1
|
||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
|
||||
j.Configuration.Load.Encoding = "UTF-8"
|
||||
j.Configuration.Load.FieldDelimiter = "\t"
|
||||
hyphen := "-"
|
||||
j.Configuration.Load.Quote = &hyphen
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
s := &testService{}
|
||||
c.service = s
|
||||
loader := tc.dst.LoaderFrom(tc.src)
|
||||
tc.config.Src = tc.src
|
||||
tc.config.Dst = tc.dst
|
||||
loader.LoadConfig = tc.config
|
||||
if _, err := loader.Run(context.Background()); err != nil {
|
||||
t.Errorf("%d: err calling Loader.Run: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !testutil.Equal(s.Job, tc.want) {
|
||||
t.Errorf("loading %d: got:\n%v\nwant:\n%v",
|
||||
i, pretty.Value(s.Job), pretty.Value(tc.want))
|
||||
}
|
||||
}
|
||||
}
|
265
vendor/cloud.google.com/go/bigquery/params.go
generated
vendored
Normal file
@@ -0,0 +1,265 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "encoding/base64"
    "errors"
    "fmt"
    "reflect"
    "regexp"
    "time"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/fields"

    bq "google.golang.org/api/bigquery/v2"
)

var (
    // See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
    timestampFormat = "2006-01-02 15:04:05.999999-07:00"

    // See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
    validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
    if s := t.Get("bigquery"); s != "" {
        if s == "-" {
            return "", false, nil, nil
        }
        if !validFieldName.MatchString(s) {
            return "", false, nil, errInvalidFieldName
        }
        return s, true, nil, nil
    }
    return "", true, nil, nil
}

var fieldCache = fields.NewCache(bqTagParser, nil, nil)

var (
    int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
    float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
    boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
    stringParamType    = &bq.QueryParameterType{Type: "STRING"}
    bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
    dateParamType      = &bq.QueryParameterType{Type: "DATE"}
    timeParamType      = &bq.QueryParameterType{Type: "TIME"}
    dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
    timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
)

var (
    typeOfDate     = reflect.TypeOf(civil.Date{})
    typeOfTime     = reflect.TypeOf(civil.Time{})
    typeOfDateTime = reflect.TypeOf(civil.DateTime{})
    typeOfGoTime   = reflect.TypeOf(time.Time{})
)

// A QueryParameter is a parameter to a query.
type QueryParameter struct {
    // Name is used for named parameter mode.
    // It must match the name in the query case-insensitively.
    Name string

    // Value is the value of the parameter.
    // The following Go types are supported, with their corresponding
    // BigQuery types:
    //   int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
    //     Note that uint, uint64 and uintptr are not supported, because
    //     they may contain values that cannot fit into a 64-bit signed integer.
    //   float32, float64: FLOAT64
    //   bool: BOOL
    //   string: STRING
    //   []byte: BYTES
    //   time.Time: TIMESTAMP
    //   Arrays and slices of the above.
    //   Structs of the above. Only the exported fields are used.
    Value interface{}
}

func (p QueryParameter) toRaw() (*bq.QueryParameter, error) {
    pv, err := paramValue(reflect.ValueOf(p.Value))
    if err != nil {
        return nil, err
    }
    pt, err := paramType(reflect.TypeOf(p.Value))
    if err != nil {
        return nil, err
    }
    return &bq.QueryParameter{
        Name:           p.Name,
        ParameterValue: &pv,
        ParameterType:  pt,
    }, nil
}

func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
    if t == nil {
        return nil, errors.New("bigquery: nil parameter")
    }
    switch t {
    case typeOfDate:
        return dateParamType, nil
    case typeOfTime:
        return timeParamType, nil
    case typeOfDateTime:
        return dateTimeParamType, nil
    case typeOfGoTime:
        return timestampParamType, nil
    }
    switch t.Kind() {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
        return int64ParamType, nil

    case reflect.Float32, reflect.Float64:
        return float64ParamType, nil

    case reflect.Bool:
        return boolParamType, nil

    case reflect.String:
        return stringParamType, nil

    case reflect.Slice:
        if t.Elem().Kind() == reflect.Uint8 {
            return bytesParamType, nil
        }
        fallthrough

    case reflect.Array:
        et, err := paramType(t.Elem())
        if err != nil {
            return nil, err
        }
        return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil

    case reflect.Ptr:
        if t.Elem().Kind() != reflect.Struct {
            break
        }
        t = t.Elem()
        fallthrough

    case reflect.Struct:
        var fts []*bq.QueryParameterTypeStructTypes
        fields, err := fieldCache.Fields(t)
        if err != nil {
            return nil, err
        }
        for _, f := range fields {
            pt, err := paramType(f.Type)
            if err != nil {
                return nil, err
            }
            fts = append(fts, &bq.QueryParameterTypeStructTypes{
                Name: f.Name,
                Type: pt,
            })
        }
        return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
    }
    return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}

func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
    var res bq.QueryParameterValue
    if !v.IsValid() {
        return res, errors.New("bigquery: nil parameter")
    }
    t := v.Type()
    switch t {
    case typeOfDate:
        res.Value = v.Interface().(civil.Date).String()
        return res, nil

    case typeOfTime:
        // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
        res.Value = civilTimeParamString(v.Interface().(civil.Time))
        return res, nil

    case typeOfDateTime:
        dt := v.Interface().(civil.DateTime)
        res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time)
        return res, nil

    case typeOfGoTime:
        res.Value = v.Interface().(time.Time).Format(timestampFormat)
        return res, nil
    }
    switch t.Kind() {
    case reflect.Slice:
        if t.Elem().Kind() == reflect.Uint8 {
            res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
            return res, nil
        }
        fallthrough

    case reflect.Array:
        var vals []*bq.QueryParameterValue
        for i := 0; i < v.Len(); i++ {
            val, err := paramValue(v.Index(i))
            if err != nil {
                return bq.QueryParameterValue{}, err
            }
            vals = append(vals, &val)
        }
        return bq.QueryParameterValue{ArrayValues: vals}, nil

    case reflect.Ptr:
        if t.Elem().Kind() != reflect.Struct {
            return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
        }
        t = t.Elem()
        v = v.Elem()
        if !v.IsValid() {
            // A nil pointer becomes an empty value.
            return res, nil
        }
        fallthrough

    case reflect.Struct:
        fields, err := fieldCache.Fields(t)
        if err != nil {
            return bq.QueryParameterValue{}, err
        }
        res.StructValues = map[string]bq.QueryParameterValue{}
        for _, f := range fields {
            fv := v.FieldByIndex(f.Index)
            fp, err := paramValue(fv)
            if err != nil {
                return bq.QueryParameterValue{}, err
            }
            res.StructValues[f.Name] = fp
        }
        return res, nil
    }
    // None of the above: assume a scalar type. (If it's not a valid type,
    // paramType will catch the error.)
    res.Value = fmt.Sprint(v.Interface())
    return res, nil
}

func civilTimeParamString(t civil.Time) string {
    if t.Nanosecond == 0 {
        return t.String()
    }
    micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
    t.Nanosecond = 0
    return t.String() + fmt.Sprintf(".%06d", micro)
}
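A companion sketch (editorial, not vendored code) showing how the parameter machinery above surfaces through the public API; it reuses the hypothetical client from the load sketch earlier and assumes imports of fmt and google.golang.org/api/iterator. The int value 4 is mapped to INT64 by paramType, and the presence of Parameters switches the job to standard SQL.

func queryWithParam(ctx context.Context, client *bigquery.Client) error {
    q := client.Query("SELECT word FROM corpus WHERE LENGTH(word) >= @min_len")
    q.Parameters = []bigquery.QueryParameter{
        {Name: "min_len", Value: 4}, // converted by toRaw via paramType/paramValue
    }
    it, err := q.Read(ctx)
    if err != nil {
        return err
    }
    for {
        var row []bigquery.Value
        err := it.Next(&row)
        if err == iterator.Done {
            return nil
        }
        if err != nil {
            return err
        }
        fmt.Println(row)
    }
}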
249
vendor/cloud.google.com/go/bigquery/params_test.go
generated
vendored
Normal file
@@ -0,0 +1,249 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"cloud.google.com/go/civil"
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
var scalarTests = []struct {
|
||||
val interface{}
|
||||
want string
|
||||
}{
|
||||
{int64(0), "0"},
|
||||
{3.14, "3.14"},
|
||||
{3.14159e-87, "3.14159e-87"},
|
||||
{true, "true"},
|
||||
{"string", "string"},
|
||||
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"},
|
||||
{math.NaN(), "NaN"},
|
||||
{[]byte("foo"), "Zm9v"}, // base64 encoding of "foo"
|
||||
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
|
||||
"2016-03-20 04:22:09.000005-01:02"},
|
||||
{civil.Date{2016, 3, 20}, "2016-03-20"},
|
||||
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"},
|
||||
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"},
|
||||
}
|
||||
|
||||
type S1 struct {
|
||||
A int
|
||||
B *S2
|
||||
C bool
|
||||
}
|
||||
|
||||
type S2 struct {
|
||||
D string
|
||||
e int
|
||||
}
|
||||
|
||||
var s1 = S1{
|
||||
A: 1,
|
||||
B: &S2{D: "s"},
|
||||
C: true,
|
||||
}
|
||||
|
||||
func sval(s string) bq.QueryParameterValue {
|
||||
return bq.QueryParameterValue{Value: s}
|
||||
}
|
||||
|
||||
func TestParamValueScalar(t *testing.T) {
|
||||
for _, test := range scalarTests {
|
||||
got, err := paramValue(reflect.ValueOf(test.val))
|
||||
if err != nil {
|
||||
t.Errorf("%v: got %v, want nil", test.val, err)
|
||||
continue
|
||||
}
|
||||
want := sval(test.want)
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParamValueArray(t *testing.T) {
|
||||
qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
|
||||
{Value: "1"},
|
||||
{Value: "2"},
|
||||
},
|
||||
}
|
||||
for _, test := range []struct {
|
||||
val interface{}
|
||||
want bq.QueryParameterValue
|
||||
}{
|
||||
{[]int(nil), bq.QueryParameterValue{}},
|
||||
{[]int{}, bq.QueryParameterValue{}},
|
||||
{[]int{1, 2}, qpv},
|
||||
{[2]int{1, 2}, qpv},
|
||||
} {
|
||||
got, err := paramValue(reflect.ValueOf(test.val))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, test.want) {
|
||||
t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParamValueStruct(t *testing.T) {
|
||||
got, err := paramValue(reflect.ValueOf(s1))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := bq.QueryParameterValue{
|
||||
StructValues: map[string]bq.QueryParameterValue{
|
||||
"A": sval("1"),
|
||||
"B": bq.QueryParameterValue{
|
||||
StructValues: map[string]bq.QueryParameterValue{
|
||||
"D": sval("s"),
|
||||
},
|
||||
},
|
||||
"C": sval("true"),
|
||||
},
|
||||
}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("got %+v\nwant %+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParamValueErrors(t *testing.T) {
|
||||
// paramValue lets a few invalid types through, but paramType catches them.
|
||||
// Since we never call one without the other that's fine.
|
||||
for _, val := range []interface{}{nil, new([]int)} {
|
||||
_, err := paramValue(reflect.ValueOf(val))
|
||||
if err == nil {
|
||||
t.Errorf("%v (%T): got nil, want error", val, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParamType(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
val interface{}
|
||||
want *bq.QueryParameterType
|
||||
}{
|
||||
{0, int64ParamType},
|
||||
{uint32(32767), int64ParamType},
|
||||
{3.14, float64ParamType},
|
||||
{float32(3.14), float64ParamType},
|
||||
{math.NaN(), float64ParamType},
|
||||
{true, boolParamType},
|
||||
{"", stringParamType},
|
||||
{"string", stringParamType},
|
||||
{time.Now(), timestampParamType},
|
||||
{[]byte("foo"), bytesParamType},
|
||||
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
|
||||
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
|
||||
{S1{}, &bq.QueryParameterType{
|
||||
Type: "STRUCT",
|
||||
StructTypes: []*bq.QueryParameterTypeStructTypes{
|
||||
{Name: "A", Type: int64ParamType},
|
||||
{Name: "B", Type: &bq.QueryParameterType{
|
||||
Type: "STRUCT",
|
||||
StructTypes: []*bq.QueryParameterTypeStructTypes{
|
||||
{Name: "D", Type: stringParamType},
|
||||
},
|
||||
}},
|
||||
{Name: "C", Type: boolParamType},
|
||||
},
|
||||
}},
|
||||
} {
|
||||
got, err := paramType(reflect.TypeOf(test.val))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, test.want) {
|
||||
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParamTypeErrors(t *testing.T) {
|
||||
for _, val := range []interface{}{
|
||||
nil, uint(0), new([]int), make(chan int),
|
||||
} {
|
||||
_, err := paramType(reflect.TypeOf(val))
|
||||
if err == nil {
|
||||
t.Errorf("%v (%T): got nil, want error", val, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_ScalarParam(t *testing.T) {
|
||||
c := getClient(t)
|
||||
for _, test := range scalarTests {
|
||||
got, err := paramRoundTrip(c, test.val)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, test.val, cmp.Comparer(func(t1, t2 time.Time) bool {
|
||||
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
|
||||
})) {
|
||||
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_OtherParam(t *testing.T) {
|
||||
c := getClient(t)
|
||||
for _, test := range []struct {
|
||||
val interface{}
|
||||
want interface{}
|
||||
}{
|
||||
{[]int(nil), []Value(nil)},
|
||||
{[]int{}, []Value(nil)},
|
||||
{[]int{1, 2}, []Value{int64(1), int64(2)}},
|
||||
{[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}},
|
||||
{S1{}, []Value{int64(0), nil, false}},
|
||||
{s1, []Value{int64(1), []Value{"s"}, true}},
|
||||
} {
|
||||
got, err := paramRoundTrip(c, test.val)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, test.want) {
|
||||
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func paramRoundTrip(c *Client, x interface{}) (Value, error) {
|
||||
q := c.Query("select ?")
|
||||
q.Parameters = []QueryParameter{{Value: x}}
|
||||
it, err := q.Read(context.Background())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var val []Value
|
||||
err = it.Next(&val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(val) != 1 {
|
||||
return nil, errors.New("wrong number of values")
|
||||
}
|
||||
return val[0], nil
|
||||
}
|
206
vendor/cloud.google.com/go/bigquery/query.go
generated
vendored
Normal file
@@ -0,0 +1,206 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
    // JobID is the ID to use for the query job. If this field is empty, a job ID
    // will be automatically created.
    JobID string

    // Dst is the table into which the results of the query will be written.
    // If this field is nil, a temporary table will be created.
    Dst *Table

    // The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
    Q string

    // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
    // If DefaultProjectID is set, DefaultDatasetID must also be set.
    DefaultProjectID string
    DefaultDatasetID string

    // TableDefinitions describes data sources outside of BigQuery.
    // The map keys may be used as table names in the query string.
    TableDefinitions map[string]ExternalData

    // CreateDisposition specifies the circumstances under which the destination table will be created.
    // The default is CreateIfNeeded.
    CreateDisposition TableCreateDisposition

    // WriteDisposition specifies how existing data in the destination table is treated.
    // The default is WriteEmpty.
    WriteDisposition TableWriteDisposition

    // DisableQueryCache prevents results being fetched from the query cache.
    // If this field is false, results are fetched from the cache if they are available.
    // The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
    // Cached results are only available when TableID is unspecified in the query's destination Table.
    // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
    DisableQueryCache bool

    // DisableFlattenedResults prevents results being flattened.
    // If this field is false, results from nested and repeated fields are flattened.
    // DisableFlattenedResults implies AllowLargeResults.
    // For more information, see https://cloud.google.com/bigquery/docs/data#nested
    DisableFlattenedResults bool

    // AllowLargeResults allows the query to produce arbitrarily large result tables.
    // The destination must be a table.
    // When using this option, queries will take longer to execute, even if the result set is small.
    // For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
    AllowLargeResults bool

    // Priority specifies the priority with which to schedule the query.
    // The default priority is InteractivePriority.
    // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
    Priority QueryPriority

    // MaxBillingTier sets the maximum billing tier for a Query.
    // Queries that have resource usage beyond this tier will fail (without
    // incurring a charge). If this field is zero, the project default will be used.
    MaxBillingTier int

    // MaxBytesBilled limits the number of bytes billed for
    // this job. Queries that would exceed this limit will fail (without incurring
    // a charge).
    // If this field is less than 1, the project default will be used.
    MaxBytesBilled int64

    // UseStandardSQL causes the query to use standard SQL.
    // The default is false (using legacy SQL).
    UseStandardSQL bool

    // Parameters is a list of query parameters. The presence of parameters
    // implies the use of standard SQL.
    // If the query uses positional syntax ("?"), then no parameter may have a name.
    // If the query uses named syntax ("@p"), then all parameters must have names.
    // It is illegal to mix positional and named syntax.
    Parameters []QueryParameter
}

// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string

const (
    BatchPriority       QueryPriority = "BATCH"
    InteractivePriority QueryPriority = "INTERACTIVE"
)

// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
    client *Client
    QueryConfig
}

// Query creates a query with string q.
// The returned Query may optionally be further configured before its Run method is called.
func (c *Client) Query(q string) *Query {
    return &Query{
        client:      c,
        QueryConfig: QueryConfig{Q: q},
    }
}

// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
    job := &bq.Job{
        Configuration: &bq.JobConfiguration{
            Query: &bq.JobConfigurationQuery{},
        },
    }
    setJobRef(job, q.JobID, q.client.projectID)

    if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
        return nil, err
    }
    j, err := q.client.insertJob(ctx, &insertJobConf{job: job})
    if err != nil {
        return nil, err
    }
    j.isQuery = true
    return j, nil
}

func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
    conf.Query = q.Q

    if len(q.TableDefinitions) > 0 {
        conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
    }
    for name, data := range q.TableDefinitions {
        conf.TableDefinitions[name] = data.externalDataConfig()
    }

    if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
        conf.DefaultDataset = &bq.DatasetReference{
            DatasetId: q.DefaultDatasetID,
            ProjectId: q.DefaultProjectID,
        }
    }

    if tier := int64(q.MaxBillingTier); tier > 0 {
        conf.MaximumBillingTier = &tier
    }
    conf.CreateDisposition = string(q.CreateDisposition)
    conf.WriteDisposition = string(q.WriteDisposition)
    conf.AllowLargeResults = q.AllowLargeResults
    conf.Priority = string(q.Priority)

    f := false
    if q.DisableQueryCache {
        conf.UseQueryCache = &f
    }
    if q.DisableFlattenedResults {
        conf.FlattenResults = &f
        // DisableFlattenedResults implies AllowLargeResults.
        conf.AllowLargeResults = true
    }
    if q.MaxBytesBilled >= 1 {
        conf.MaximumBytesBilled = q.MaxBytesBilled
    }
    if q.UseStandardSQL || len(q.Parameters) > 0 {
        conf.UseLegacySql = false
        conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
    }

    if q.Dst != nil && !q.Dst.implicitTable() {
        conf.DestinationTable = q.Dst.tableRefProto()
    }
    for _, p := range q.Parameters {
        qp, err := p.toRaw()
        if err != nil {
            return err
        }
        conf.QueryParameters = append(conf.QueryParameters, qp)
    }
    return nil
}

// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
    job, err := q.Run(ctx)
    if err != nil {
        return nil, err
    }
    return job.Read(ctx)
}
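Another editorial sketch, not part of the vendored file: configuring a query to persist its results, using only fields defined in QueryConfig above. The dataset and table names are hypothetical.

func queryToTable(ctx context.Context, client *bigquery.Client) (*bigquery.Job, error) {
    q := client.Query("SELECT name, num FROM mydataset.mytable")
    q.Dst = client.Dataset("mydataset").Table("results") // otherwise a temporary table is used
    q.WriteDisposition = bigquery.WriteTruncate
    q.UseStandardSQL = true // the default is legacy SQL
    return q.Run(ctx) // Read(ctx) would additionally iterate the result rows
}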
306
vendor/cloud.google.com/go/bigquery/query_test.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func defaultQueryJob() *bq.Job {
|
||||
return &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Query: &bq.JobConfigurationQuery{
|
||||
DestinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
Query: "query string",
|
||||
DefaultDataset: &bq.DatasetReference{
|
||||
ProjectId: "def-project-id",
|
||||
DatasetId: "def-dataset-id",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuery(t *testing.T) {
|
||||
c := &Client{
|
||||
projectID: "project-id",
|
||||
}
|
||||
testCases := []struct {
|
||||
dst *Table
|
||||
src *QueryConfig
|
||||
want *bq.Job
|
||||
}{
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: defaultQuery,
|
||||
want: defaultQueryJob(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.DefaultDataset = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: &Table{},
|
||||
src: defaultQuery,
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.DestinationTable = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
TableDefinitions: map[string]ExternalData{
|
||||
"atable": func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.AllowJaggedRows = true
|
||||
g.AllowQuotedNewlines = true
|
||||
g.Compression = Gzip
|
||||
g.Encoding = UTF_8
|
||||
g.FieldDelimiter = ";"
|
||||
g.IgnoreUnknownValues = true
|
||||
g.MaxBadRecords = 1
|
||||
g.Quote = "'"
|
||||
g.SkipLeadingRows = 2
|
||||
g.Schema = Schema([]*FieldSchema{
|
||||
{Name: "name", Type: StringFieldType},
|
||||
})
|
||||
return g
|
||||
}(),
|
||||
},
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.DefaultDataset = nil
|
||||
td := make(map[string]bq.ExternalDataConfiguration)
|
||||
quote := "'"
|
||||
td["atable"] = bq.ExternalDataConfiguration{
|
||||
Compression: "GZIP",
|
||||
IgnoreUnknownValues: true,
|
||||
MaxBadRecords: 1,
|
||||
SourceFormat: "CSV", // must be explicitly set.
|
||||
SourceUris: []string{"uri"},
|
||||
CsvOptions: &bq.CsvOptions{
|
||||
AllowJaggedRows: true,
|
||||
AllowQuotedNewlines: true,
|
||||
Encoding: "UTF-8",
|
||||
FieldDelimiter: ";",
|
||||
SkipLeadingRows: 2,
|
||||
Quote: &quote,
|
||||
},
|
||||
Schema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
{Name: "name", Type: "STRING"},
|
||||
},
|
||||
},
|
||||
}
|
||||
j.Configuration.Query.TableDefinitions = td
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: &Table{
|
||||
ProjectID: "project-id",
|
||||
DatasetID: "dataset-id",
|
||||
TableID: "table-id",
|
||||
},
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
CreateDisposition: CreateNever,
|
||||
WriteDisposition: WriteTruncate,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
|
||||
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
DisableQueryCache: true,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
f := false
|
||||
j.Configuration.Query.UseQueryCache = &f
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
AllowLargeResults: true,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.AllowLargeResults = true
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
DisableFlattenedResults: true,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
f := false
|
||||
j.Configuration.Query.FlattenResults = &f
|
||||
j.Configuration.Query.AllowLargeResults = true
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
Priority: QueryPriority("low"),
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.Priority = "low"
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
MaxBillingTier: 3,
|
||||
MaxBytesBilled: 5,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
tier := int64(3)
|
||||
j.Configuration.Query.MaximumBillingTier = &tier
|
||||
j.Configuration.Query.MaximumBytesBilled = 5
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
MaxBytesBilled: -1,
|
||||
},
|
||||
want: defaultQueryJob(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: &QueryConfig{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
UseStandardSQL: true,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.UseLegacySql = false
|
||||
j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"}
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
s := &testService{}
|
||||
c.service = s
|
||||
query := c.Query("")
|
||||
query.QueryConfig = *tc.src
|
||||
query.Dst = tc.dst
|
||||
if _, err := query.Run(context.Background()); err != nil {
|
||||
t.Errorf("err calling query: %v", err)
|
||||
continue
|
||||
}
|
||||
if !testutil.Equal(s.Job, tc.want) {
|
||||
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfiguringQuery(t *testing.T) {
|
||||
s := &testService{}
|
||||
c := &Client{
|
||||
projectID: "project-id",
|
||||
service: s,
|
||||
}
|
||||
|
||||
query := c.Query("q")
|
||||
query.JobID = "ajob"
|
||||
query.DefaultProjectID = "def-project-id"
|
||||
query.DefaultDatasetID = "def-dataset-id"
|
||||
// Note: Other configuration fields are tested in other tests above.
|
||||
// A lot of that can be consolidated once Client.Copy is gone.
|
||||
|
||||
want := &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Query: &bq.JobConfigurationQuery{
|
||||
Query: "q",
|
||||
DefaultDataset: &bq.DatasetReference{
|
||||
ProjectId: "def-project-id",
|
||||
DatasetId: "def-dataset-id",
|
||||
},
|
||||
},
|
||||
},
|
||||
JobReference: &bq.JobReference{
|
||||
JobId: "ajob",
|
||||
ProjectId: "project-id",
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := query.Run(context.Background()); err != nil {
|
||||
t.Fatalf("err calling Query.Run: %v", err)
|
||||
}
|
||||
if !testutil.Equal(s.Job, want) {
|
||||
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want)
|
||||
}
|
||||
}
|
263
vendor/cloud.google.com/go/bigquery/read_test.go
generated
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
type readTabledataArgs struct {
|
||||
conf *readTableConf
|
||||
tok string
|
||||
}
|
||||
|
||||
// readServiceStub services read requests by returning data from an in-memory list of values.
|
||||
type readServiceStub struct {
|
||||
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
|
||||
values [][][]Value // contains pages / rows / columns.
|
||||
pageTokens map[string]string // maps incoming page token to returned page token.
|
||||
|
||||
// arguments are recorded for later inspection.
|
||||
readTabledataCalls []readTabledataArgs
|
||||
|
||||
service
|
||||
}
|
||||
|
||||
func (s *readServiceStub) readValues(tok string) *readDataResult {
|
||||
result := &readDataResult{
|
||||
pageToken: s.pageTokens[tok],
|
||||
rows: s.values[0],
|
||||
}
|
||||
s.values = s.values[1:]
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *readServiceStub) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
|
||||
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
|
||||
return s.readValues(token), nil
|
||||
}
|
||||
|
||||
func TestRead(t *testing.T) {
|
||||
// The data for the service stub to return is populated for each test case in the testCases for loop.
|
||||
ctx := context.Background()
|
||||
service := &readServiceStub{}
|
||||
c := &Client{
|
||||
projectID: "project-id",
|
||||
service: service,
|
||||
}
|
||||
|
||||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
c: c,
|
||||
isQuery: true,
|
||||
destinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
}
|
||||
|
||||
for _, readFunc := range []func() *RowIterator{
|
||||
func() *RowIterator {
|
||||
return c.Dataset("dataset-id").Table("table-id").Read(ctx)
|
||||
},
|
||||
func() *RowIterator {
|
||||
it, err := queryJob.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return it
|
||||
},
|
||||
} {
|
||||
testCases := []struct {
|
||||
data [][][]Value
|
||||
pageTokens map[string]string
|
||||
want [][]Value
|
||||
}{
|
||||
{
|
||||
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
|
||||
pageTokens: map[string]string{"": "a", "a": ""},
|
||||
want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
|
||||
},
|
||||
{
|
||||
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
|
||||
pageTokens: map[string]string{"": ""}, // no more pages after first one.
|
||||
want: [][]Value{{1, 2}, {11, 12}},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
service.values = tc.data
|
||||
service.pageTokens = tc.pageTokens
|
||||
if got, ok := collectValues(t, readFunc()); ok {
|
||||
if !testutil.Equal(got, tc.want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
|
||||
var got [][]Value
|
||||
for {
|
||||
var vals []Value
|
||||
err := it.Next(&vals)
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("err calling Next: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
got = append(got, vals)
|
||||
}
|
||||
return got, true
|
||||
}
|
||||
|
||||
func TestNoMoreValues(t *testing.T) {
|
||||
c := &Client{
|
||||
projectID: "project-id",
|
||||
service: &readServiceStub{
|
||||
values: [][][]Value{{{1, 2}, {11, 12}}},
|
||||
},
|
||||
}
|
||||
it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
|
||||
var vals []Value
|
||||
// We expect to retrieve two values and then fail on the next attempt.
|
||||
if err := it.Next(&vals); err != nil {
|
||||
t.Fatalf("Next: got: %v: want: nil", err)
|
||||
}
|
||||
if err := it.Next(&vals); err != nil {
|
||||
t.Fatalf("Next: got: %v: want: nil", err)
|
||||
}
|
||||
if err := it.Next(&vals); err != iterator.Done {
|
||||
t.Fatalf("Next: got: %v: want: iterator.Done", err)
|
||||
}
|
||||
}
|
||||
|
||||
type errorReadService struct {
|
||||
service
|
||||
}
|
||||
|
||||
var errBang = errors.New("bang!")
|
||||
|
||||
func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
|
||||
return nil, errBang
|
||||
}
|
||||
|
||||
func TestReadError(t *testing.T) {
|
||||
// test that service read errors are propagated back to the caller.
|
||||
c := &Client{
|
||||
projectID: "project-id",
|
||||
service: &errorReadService{},
|
||||
}
|
||||
it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
|
||||
var vals []Value
|
||||
if err := it.Next(&vals); err != errBang {
|
||||
t.Fatalf("Get: got: %v: want: %v", err, errBang)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadTabledataOptions(t *testing.T) {
|
||||
// test that read options are propagated.
|
||||
s := &readServiceStub{
|
||||
values: [][][]Value{{{1, 2}}},
|
||||
}
|
||||
c := &Client{
|
||||
projectID: "project-id",
|
||||
service: s,
|
||||
}
|
||||
it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
|
||||
it.PageInfo().MaxSize = 5
|
||||
var vals []Value
|
||||
if err := it.Next(&vals); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := []readTabledataArgs{{
|
||||
conf: &readTableConf{
|
||||
projectID: "project-id",
|
||||
datasetID: "dataset-id",
|
||||
tableID: "table-id",
|
||||
paging: pagingConf{
|
||||
recordsPerRequest: 5,
|
||||
setRecordsPerRequest: true,
|
||||
},
|
||||
},
|
||||
tok: "",
|
||||
}}
|
||||
|
||||
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadQueryOptions(t *testing.T) {
|
||||
// test that read options are propagated.
|
||||
s := &readServiceStub{
|
||||
values: [][][]Value{{{1, 2}}},
|
||||
}
|
||||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
c: &Client{service: s},
|
||||
isQuery: true,
|
||||
destinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
}
|
||||
it, err := queryJob.Read(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
it.PageInfo().MaxSize = 5
|
||||
var vals []Value
|
||||
if err := it.Next(&vals); err != nil {
|
||||
t.Fatalf("Next: got: %v: want: nil", err)
|
||||
}
|
||||
|
||||
want := []readTabledataArgs{{
|
||||
conf: &readTableConf{
|
||||
projectID: "project-id",
|
||||
datasetID: "dataset-id",
|
||||
tableID: "table-id",
|
||||
paging: pagingConf{
|
||||
recordsPerRequest: 5,
|
||||
setRecordsPerRequest: true,
|
||||
},
|
||||
},
|
||||
tok: "",
|
||||
}}
|
||||
|
||||
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
|
||||
}
|
||||
}
|
315
vendor/cloud.google.com/go/bigquery/schema.go
generated
vendored
Normal file
@@ -0,0 +1,315 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/atomiccache"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// Schema describes the fields in a table or query result.
|
||||
type Schema []*FieldSchema
|
||||
|
||||
type FieldSchema struct {
|
||||
// The field name.
|
||||
// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
|
||||
// and must start with a letter or underscore.
|
||||
// The maximum length is 128 characters.
|
||||
Name string
|
||||
|
||||
// A description of the field. The maximum length is 16,384 characters.
|
||||
Description string
|
||||
|
||||
// Whether the field may contain multiple values.
|
||||
Repeated bool
|
||||
// Whether the field is required. Ignored if Repeated is true.
|
||||
Required bool
|
||||
|
||||
// The field data type. If Type is Record, then this field contains a nested schema,
|
||||
// which is described by Schema.
|
||||
Type FieldType
|
||||
// Describes the nested schema if Type is set to Record.
|
||||
Schema Schema
|
||||
}
|
||||
|
||||
func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
|
||||
tfs := &bq.TableFieldSchema{
|
||||
Description: fs.Description,
|
||||
Name: fs.Name,
|
||||
Type: string(fs.Type),
|
||||
}
|
||||
|
||||
if fs.Repeated {
|
||||
tfs.Mode = "REPEATED"
|
||||
} else if fs.Required {
|
||||
tfs.Mode = "REQUIRED"
|
||||
} // else leave as default, which is interpreted as NULLABLE.
|
||||
|
||||
for _, f := range fs.Schema {
|
||||
tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
|
||||
}
|
||||
|
||||
return tfs
|
||||
}
|
||||
|
||||
func (s Schema) asTableSchema() *bq.TableSchema {
|
||||
var fields []*bq.TableFieldSchema
|
||||
for _, f := range s {
|
||||
fields = append(fields, f.asTableFieldSchema())
|
||||
}
|
||||
return &bq.TableSchema{Fields: fields}
|
||||
}
|
||||
|
||||
// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
|
||||
func (s Schema) customizeCreateTable(conf *createTableConf) {
|
||||
conf.schema = s.asTableSchema()
|
||||
}
|
||||
|
||||
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
|
||||
fs := &FieldSchema{
|
||||
Description: tfs.Description,
|
||||
Name: tfs.Name,
|
||||
Repeated: tfs.Mode == "REPEATED",
|
||||
Required: tfs.Mode == "REQUIRED",
|
||||
Type: FieldType(tfs.Type),
|
||||
}
|
||||
|
||||
for _, f := range tfs.Fields {
|
||||
fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
func convertTableSchema(ts *bq.TableSchema) Schema {
|
||||
if ts == nil {
|
||||
return nil
|
||||
}
|
||||
var s Schema
|
||||
for _, f := range ts.Fields {
|
||||
s = append(s, convertTableFieldSchema(f))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type FieldType string
|
||||
|
||||
const (
|
||||
StringFieldType FieldType = "STRING"
|
||||
BytesFieldType FieldType = "BYTES"
|
||||
IntegerFieldType FieldType = "INTEGER"
|
||||
FloatFieldType FieldType = "FLOAT"
|
||||
BooleanFieldType FieldType = "BOOLEAN"
|
||||
TimestampFieldType FieldType = "TIMESTAMP"
|
||||
RecordFieldType FieldType = "RECORD"
|
||||
DateFieldType FieldType = "DATE"
|
||||
TimeFieldType FieldType = "TIME"
|
||||
DateTimeFieldType FieldType = "DATETIME"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct")
|
||||
errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
|
||||
errInvalidFieldName = errors.New("bigquery: invalid name of field in struct")
|
||||
)
|
||||
|
||||
var typeOfByteSlice = reflect.TypeOf([]byte{})
|
||||
|
||||
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
|
||||
// NOTE: All fields in the returned Schema are configured to be required,
|
||||
// unless the corresponding field in the supplied struct is a slice or array.
|
||||
//
|
||||
// It is considered an error if the struct (including nested structs) contains
|
||||
// any exported fields that are pointers or one of the following types:
|
||||
// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan.
|
||||
// In these cases, an error will be returned.
|
||||
// Future versions may handle these cases without error.
|
||||
//
|
||||
// Recursively defined structs are also disallowed.
|
||||
func InferSchema(st interface{}) (Schema, error) {
|
||||
return inferSchemaReflectCached(reflect.TypeOf(st))
|
||||
}

var schemaCache atomiccache.Cache

type cacheVal struct {
	schema Schema
	err    error
}

func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
	cv := schemaCache.Get(t, func() interface{} {
		s, err := inferSchemaReflect(t)
		return cacheVal{s, err}
	}).(cacheVal)
	return cv.schema, cv.err
}

func inferSchemaReflect(t reflect.Type) (Schema, error) {
	rec, err := hasRecursiveType(t, nil)
	if err != nil {
		return nil, err
	}
	if rec {
		return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
	}
	return inferStruct(t)
}

func inferStruct(t reflect.Type) (Schema, error) {
	switch t.Kind() {
	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			return nil, errNoStruct
		}
		t = t.Elem()
		fallthrough

	case reflect.Struct:
		return inferFields(t)
	default:
		return nil, errNoStruct
	}
}

// inferFieldSchema infers the FieldSchema for a Go type.
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
	switch rt {
	case typeOfByteSlice:
		return &FieldSchema{Required: true, Type: BytesFieldType}, nil
	case typeOfGoTime:
		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
	case typeOfDate:
		return &FieldSchema{Required: true, Type: DateFieldType}, nil
	case typeOfTime:
		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
	case typeOfDateTime:
		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
	}
	if isSupportedIntType(rt) {
		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
	}
	switch rt.Kind() {
	case reflect.Slice, reflect.Array:
		et := rt.Elem()
		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
			// Multi-dimensional slices/arrays are not supported by BigQuery.
			return nil, errUnsupportedFieldType
		}

		f, err := inferFieldSchema(et)
		if err != nil {
			return nil, err
		}
		f.Repeated = true
		f.Required = false
		return f, nil
	case reflect.Struct, reflect.Ptr:
		nested, err := inferStruct(rt)
		if err != nil {
			return nil, err
		}
		return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
	case reflect.String:
		return &FieldSchema{Required: true, Type: StringFieldType}, nil
	case reflect.Bool:
		return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
	case reflect.Float32, reflect.Float64:
		return &FieldSchema{Required: true, Type: FloatFieldType}, nil
	default:
		return nil, errUnsupportedFieldType
	}
}

// inferFields extracts all exported field types from a struct type.
func inferFields(rt reflect.Type) (Schema, error) {
	var s Schema
	fields, err := fieldCache.Fields(rt)
	if err != nil {
		return nil, err
	}
	for _, field := range fields {
		f, err := inferFieldSchema(field.Type)
		if err != nil {
			return nil, err
		}
		f.Name = field.Name
		s = append(s, f)
	}
	return s, nil
}

// isSupportedIntType reports whether t can be properly represented by the
// BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
		reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return true
	default:
		return false
	}
}

// typeList is a linked list of reflect.Types.
type typeList struct {
	t    reflect.Type
	next *typeList
}

func (l *typeList) has(t reflect.Type) bool {
	for l != nil {
		if l.t == t {
			return true
		}
		l = l.next
	}
	return false
}

// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return false, nil
	}
	if seen.has(t) {
		return true, nil
	}
	fields, err := fieldCache.Fields(t)
	if err != nil {
		return false, err
	}
	seen = &typeList{t, seen}
	// Because seen is a linked list, additions to it from one field's
	// recursive call will not affect the value for subsequent fields' calls.
	for _, field := range fields {
		ok, err := hasRecursiveType(field.Type, seen)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}
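
// Illustrative sketch (added; not part of the upstream file): the seen list
// threads down the recursion, so a self-reference through a pointer is
// detected, while two sibling fields of the same struct type are not flagged.
func exampleHasRecursiveType() {
	type node struct {
		Val  int
		Next *node // cycle through a pointer: recursive
	}
	rec, _ := hasRecursiveType(reflect.TypeOf(node{}), nil)
	fmt.Println(rec) // true
}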
797
vendor/cloud.google.com/go/bigquery/schema_test.go
generated
vendored
Normal file
@@ -0,0 +1,797 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)

func (fs *FieldSchema) GoString() string {
	if fs == nil {
		return "<nil>"
	}

	return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}",
		fs.Name,
		fs.Description,
		fs.Repeated,
		fs.Required,
		fs.Type,
		fmt.Sprintf("%#v", fs.Schema),
	)
}

func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Description: desc,
		Name:        name,
		Mode:        mode,
		Type:        typ,
	}
}

func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema {
	return &FieldSchema{
		Description: desc,
		Name:        name,
		Repeated:    repeated,
		Required:    required,
		Type:        FieldType(typ),
	}
}

func TestSchemaConversion(t *testing.T) {
	testCases := []struct {
		schema   Schema
		bqSchema *bq.TableSchema
	}{
		{
			// required
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, true),
			},
		},
		{
			// repeated
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", true, false),
			},
		},
		{
			// nullable, string
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, false),
			},
		},
		{
			// integer
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "INTEGER", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "INTEGER", false, false),
			},
		},
		{
			// float
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "FLOAT", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "FLOAT", false, false),
			},
		},
		{
			// boolean
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "BOOLEAN", false, false),
			},
		},
		{
			// timestamp
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "TIMESTAMP", false, false),
			},
		},
		{
			// civil times
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "f1", "TIME", ""),
					bqTableFieldSchema("desc", "f2", "DATE", ""),
					bqTableFieldSchema("desc", "f3", "DATETIME", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "f1", "TIME", false, false),
				fieldSchema("desc", "f2", "DATE", false, false),
				fieldSchema("desc", "f3", "DATETIME", false, false),
			},
		},
		{
			// nested
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					{
						Description: "An outer schema wrapping a nested schema",
						Name:        "outer",
						Mode:        "REQUIRED",
						Type:        "RECORD",
						Fields: []*bq.TableFieldSchema{
							bqTableFieldSchema("inner field", "inner", "STRING", ""),
						},
					},
				},
			},
			schema: Schema{
				&FieldSchema{
					Description: "An outer schema wrapping a nested schema",
					Name:        "outer",
					Required:    true,
					Type:        "RECORD",
					Schema: []*FieldSchema{
						{
							Description: "inner field",
							Name:        "inner",
							Type:        "STRING",
						},
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		bqSchema := tc.schema.asTableSchema()
		if !testutil.Equal(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
		schema := convertTableSchema(tc.bqSchema)
		if !testutil.Equal(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}
	}
}

type allStrings struct {
	String    string
	ByteSlice []byte
}

type allSignedIntegers struct {
	Int64 int64
	Int32 int32
	Int16 int16
	Int8  int8
	Int   int
}

type allUnsignedIntegers struct {
	Uint32 uint32
	Uint16 uint16
	Uint8  uint8
}

type allFloat struct {
	Float64 float64
	Float32 float32
	// NOTE: the complex types (complex64 and complex128) are unsupported by BigQuery
}

type allBoolean struct {
	Bool bool
}

type allTime struct {
	Timestamp time.Time
	Time      civil.Time
	Date      civil.Date
	DateTime  civil.DateTime
}

func reqField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Required: true,
	}
}

func TestSimpleInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: allSignedIntegers{},
			want: Schema{
				reqField("Int64", "INTEGER"),
				reqField("Int32", "INTEGER"),
				reqField("Int16", "INTEGER"),
				reqField("Int8", "INTEGER"),
				reqField("Int", "INTEGER"),
			},
		},
		{
			in: allUnsignedIntegers{},
			want: Schema{
				reqField("Uint32", "INTEGER"),
				reqField("Uint16", "INTEGER"),
				reqField("Uint8", "INTEGER"),
			},
		},
		{
			in: allFloat{},
			want: Schema{
				reqField("Float64", "FLOAT"),
				reqField("Float32", "FLOAT"),
			},
		},
		{
			in: allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: &allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: allTime{},
			want: Schema{
				reqField("Timestamp", "TIMESTAMP"),
				reqField("Time", "TIME"),
				reqField("Date", "DATE"),
				reqField("DateTime", "DATETIME"),
			},
		},
		{
			in: allStrings{},
			want: Schema{
				reqField("String", "STRING"),
				reqField("ByteSlice", "BYTES"),
			},
		},
	}
	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type containsNested struct {
	hidden    string
	NotNested int
	Nested    struct {
		Inside int
	}
}

type containsDoubleNested struct {
	NotNested int
	Nested    struct {
		InsideNested struct {
			Inside int
		}
	}
}

type ptrNested struct {
	Ptr *struct{ Inside int }
}

type dup struct { // more than one field of the same struct type
	A, B allBoolean
}

func TestNestedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: containsNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: containsDoubleNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema: Schema{
						{
							Name:     "InsideNested",
							Required: true,
							Type:     "RECORD",
							Schema:   Schema{reqField("Inside", "INTEGER")},
						},
					},
				},
			},
		},
		{
			in: ptrNested{},
			want: Schema{
				&FieldSchema{
					Name:     "Ptr",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: dup{},
			want: Schema{
				&FieldSchema{
					Name:     "A",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
				&FieldSchema{
					Name:     "B",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
			},
		},
	}

	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type repeated struct {
	NotRepeated       []byte
	RepeatedByteSlice [][]byte
	Slice             []int
	Array             [5]bool
}

type nestedRepeated struct {
	NotRepeated int
	Repeated    []struct {
		Inside int
	}
	RepeatedPtr []*struct{ Inside int }
}

func repField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Repeated: true,
	}
}

func TestRepeatedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: repeated{},
			want: Schema{
				reqField("NotRepeated", "BYTES"),
				repField("RepeatedByteSlice", "BYTES"),
				repField("Slice", "INTEGER"),
				repField("Array", "BOOLEAN"),
			},
		},
		{
			in: nestedRepeated{},
			want: Schema{
				reqField("NotRepeated", "INTEGER"),
				{
					Name:     "Repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
				{
					Name:     "RepeatedPtr",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
	}

	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type Embedded struct {
	Embedded int
}

type embedded struct {
	Embedded2 int
}

type nestedEmbedded struct {
	Embedded
	embedded
}

func TestEmbeddedInference(t *testing.T) {
	got, err := InferSchema(nestedEmbedded{})
	if err != nil {
		t.Fatal(err)
	}
	want := Schema{
		reqField("Embedded", "INTEGER"),
		reqField("Embedded2", "INTEGER"),
	}
	if !testutil.Equal(got, want) {
		t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
	}
}

func TestRecursiveInference(t *testing.T) {
	type List struct {
		Val  int
		Next *List
	}

	_, err := InferSchema(List{})
	if err == nil {
		t.Fatal("got nil, want error")
	}
}

type withTags struct {
	NoTag         int
	ExcludeTag    int `bigquery:"-"`
	SimpleTag     int `bigquery:"simple_tag"`
	UnderscoreTag int `bigquery:"_id"`
	MixedCase     int `bigquery:"MIXEDcase"`
}

type withTagsNested struct {
	Nested          withTags `bigquery:"nested"`
	NestedAnonymous struct {
		ExcludeTag int `bigquery:"-"`
		Inside     int `bigquery:"inside"`
	} `bigquery:"anon"`
}

type withTagsRepeated struct {
	Repeated          []withTags `bigquery:"repeated"`
	RepeatedAnonymous []struct {
		ExcludeTag int `bigquery:"-"`
		Inside     int `bigquery:"inside"`
	} `bigquery:"anon"`
}

type withTagsEmbedded struct {
	withTags
}

var withTagsSchema = Schema{
	reqField("NoTag", "INTEGER"),
	reqField("simple_tag", "INTEGER"),
	reqField("_id", "INTEGER"),
	reqField("MIXEDcase", "INTEGER"),
}

func TestTagInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in:   withTags{},
			want: withTagsSchema,
		},
		{
			in: withTagsNested{},
			want: Schema{
				&FieldSchema{
					Name:     "nested",
					Required: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in: withTagsRepeated{},
			want: Schema{
				&FieldSchema{
					Name:     "repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in:   withTagsEmbedded{},
			want: withTagsSchema,
		},
	}
	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

func TestTagInferenceErrors(t *testing.T) {
	testCases := []struct {
		in  interface{}
		err error
	}{
		{
			in: struct {
				LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupporedStartChar int `bigquery:"øab"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupportedEndChar int `bigquery:"abø"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupportedMiddleChar int `bigquery:"aøb"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				StartInt int `bigquery:"1abc"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				Hyphens int `bigquery:"a-b"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				OmitEmpty int `bigquery:"abc,omitempty"`
			}{},
			err: errInvalidFieldName,
		},
	}
	for i, tc := range testCases {
		want := tc.err
		_, got := InferSchema(tc.in)
		if got != want {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
		}
	}
}

func TestSchemaErrors(t *testing.T) {
	testCases := []struct {
		in  interface{}
		err error
	}{
		{
			in:  []byte{},
			err: errNoStruct,
		},
		{
			in:  new(int),
			err: errNoStruct,
		},
		{
			in:  struct{ Uint uint }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Uint64 uint64 }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Uintptr uintptr }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Complex complex64 }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Map map[string]int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Chan chan bool }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Ptr *int }{},
			err: errNoStruct,
		},
		{
			in:  struct{ Interface interface{} }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ MultiDimensional [][]int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ MultiDimensional [][][]byte }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ ChanSlice []chan bool }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ NestedChan struct{ Chan []chan bool } }{},
			err: errUnsupportedFieldType,
		},
	}
	for _, tc := range testCases {
		want := tc.err
		_, got := InferSchema(tc.in)
		if got != want {
			t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
		}
	}
}

func TestHasRecursiveType(t *testing.T) {
	type (
		nonStruct int
		nonRec    struct{ A string }
		dup       struct{ A, B nonRec }
		rec       struct {
			A int
			B *rec
		}
		recUnexported struct {
			A int
			b *rec
		}
		hasRec struct {
			A int
			R *rec
		}
		recSlicePointer struct {
			A []*recSlicePointer
		}
	)
	for _, test := range []struct {
		in   interface{}
		want bool
	}{
		{nonStruct(0), false},
		{nonRec{}, false},
		{dup{}, false},
		{rec{}, true},
		{recUnexported{}, false},
		{hasRec{}, true},
		{&recSlicePointer{}, true},
	} {
		got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
		if err != nil {
			t.Fatal(err)
		}
		if got != test.want {
			t.Errorf("%T: got %t, want %t", test.in, got, test.want)
		}
	}
}
758
vendor/cloud.google.com/go/bigquery/service.go
generated
vendored
Normal file
@@ -0,0 +1,758 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"

	"cloud.google.com/go/internal"
	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
)

// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
	// Jobs
	insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
	getJob(ctx context.Context, projectId, jobID string) (*Job, error)
	jobCancel(ctx context.Context, projectId, jobID string) error
	jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)

	// Tables
	createTable(ctx context.Context, conf *createTableConf) error
	getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
	deleteTable(ctx context.Context, projectID, datasetID, tableID string) error

	// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
	listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
	patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)

	// Table data
	readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
	insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error

	// Datasets
	insertDataset(ctx context.Context, datasetID, projectID string) error
	deleteDataset(ctx context.Context, datasetID, projectID string) error
	getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)

	// Misc

	// Waits for a query to complete.
	waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error)

	// listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
	listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
}

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}

type bigqueryService struct {
	s *bq.Service
}

func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
	s, err := bq.New(client)
	if err != nil {
		return nil, fmt.Errorf("constructing bigquery client: %v", err)
	}
	s.BasePath = endpoint

	return &bigqueryService{s: s}, nil
}

// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		var err error
		token, err = getPage(token)
		if err != nil {
			return err
		}
		if token == "" {
			return nil
		}
	}
}
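
// Sketch (added for illustration; not part of the upstream file): driving a
// paginated listing with getPages. The page-fetching closure here is
// hypothetical; a real caller would issue one API request per invocation.
func exampleGetPages() ([]string, error) {
	var all []string
	err := getPages("", func(token string) (string, error) {
		// Fetch the page identified by token, collect its items, and
		// return the next page token ("" ends the loop).
		page, next := []string{"item-" + token}, ""
		all = append(all, page...)
		return next, nil
	})
	return all, err
}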

type insertJobConf struct {
	job   *bq.Job
	media io.Reader
}

// Calls the Jobs.Insert RPC and returns a Job. Callers must set the returned Job's
// client.
func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
	setClientHeader(call.Header())
	if conf.media != nil {
		call.Media(conf.media)
	}
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if conf.job.JobReference != nil && conf.media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}

	var dt *bq.TableReference
	if qc := res.Configuration.Query; qc != nil {
		dt = qc.DestinationTable
	}
	return &Job{
		projectID:        projectID,
		jobID:            res.JobReference.JobId,
		destinationTable: dt,
	}, nil
}

type pagingConf struct {
	recordsPerRequest    int64
	setRecordsPerRequest bool

	startIndex uint64
}

type readTableConf struct {
	projectID, datasetID, tableID string
	paging                        pagingConf
	schema                        Schema // lazily initialized when the first page of data is fetched.
}

func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readTabledata(ctx, conf, token)
}

func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }

type readDataResult struct {
	pageToken string
	rows      [][]Value
	totalRows uint64
	schema    Schema
}

func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
	// Prepare request to fetch one page of table data.
	req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
	setClientHeader(req.Header())

	if pageToken != "" {
		req.PageToken(pageToken)
	} else {
		req.StartIndex(conf.paging.startIndex)
	}

	if conf.paging.setRecordsPerRequest {
		req.MaxResults(conf.paging.recordsPerRequest)
	}

	// Fetch the table schema in the background, if necessary.
	var schemaErr error
	var schemaFetch sync.WaitGroup
	if conf.schema == nil {
		schemaFetch.Add(1)
		go func() {
			defer schemaFetch.Done()
			var t *bq.Table
			t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
				Fields("schema").
				Context(ctx).
				Do()
			if schemaErr == nil && t.Schema != nil {
				conf.schema = convertTableSchema(t.Schema)
			}
		}()
	}

	res, err := req.Context(ctx).Do()
	if err != nil {
		return nil, err
	}

	schemaFetch.Wait()
	if schemaErr != nil {
		return nil, schemaErr
	}

	result := &readDataResult{
		pageToken: res.PageToken,
		totalRows: uint64(res.TotalRows),
		schema:    conf.schema,
	}
	result.rows, err = convertRows(res.Rows, conf.schema)
	if err != nil {
		return nil, err
	}
	return result, nil
}
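
// Sketch (added; not part of the upstream file): the overlap pattern that
// readTabledata uses. One request runs in the background under a WaitGroup
// while the main request proceeds; the caller waits before reading the
// shared error variable. Both closures here are hypothetical.
func exampleOverlapFetch(fetchSchema, fetchRows func() error) error {
	var wg sync.WaitGroup
	var schemaErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		schemaErr = fetchSchema() // runs concurrently with fetchRows
	}()
	if err := fetchRows(); err != nil {
		return err // mirrors readTabledata: the goroutine finishes on its own
	}
	wg.Wait() // ensure the write to schemaErr is visible before reading it
	return schemaErr
}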

func (s *bigqueryService) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	req := s.s.Jobs.GetQueryResults(projectID, jobID).Context(ctx).MaxResults(0)
	setClientHeader(req.Header())
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = req.Do()
		if err != nil {
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return convertTableSchema(res.Schema), nil
}

type insertRowsConf struct {
	templateSuffix      string
	ignoreUnknownValues bool
	skipInvalidRows     bool
}

func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	req := &bq.TableDataInsertAllRequest{
		TemplateSuffix:      conf.templateSuffix,
		IgnoreUnknownValues: conf.ignoreUnknownValues,
		SkipInvalidRows:     conf.skipInvalidRows,
	}
	for _, row := range rows {
		m := make(map[string]bq.JsonValue)
		for k, v := range row.Row {
			m[k] = bq.JsonValue(v)
		}
		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
			InsertId: row.InsertID,
			Json:     m,
		})
	}
	call := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
	setClientHeader(call.Header())
	var res *bq.TableDataInsertAllResponse
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	if err != nil {
		return err
	}
	if len(res.InsertErrors) == 0 {
		return nil
	}

	var errs PutMultiError
	for _, e := range res.InsertErrors {
		if int(e.Index) >= len(rows) { // guard against out-of-range indexes from the service
			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
		}
		rie := RowInsertionError{
			InsertID: rows[e.Index].InsertID,
			RowIndex: int(e.Index),
		}
		for _, errp := range e.Errors {
			rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
		}
		errs = append(errs, rie)
	}
	return errs
}
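
// Sketch (added; not part of the upstream file): how a caller might unpack
// the PutMultiError that insertRows returns when only some streamed rows
// are rejected.
func examplePutMultiError(err error) {
	if multi, ok := err.(PutMultiError); ok {
		for _, rie := range multi {
			fmt.Printf("row %d (insert ID %q) failed: %v\n", rie.RowIndex, rie.InsertID, rie.Errors)
		}
	}
}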

func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
	job, err := s.getJobInternal(ctx, projectID, jobID, "configuration")
	if err != nil {
		return nil, err
	}
	var isQuery bool
	var dest *bq.TableReference
	if job.Configuration.Query != nil {
		isQuery = true
		dest = job.Configuration.Query.DestinationTable
	}
	return &Job{
		projectID:        projectID,
		jobID:            jobID,
		isQuery:          isQuery,
		destinationTable: dest,
	}, nil
}

func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	job, err := s.getJobInternal(ctx, projectID, jobID, "status", "statistics")
	if err != nil {
		return nil, err
	}
	st, err := jobStatusFromProto(job.Status)
	if err != nil {
		return nil, err
	}
	st.Statistics = jobStatisticsFromProto(job.Statistics)
	return st, nil
}

func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
	var job *bq.Job
	err := runWithRetry(ctx, func() (err error) {
		job, err = s.s.Jobs.Get(projectID, jobID).
			Fields(fields...).
			Context(ctx).
			Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return job, nil
}

func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	return runWithRetry(ctx, func() error {
		_, err := s.s.Jobs.Cancel(projectID, jobID).
			Fields(). // We don't need any of the response data.
			Context(ctx).
			Do()
		return err
	})
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
	state, ok := stateMap[status.State]
	if !ok {
		return nil, fmt.Errorf("unexpected job state: %v", status.State)
	}

	newStatus := &JobStatus{
		State: state,
		err:   nil,
	}
	if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
		newStatus.err = err
	}

	for _, ep := range status.Errors {
		newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
	}
	return newStatus, nil
}

func jobStatisticsFromProto(s *bq.JobStatistics) *JobStatistics {
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, convertTableReference(tr))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        convertTableSchema(s.Query.Schema),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	return js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var res []*ExplainQueryStage
	for _, s := range stages {
		var steps []*ExplainQueryStep
		for _, p := range s.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     p.Kind,
				Substeps: p.Substeps,
			})
		}
		res = append(res, &ExplainQueryStage{
			ComputeRatioAvg: s.ComputeRatioAvg,
			ComputeRatioMax: s.ComputeRatioMax,
			ID:              s.Id,
			Name:            s.Name,
			ReadRatioAvg:    s.ReadRatioAvg,
			ReadRatioMax:    s.ReadRatioMax,
			RecordsRead:     s.RecordsRead,
			RecordsWritten:  s.RecordsWritten,
			Status:          s.Status,
			Steps:           steps,
			WaitRatioAvg:    s.WaitRatioAvg,
			WaitRatioMax:    s.WaitRatioMax,
			WriteRatioAvg:   s.WriteRatioAvg,
			WriteRatioMax:   s.WriteRatioMax,
		})
	}
	return res
}

// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
	var tables []*Table
	req := s.s.Tables.List(projectID, datasetID).
		PageToken(pageToken).
		Context(ctx)
	setClientHeader(req.Header())
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	for _, t := range res.Tables {
		tables = append(tables, convertTableReference(t.TableReference))
	}
	return tables, res.NextPageToken, nil
}

type createTableConf struct {
	projectID, datasetID, tableID string
	expiration                    time.Time
	viewQuery                     string
	schema                        *bq.TableSchema
	useStandardSQL                bool
	timePartitioning              *TimePartitioning
}

// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
	table := &bq.Table{
		// TODO(jba): retry? Is this always idempotent?
		TableReference: &bq.TableReference{
			ProjectId: conf.projectID,
			DatasetId: conf.datasetID,
			TableId:   conf.tableID,
		},
	}
	if !conf.expiration.IsZero() {
		table.ExpirationTime = conf.expiration.UnixNano() / 1e6
	}
	// TODO(jba): make it impossible to provide both a view query and a schema.
	if conf.viewQuery != "" {
		table.View = &bq.ViewDefinition{
			Query: conf.viewQuery,
		}
		if conf.useStandardSQL {
			table.View.UseLegacySql = false
			table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
		}
	}
	if conf.schema != nil {
		table.Schema = conf.schema
	}
	if conf.timePartitioning != nil {
		table.TimePartitioning = &bq.TimePartitioning{
			Type:         "DAY",
			ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000),
		}
	}

	req := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx)
	setClientHeader(req.Header())
	_, err := req.Do()
	return err
}

func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
	req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	var table *bq.Table
	err := runWithRetry(ctx, func() (err error) {
		table, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
	req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	return runWithRetry(ctx, func() error { return req.Do() })
}

func bqTableToMetadata(t *bq.Table) *TableMetadata {
	md := &TableMetadata{
		Description:      t.Description,
		Name:             t.FriendlyName,
		Type:             TableType(t.Type),
		ID:               t.Id,
		NumBytes:         t.NumBytes,
		NumRows:          t.NumRows,
		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
		CreationTime:     unixMillisToTime(t.CreationTime),
		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
	}
	if t.Schema != nil {
		md.Schema = convertTableSchema(t.Schema)
	}
	if t.View != nil {
		md.View = t.View.Query
	}
	if t.TimePartitioning != nil {
		md.TimePartitioning = &TimePartitioning{
			Expiration: time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond,
		}
	}
	if t.StreamingBuffer != nil {
		md.StreamingBuffer = &StreamingBuffer{
			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
		}
	}
	return md
}

func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
	// TODO(jba): access
	return &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		ID:                     d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
	}
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}
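
// Added illustration (not upstream code): the zero special case keeps unset
// API timestamps distinct from the 1970 epoch.
func exampleUnixMillisToTime() {
	fmt.Println(unixMillisToTime(0).IsZero())                 // true
	fmt.Println(unixMillisToTime(1).Equal(time.Unix(0, 1e6))) // true: 1ms after the epoch
}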

func convertTableReference(tr *bq.TableReference) *Table {
	return &Table{
		ProjectID: tr.ProjectId,
		DatasetID: tr.DatasetId,
		TableID:   tr.TableId,
	}
}

// patchTableConf contains fields to be patched.
type patchTableConf struct {
	// These fields are omitted from the patch operation if nil.
	Description *string
	Name        *string
	Schema      Schema
}

func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
	t := &bq.Table{}
	forceSend := func(field string) {
		t.ForceSendFields = append(t.ForceSendFields, field)
	}

	if conf.Description != nil {
		t.Description = *conf.Description
		forceSend("Description")
	}
	if conf.Name != nil {
		t.FriendlyName = *conf.Name
		forceSend("FriendlyName")
	}
	if conf.Schema != nil {
		t.Schema = conf.Schema.asTableSchema()
		forceSend("Schema")
	}
	table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
	// TODO(jba): retry?
	ds := &bq.Dataset{
		DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
	}
	req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
	setClientHeader(req.Header())
	_, err := req.Do()
	return err
}

func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
	req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
	setClientHeader(req.Header())
	return runWithRetry(ctx, func() error { return req.Do() })
}

func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
	req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
	setClientHeader(req.Header())
	var ds *bq.Dataset
	if err := runWithRetry(ctx, func() (err error) {
		ds, err = req.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqDatasetToMetadata(ds), nil
}

func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
	req := s.s.Datasets.List(projectID).
		Context(ctx).
		PageToken(pageToken).
		All(all)
	setClientHeader(req.Header())
	if maxResults > 0 {
		req.MaxResults(int64(maxResults))
	}
	if filter != "" {
		req.Filter(filter)
	}
	var res *bq.DatasetList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	var datasets []*Dataset
	for _, d := range res.Datasets {
		datasets = append(datasets, s.convertListedDataset(d))
	}
	return datasets, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
	return &Dataset{
		ProjectID: d.DatasetReference.ProjectId,
		DatasetID: d.DatasetReference.DatasetId,
	}
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
	backoff := gax.Backoff{
		Initial:    2 * time.Second,
		Max:        32 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		return !retryableError(err), err
	})
}

// Use the criteria in https://cloud.google.com/bigquery/troubleshooting-errors.
func retryableError(err error) bool {
	e, ok := err.(*googleapi.Error)
	if !ok {
		return false
	}
	var reason string
	if len(e.Errors) > 0 {
		reason = e.Errors[0].Reason
	}
	return reason == "backendError" && (e.Code == 500 || e.Code == 503)
}
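
// Added illustration (not upstream code): what the criteria above accept.
// Only a backendError reason paired with a 500 or 503 status is retried.
func exampleRetryableError() {
	transient := &googleapi.Error{
		Code:   503,
		Errors: []googleapi.ErrorItem{{Reason: "backendError"}},
	}
	fmt.Println(retryableError(transient))                    // true
	fmt.Println(retryableError(&googleapi.Error{Code: 404})) // false
}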
84
vendor/cloud.google.com/go/bigquery/service_test.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"testing"
	"time"

	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)

func TestBQTableToMetadata(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	aTimeMillis := aTime.UnixNano() / 1e6
	for _, test := range []struct {
		in   *bq.Table
		want *TableMetadata
	}{
		{&bq.Table{}, &TableMetadata{}}, // test minimal case
		{
			&bq.Table{
				CreationTime:     aTimeMillis,
				Description:      "desc",
				Etag:             "etag",
				ExpirationTime:   aTimeMillis,
				FriendlyName:     "fname",
				Id:               "id",
				LastModifiedTime: uint64(aTimeMillis),
				Location:         "loc",
				NumBytes:         123,
				NumLongTermBytes: 23,
				NumRows:          7,
				StreamingBuffer: &bq.Streamingbuffer{
					EstimatedBytes:  11,
					EstimatedRows:   3,
					OldestEntryTime: uint64(aTimeMillis),
				},
				TimePartitioning: &bq.TimePartitioning{
					ExpirationMs: 7890,
					Type:         "DAY",
				},
				Type: "EXTERNAL",
				View: &bq.ViewDefinition{Query: "view-query"},
			},
			&TableMetadata{
				Description:      "desc",
				Name:             "fname",
				View:             "view-query",
				ID:               "id",
				Type:             ExternalTable,
				ExpirationTime:   aTime.Truncate(time.Millisecond),
				CreationTime:     aTime.Truncate(time.Millisecond),
				LastModifiedTime: aTime.Truncate(time.Millisecond),
				NumBytes:         123,
				NumRows:          7,
				TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond},
				StreamingBuffer: &StreamingBuffer{
					EstimatedBytes:  11,
					EstimatedRows:   3,
					OldestEntryTime: aTime,
				},
			},
		},
	} {
		got := bqTableToMetadata(test.in)
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
		}
	}
}
253
vendor/cloud.google.com/go/bigquery/table.go
generated
vendored
Normal file
@@ -0,0 +1,253 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"time"

	"golang.org/x/net/context"

	"cloud.google.com/go/internal/optional"
	bq "google.golang.org/api/bigquery/v2"
)

// A Table is a reference to a BigQuery table.
type Table struct {
	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
	// In this case the result will be stored in an ephemeral table.
	ProjectID string
	DatasetID string
	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
	// The maximum length is 1,024 characters.
	TableID string

	c *Client
}

// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
	Description string // The user-friendly description of this table.
	Name        string // The user-friendly name for this table.
	Schema      Schema
	View        string

	ID   string // An opaque ID uniquely identifying the table.
	Type TableType

	// The time when this table expires. If not set, the table will persist
	// indefinitely. Expired tables will be deleted and their storage reclaimed.
	ExpirationTime time.Time

	CreationTime     time.Time
	LastModifiedTime time.Time

	// The size of the table in bytes.
	// This does not include data that is being buffered during a streaming insert.
	NumBytes int64

	// The number of rows of data in this table.
	// This does not include data that is being buffered during a streaming insert.
	NumRows uint64

	// The time-based partitioning settings for this table.
	TimePartitioning *TimePartitioning

	// Contains information regarding this table's streaming buffer, if one is
	// present. This field will be nil if the table is not being streamed to or if
	// there is no data in the streaming buffer.
	StreamingBuffer *StreamingBuffer
}

// TableCreateDisposition specifies the circumstances under which the destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string

const (
	// CreateIfNeeded will create the table if it does not already exist.
	// Tables are created atomically on successful completion of a job.
	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"

	// CreateNever ensures the table must already exist and will not be
	// automatically created.
	CreateNever TableCreateDisposition = "CREATE_NEVER"
)

// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string

const (
	// WriteAppend will append to any existing data in the destination table.
	// Data is appended atomically on successful completion of a job.
	WriteAppend TableWriteDisposition = "WRITE_APPEND"

	// WriteTruncate overrides the existing data in the destination table.
	// Data is overwritten atomically on successful completion of a job.
	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"

	// WriteEmpty fails writes if the destination table already contains data.
	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)

// TableType is the type of table.
type TableType string

const (
	RegularTable  TableType = "TABLE"
	ViewTable     TableType = "VIEW"
	ExternalTable TableType = "EXTERNAL"
)

// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
	// A lower-bound estimate of the number of bytes currently in the streaming
	// buffer.
	EstimatedBytes uint64

	// A lower-bound estimate of the number of rows currently in the streaming
	// buffer.
	EstimatedRows uint64

	// The time of the oldest entry in the streaming buffer.
	OldestEntryTime time.Time
}

func (t *Table) tableRefProto() *bq.TableReference {
	return &bq.TableReference{
		ProjectId: t.ProjectID,
		DatasetId: t.DatasetID,
		TableId:   t.TableID,
	}
}

// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
	return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}
|
||||
|
||||
// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
|
||||
func (t *Table) implicitTable() bool {
|
||||
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
|
||||
}
|
||||
|
||||
// Create creates a table in the BigQuery service.
|
||||
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
|
||||
conf := &createTableConf{
|
||||
projectID: t.ProjectID,
|
||||
datasetID: t.DatasetID,
|
||||
tableID: t.TableID,
|
||||
}
|
||||
for _, o := range options {
|
||||
o.customizeCreateTable(conf)
|
||||
}
|
||||
return t.c.service.createTable(ctx, conf)
|
||||
}
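
// Example (illustrative sketch, not part of the generated file): creating
// a table that expires after one day. It assumes "client" is a *Client as
// returned by NewClient and ctx is a context.Context; the CreateTableOption
// values accepted by Create are defined below.
//
//	t := client.Dataset("my_dataset").Table("my_table")
//	if err := t.Create(ctx, TableExpiration(time.Now().Add(24*time.Hour))); err != nil {
//		// TODO: handle error.
//	}
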
// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
	return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
	return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// A CreateTableOption is an optional argument to Table.Create.
type CreateTableOption interface {
	customizeCreateTable(*createTableConf)
}

type tableExpiration time.Time

// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }

func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
	conf.expiration = time.Time(opt)
}

type viewQuery string

// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }

func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
	conf.viewQuery = string(opt)
}

type useStandardSQL struct{}

// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL.
// The default setting is false (using legacy SQL).
func UseStandardSQL() CreateTableOption { return useStandardSQL{} }

func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
	conf.useStandardSQL = true
}

// TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
type TimePartitioning struct {
	// (Optional) The amount of time to keep the storage for a partition.
	// If the duration is empty (0), the data in the partitions does not expire.
	Expiration time.Duration
}

func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
	conf.timePartitioning = &opt
}
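
// Example (illustrative sketch): creating a time-partitioned table whose
// partitions are kept for 30 days. TimePartitioning has a value-receiver
// customizeCreateTable method, so a TimePartitioning literal satisfies
// CreateTableOption directly.
//
//	t := client.Dataset("my_dataset").Table("events")
//	err := t.Create(ctx, TimePartitioning{Expiration: 30 * 24 * time.Hour})
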
// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
	return newRowIterator(ctx, t.c.service, &readTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	})
}

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
	var conf patchTableConf
	if tm.Description != nil {
		s := optional.ToString(tm.Description)
		conf.Description = &s
	}
	if tm.Name != nil {
		s := optional.ToString(tm.Name)
		conf.Name = &s
	}
	conf.Schema = tm.Schema
	return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf)
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
	// Description is the user-friendly description of this table.
	Description optional.String

	// Name is the user-friendly name for this table.
	Name optional.String

	// Schema is the table's schema.
	// When updating a schema, you can add columns but not remove them.
	Schema Schema
	// TODO(jba): support updating the view
}
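
// Example (illustrative sketch): updating only the description. Fields of
// TableMetadataToUpdate left nil are not modified, so Name and Schema are
// untouched here.
//
//	md, err := t.Update(ctx, TableMetadataToUpdate{
//		Description: "table of daily events",
//	})
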
177
vendor/cloud.google.com/go/bigquery/uploader.go
generated
vendored
Normal file
@@ -0,0 +1,177 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"reflect"

	"golang.org/x/net/context"
)

// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
	t *Table

	// SkipInvalidRows causes rows containing invalid data to be silently
	// ignored. The default value is false, which causes the entire request to
	// fail if there is an attempt to insert an invalid row.
	SkipInvalidRows bool

	// IgnoreUnknownValues causes values not matching the schema to be ignored.
	// The default value is false, which causes records containing such values
	// to be treated as invalid records.
	IgnoreUnknownValues bool

	// TableTemplateSuffix allows Uploaders to create tables automatically.
	//
	// Experimental: this option is experimental and may be modified or removed in future versions,
	// regardless of any other documented package stability guarantees.
	//
	// When you specify a suffix, the table you upload data to
	// will be used as a template for creating a new table, with the same schema,
	// called <table> + <suffix>.
	//
	// More information is available at
	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
	TableTemplateSuffix string
}

// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
func (t *Table) Uploader() *Uploader {
	return &Uploader{t: t}
}

// Put uploads one or more rows to the BigQuery service.
//
// If src is a ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
	savers, err := valueSavers(src)
	if err != nil {
		return err
	}
	return u.putMulti(ctx, savers)
}
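
// Example (illustrative sketch): streaming a slice of structs. The schema
// is inferred from the assumed Item type, and each element becomes one row.
//
//	type Item struct {
//		Name  string
//		Count int
//	}
//
//	u := client.Dataset("my_dataset").Table("items").Uploader()
//	u.SkipInvalidRows = true // drop bad rows instead of failing the request
//	items := []Item{{Name: "a", Count: 1}, {Name: "b", Count: 2}}
//	if err := u.Put(ctx, items); err != nil {
//		// err may be a PutMultiError listing the rows that failed.
//	}
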
func valueSavers(src interface{}) ([]ValueSaver, error) {
	saver, ok, err := toValueSaver(src)
	if err != nil {
		return nil, err
	}
	if ok {
		return []ValueSaver{saver}, nil
	}
	srcVal := reflect.ValueOf(src)
	if srcVal.Kind() != reflect.Slice {
		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
	}
	var savers []ValueSaver
	for i := 0; i < srcVal.Len(); i++ {
		s := srcVal.Index(i).Interface()
		saver, ok, err := toValueSaver(s)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
		}
		savers = append(savers, saver)
	}
	return savers, nil
}

// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
	if _, ok := x.(StructSaver); ok {
		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
	}
	var insertID string
	// Handle StructSavers specially so we can infer the schema if necessary.
	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
		x = ss.Struct
		insertID = ss.InsertID
		// Fall through so we can infer the schema.
	}
	if saver, ok := x.(ValueSaver); ok {
		return saver, ok, nil
	}
	v := reflect.ValueOf(x)
	// Support Put with []interface{}
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil, false, nil
	}
	schema, err := inferSchemaReflectCached(v.Type())
	if err != nil {
		return nil, false, err
	}
	return &StructSaver{
		Struct:   x,
		InsertID: insertID,
		Schema:   schema,
	}, true, nil
}

func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
	var rows []*insertionRow
	for _, saver := range src {
		row, insertID, err := saver.Save()
		if err != nil {
			return err
		}
		rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
	}

	return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
		skipInvalidRows:     u.SkipInvalidRows,
		ignoreUnknownValues: u.IgnoreUnknownValues,
		templateSuffix:      u.TableTemplateSuffix,
	})
}

// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
	// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
	// this row on a best-effort basis.
	InsertID string
	// The data to be inserted, represented as a map from field name to Value.
	Row map[string]Value
}
281
vendor/cloud.google.com/go/bigquery/uploader_test.go
generated
vendored
Normal file
@@ -0,0 +1,281 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"testing"

	"github.com/google/go-cmp/cmp"

	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"
)

type testSaver struct {
	ir  *insertionRow
	err error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.ir.Row, ts.ir.InsertID, ts.err
}

func TestRejectsNonValueSavers(t *testing.T) {
	client := &Client{projectID: "project-id"}
	u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}
	inputs := []interface{}{
		1,
		[]int{1, 2},
		[]interface{}{
			testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
			1,
		},
		StructSaver{},
	}
	for _, in := range inputs {
		if err := u.Put(context.Background(), in); err == nil {
			t.Errorf("put value: %v; got nil, want error", in)
		}
	}
}

type insertRowsRecorder struct {
	rowBatches [][]*insertionRow
	service
}

func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	irr.rowBatches = append(irr.rowBatches, rows)
	return nil
}

func TestInsertsData(t *testing.T) {
	testCases := []struct {
		data [][]*insertionRow
	}{
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
				{
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
				{
					&insertionRow{"c", map[string]Value{"three": 3}},
					&insertionRow{"d", map[string]Value{"four": 4}},
				},
			},
		},
	}
	for _, tc := range testCases {
		irr := &insertRowsRecorder{}
		client := &Client{
			projectID: "project-id",
			service:   irr,
		}
		u := client.Dataset("dataset-id").Table("table-id").Uploader()
		for _, batch := range tc.data {
			if len(batch) == 0 {
				continue
			}
			var toUpload interface{}
			if len(batch) == 1 {
				toUpload = testSaver{ir: batch[0]}
			} else {
				savers := []testSaver{}
				for _, row := range batch {
					savers = append(savers, testSaver{ir: row})
				}
				toUpload = savers
			}

			err := u.Put(context.Background(), toUpload)
			if err != nil {
				t.Errorf("expected successful Put of ValueSaver; got: %v", err)
			}
		}
		if got, want := irr.rowBatches, tc.data; !testutil.Equal(got, want) {
			t.Errorf("got: %v, want: %v", got, want)
		}
	}
}

type uploadOptionRecorder struct {
	received *insertRowsConf
	service
}

func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	u.received = conf
	return nil
}

func TestUploadOptionsPropagate(t *testing.T) {
	// We don't care about the data in this test case.
	dummyData := testSaver{ir: &insertionRow{}}
	recorder := new(uploadOptionRecorder)
	c := &Client{service: recorder}
	table := &Table{
		ProjectID: "project-id",
		DatasetID: "dataset-id",
		TableID:   "table-id",
		c:         c,
	}

	tests := [...]struct {
		ul   *Uploader
		conf insertRowsConf
	}{
		{
			// test zero options lead to zero value for insertRowsConf
			ul: table.Uploader(),
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix: "suffix",
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.IgnoreUnknownValues = true
				return u
			}(),
			conf: insertRowsConf{
				ignoreUnknownValues: true,
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				skipInvalidRows: true,
			},
		},
		{ // multiple upload options combine
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				u.IgnoreUnknownValues = true
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix:      "suffix",
				skipInvalidRows:     true,
				ignoreUnknownValues: true,
			},
		},
	}

	for i, tc := range tests {
		err := tc.ul.Put(context.Background(), dummyData)
		if err != nil {
			t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
		}

		if recorder.received == nil {
			t.Fatalf("%d: received no options at all!", i)
		}

		want := tc.conf
		got := *recorder.received
		if got != want {
			t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul)
		}
	}
}

func TestValueSavers(t *testing.T) {
	ts := &testSaver{ir: &insertionRow{}}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
		{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
			[]ValueSaver{
				&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
			}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}
47
vendor/cloud.google.com/go/bigquery/utils_test.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultGCS() *GCSReference {
	return &GCSReference{
		uris: []string{"uri"},
	}
}

var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}

type testService struct {
	*bq.Job

	service
}

func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	s.Job = conf.job
	return &Job{}, nil
}

func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	return &JobStatus{State: Done}, nil
}
657
vendor/cloud.google.com/go/bigquery/value.go
generated
vendored
Normal file
@@ -0,0 +1,657 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"time"

	"cloud.google.com/go/civil"

	bq "google.golang.org/api/bigquery/v2"
)

// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
	Load(v []Value, s Schema) error
}

// valueList converts a []Value to implement ValueLoader.
type valueList []Value

// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
	*vs = append((*vs)[:0], v...)
	return nil
}

// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value

// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
	if *vm == nil {
		*vm = map[string]Value{}
	}
	loadMap(*vm, v, s)
	return nil
}

func loadMap(m map[string]Value, vals []Value, s Schema) {
	for i, f := range s {
		val := vals[i]
		var v interface{}
		switch {
		case f.Schema == nil:
			v = val
		case !f.Repeated:
			m2 := map[string]Value{}
			loadMap(m2, val.([]Value), f.Schema)
			v = m2
		default: // repeated and nested
			sval := val.([]Value)
			vs := make([]Value, len(sval))
			for j, e := range sval {
				m2 := map[string]Value{}
				loadMap(m2, e.([]Value), f.Schema)
				vs[j] = m2
			}
			v = vs
		}
		m[f.Name] = v
	}
}

type structLoader struct {
	typ reflect.Type // type of struct
	err error
	ops []structLoaderOp

	vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
	fieldIndex []int
	valueIndex int
	setFunc    setFunc
	repeated   bool
}

var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")

func setAny(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.Set(reflect.ValueOf(x))
	return nil
}

func setInt(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(int64)
	if v.OverflowInt(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetInt(xx)
	return nil
}

func setFloat(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(float64)
	if v.OverflowFloat(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetFloat(xx)
	return nil
}

func setBool(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetBool(x.(bool))
	return nil
}

func setString(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetString(x.(string))
	return nil
}

func setBytes(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetBytes(x.([]byte))
	return nil
}

// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
	if sl.err != nil {
		return sl.err
	}
	sl.vstructp = reflect.ValueOf(structp)
	typ := sl.vstructp.Type().Elem()
	if sl.typ == nil {
		// First call: remember the type and compile the schema.
		sl.typ = typ
		ops, err := compileToOps(typ, schema)
		if err != nil {
			sl.err = err
			return err
		}
		sl.ops = ops
	} else if sl.typ != typ {
		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
	}
	return nil
}

// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
	var ops []structLoaderOp
	fields, err := fieldCache.Fields(structType)
	if err != nil {
		return nil, err
	}
	for i, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case (BigQuery column names are case-insensitive,
		// and we want to act like encoding/json anyway).
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			// Ignore schema fields with no corresponding struct field.
			continue
		}
		op := structLoaderOp{
			fieldIndex: structField.Index,
			valueIndex: i,
		}
		t := structField.Type
		if schemaField.Repeated {
			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
					schemaField.Name, structField.Name, t)
			}
			t = t.Elem()
			op.repeated = true
		}
		if schemaField.Type == RecordFieldType {
			// Field can be a struct or a pointer to a struct.
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if t.Kind() != reflect.Struct {
				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
					structField.Name, structField.Type)
			}
			nested, err := compileToOps(t, schemaField.Schema)
			if err != nil {
				return nil, err
			}
			op.setFunc = func(v reflect.Value, val interface{}) error {
				return setNested(nested, v, val.([]Value))
			}
		} else {
			op.setFunc = determineSetFunc(t, schemaField.Type)
			if op.setFunc == nil {
				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
					schemaField.Name, schemaField.Type, structField.Name, t)
			}
		}
		ops = append(ops, op)
	}
	return ops, nil
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
	switch stype {
	case StringFieldType:
		if ftype.Kind() == reflect.String {
			return setString
		}

	case BytesFieldType:
		if ftype == typeOfByteSlice {
			return setBytes
		}

	case IntegerFieldType:
		if isSupportedIntType(ftype) {
			return setInt
		}

	case FloatFieldType:
		switch ftype.Kind() {
		case reflect.Float32, reflect.Float64:
			return setFloat
		}

	case BooleanFieldType:
		if ftype.Kind() == reflect.Bool {
			return setBool
		}

	case TimestampFieldType:
		if ftype == typeOfGoTime {
			return setAny
		}

	case DateFieldType:
		if ftype == typeOfDate {
			return setAny
		}

	case TimeFieldType:
		if ftype == typeOfTime {
			return setAny
		}

	case DateTimeFieldType:
		if ftype == typeOfDateTime {
			return setAny
		}
	}
	return nil
}

func (sl *structLoader) Load(values []Value, _ Schema) error {
	if sl.err != nil {
		return sl.err
	}
	return runOps(sl.ops, sl.vstructp.Elem(), values)
}

// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
	for _, op := range ops {
		field := vstruct.FieldByIndex(op.fieldIndex)
		var err error
		if op.repeated {
			err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
		} else {
			err = op.setFunc(field, values[op.valueIndex])
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
	// v is either a struct or a pointer to a struct.
	if v.Kind() == reflect.Ptr {
		// If the pointer is nil, set it to a zero struct value.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return runOps(ops, v, vals)
}

func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
	vlen := len(vslice)
	var flen int
	switch field.Type().Kind() {
	case reflect.Slice:
		// Make a slice of the right size, avoiding allocation if possible.
		switch {
		case field.Len() < vlen:
			field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
		case field.Len() > vlen:
			field.SetLen(vlen)
		}
		flen = vlen

	case reflect.Array:
		flen = field.Len()
		if flen > vlen {
			// Set extra elements to their zero value.
			z := reflect.Zero(field.Type().Elem())
			for i := vlen; i < flen; i++ {
				field.Index(i).Set(z)
			}
		}
	default:
		return fmt.Errorf("bigquery: impossible field type %s", field.Type())
	}
	for i, val := range vslice {
		if i < flen { // avoid writing past the end of a short array
			if err := setElem(field.Index(i), val); err != nil {
				return err
			}
		}
	}
	return nil
}

// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
	// Save returns a row to be inserted into a BigQuery table, represented
	// as a map from field name to Value.
	// If insertID is non-empty, BigQuery will use it to de-duplicate
	// insertions of this row on a best-effort basis.
	Save() (row map[string]Value, insertID string, err error)
}
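
// Example (illustrative sketch): a hand-written ValueSaver. The field name
// "count" is assumed to exist in the destination table's schema; the
// insert ID lets BigQuery de-duplicate retried rows.
//
//	type counterSaver struct {
//		id    string
//		count int64
//	}
//
//	func (cs counterSaver) Save() (map[string]Value, string, error) {
//		return map[string]Value{"count": cs.count}, cs.id, nil
//	}
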
// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	Row []Value
}

// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
	m, err := valuesToMap(vls.Row, vls.Schema)
	return m, vls.InsertID, err
}
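
// Example (illustrative sketch): saving a row against a schema known only
// at runtime. The Row values are assumed to line up, in order, with the
// fields of schema, and uploader is an assumed *Uploader.
//
//	vs := &ValuesSaver{
//		Schema:   schema,
//		InsertID: "row-1",
//		Row:      []Value{"widget", int64(7)},
//	}
//	err := uploader.Put(ctx, vs)
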
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
	if len(vs) != len(schema) {
		return nil, errors.New("Schema does not match length of row to be inserted")
	}

	m := make(map[string]Value)
	for i, fieldSchema := range schema {
		if fieldSchema.Type != RecordFieldType {
			m[fieldSchema.Name] = vs[i]
			continue
		}
		// Nested record, possibly repeated.
		vals, ok := vs[i].([]Value)
		if !ok {
			return nil, errors.New("nested record is not a []Value")
		}
		if !fieldSchema.Repeated {
			value, err := valuesToMap(vals, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			m[fieldSchema.Name] = value
			continue
		}
		// A repeated nested field is converted into a slice of maps.
		var maps []Value
		for _, v := range vals {
			sv, ok := v.([]Value)
			if !ok {
				return nil, errors.New("nested record in slice is not a []Value")
			}
			value, err := valuesToMap(sv, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			maps = append(maps, value)
		}
		m[fieldSchema.Name] = maps
	}
	return m, nil
}

// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
	// Schema determines what fields of the struct are uploaded. It should
	// match the table's schema.
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	// Struct should be a struct or a pointer to a struct.
	Struct interface{}
}

// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
	vstruct := reflect.ValueOf(ss.Struct)
	row, err = structToMap(vstruct, ss.Schema)
	if err != nil {
		return nil, "", err
	}
	return row, ss.InsertID, nil
}
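
// Example (illustrative sketch): attaching an insert ID to a struct row.
// Passing a *StructSaver with a nil Schema to Uploader.Put causes the
// schema to be inferred (see toValueSaver in uploader.go). Item is an
// assumed struct type; uploader is an assumed *Uploader.
//
//	err := uploader.Put(ctx, &StructSaver{
//		Struct:   Item{Name: "a", Count: 1},
//		InsertID: "dedup-key-1",
//	})
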
func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
	if vstruct.Kind() == reflect.Ptr {
		vstruct = vstruct.Elem()
	}
	if !vstruct.IsValid() {
		return nil, nil
	}
	m := map[string]Value{}
	if vstruct.Kind() != reflect.Struct {
		return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
	}
	fields, err := fieldCache.Fields(vstruct.Type())
	if err != nil {
		return nil, err
	}
	for _, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case.
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			continue
		}
		val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
		if err != nil {
			return nil, err
		}
		// Add the value to the map, unless it is nil.
		if val != nil {
			m[schemaField.Name] = val
		}
	}
	return m, nil
}

// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
// the schemaField as a guide.
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
// caller can easily identify a nil value.
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
	if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
		return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
			schemaField.Name, vfield.Type())
	}

	// A non-nested field can be represented by its Go value.
	if schemaField.Type != RecordFieldType {
		if !schemaField.Repeated || vfield.Len() > 0 {
			return vfield.Interface(), nil
		}
		// The service treats a null repeated field as an error. Return
		// nil to omit the field entirely.
		return nil, nil
	}
	// A non-repeated nested field is converted into a map[string]Value.
	if !schemaField.Repeated {
		m, err := structToMap(vfield, schemaField.Schema)
		if err != nil {
			return nil, err
		}
		if m == nil {
			return nil, nil
		}
		return m, nil
	}
	// A repeated nested field is converted into a slice of maps.
	if vfield.Len() == 0 {
		return nil, nil
	}
	var vals []Value
	for i := 0; i < vfield.Len(); i++ {
		m, err := structToMap(vfield.Index(i), schemaField.Schema)
		if err != nil {
			return nil, err
		}
		vals = append(vals, m)
	}
	return vals, nil
}

// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
	var rs [][]Value
	for _, r := range rows {
		row, err := convertRow(r, schema)
		if err != nil {
			return nil, err
		}
		rs = append(rs, row)
	}
	return rs, nil
}

func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
	if len(schema) != len(r.F) {
		return nil, errors.New("schema length does not match row length")
	}
	var values []Value
	for i, cell := range r.F {
		fs := schema[i]
		v, err := convertValue(cell.V, fs.Type, fs.Schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
	switch val := val.(type) {
	case nil:
		return nil, nil
	case []interface{}:
		return convertRepeatedRecord(val, typ, schema)
	case map[string]interface{}:
		return convertNestedRecord(val, schema)
	case string:
		return convertBasicType(val, typ)
	default:
		return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
	}
}

func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
	var values []Value
	for _, cell := range vals {
		// each cell contains a single entry, keyed by "v"
		val := cell.(map[string]interface{})["v"]
		v, err := convertValue(val, typ, schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
	// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.

	// Nested records are wrapped in a map with a single key, "f".
	record := val["f"].([]interface{})
	if len(record) != len(schema) {
		return nil, errors.New("schema length does not match record length")
	}

	var values []Value
	for i, cell := range record {
		// each cell contains a single entry, keyed by "v"
		val := cell.(map[string]interface{})["v"]

		fs := schema[i]
		v, err := convertValue(val, fs.Type, fs.Schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

// convertBasicType returns val as an interface with a concrete type specified by typ.
func convertBasicType(val string, typ FieldType) (Value, error) {
	switch typ {
	case StringFieldType:
		return val, nil
	case BytesFieldType:
		return base64.StdEncoding.DecodeString(val)
	case IntegerFieldType:
		return strconv.ParseInt(val, 10, 64)
	case FloatFieldType:
		return strconv.ParseFloat(val, 64)
	case BooleanFieldType:
		return strconv.ParseBool(val)
	case TimestampFieldType:
		f, err := strconv.ParseFloat(val, 64)
		return Value(time.Unix(0, int64(f*1e9)).UTC()), err
	case DateFieldType:
		return civil.ParseDate(val)
	case TimeFieldType:
		return civil.ParseTime(val)
	case DateTimeFieldType:
		return civil.ParseDateTime(val)
	default:
		return nil, fmt.Errorf("unrecognized type: %s", typ)
	}
}
887
vendor/cloud.google.com/go/bigquery/value_test.go
generated
vendored
Normal file
@@ -0,0 +1,887 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"cloud.google.com/go/civil"
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func TestConvertBasicValues(t *testing.T) {
|
||||
schema := []*FieldSchema{
|
||||
{Type: StringFieldType},
|
||||
{Type: IntegerFieldType},
|
||||
{Type: FloatFieldType},
|
||||
{Type: BooleanFieldType},
|
||||
{Type: BytesFieldType},
|
||||
}
|
||||
row := &bq.TableRow{
|
||||
F: []*bq.TableCell{
|
||||
{V: "a"},
|
||||
{V: "1"},
|
||||
{V: "1.2"},
|
||||
{V: "true"},
|
||||
{V: base64.StdEncoding.EncodeToString([]byte("foo"))},
|
||||
},
|
||||
}
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
want := []Value{"a", int64(1), 1.2, true, []byte("foo")}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertTime(t *testing.T) {
|
||||
// TODO(jba): add tests for civil time types.
|
||||
schema := []*FieldSchema{
|
||||
{Type: TimestampFieldType},
|
||||
}
|
||||
thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
|
||||
row := &bq.TableRow{
|
||||
F: []*bq.TableCell{
|
||||
{V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
|
||||
},
|
||||
}
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
if !got[0].(time.Time).Equal(thyme) {
|
||||
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
|
||||
}
|
||||
if got[0].(time.Time).Location() != time.UTC {
|
||||
t.Errorf("expected time zone UTC: got:\n%v", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertNullValues(t *testing.T) {
|
||||
schema := []*FieldSchema{
|
||||
{Type: StringFieldType},
|
||||
}
|
||||
row := &bq.TableRow{
|
||||
F: []*bq.TableCell{
|
||||
{V: nil},
|
||||
},
|
||||
}
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
want := []Value{nil}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicRepetition(t *testing.T) {
|
||||
schema := []*FieldSchema{
|
||||
{Type: IntegerFieldType, Repeated: true},
|
||||
}
|
||||
row := &bq.TableRow{
|
||||
F: []*bq.TableCell{
|
||||
{
|
||||
V: []interface{}{
|
||||
map[string]interface{}{
|
||||
"v": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
want := []Value{[]Value{int64(1), int64(2), int64(3)}}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNestedRecordContainingRepetition(t *testing.T) {
|
||||
schema := []*FieldSchema{
|
||||
{
|
||||
Type: RecordFieldType,
|
||||
Schema: Schema{
|
||||
{Type: IntegerFieldType, Repeated: true},
|
||||
},
|
||||
},
|
||||
}
|
||||
row := &bq.TableRow{
|
||||
F: []*bq.TableCell{
|
||||
{
|
||||
V: map[string]interface{}{
|
||||
"f": []interface{}{
|
||||
map[string]interface{}{
|
||||
"v": []interface{}{
|
||||
map[string]interface{}{"v": "1"},
|
||||
map[string]interface{}{"v": "2"},
|
||||
map[string]interface{}{"v": "3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeatedRecordContainingRepetition(t *testing.T) {
|
||||
schema := []*FieldSchema{
|
||||
{
|
||||
Type: RecordFieldType,
|
||||
Repeated: true,
|
||||
Schema: Schema{
|
||||
{Type: IntegerFieldType, Repeated: true},
|
||||
},
|
||||
},
|
||||
}
|
||||
row := &bq.TableRow{F: []*bq.TableCell{
|
||||
{
|
||||
V: []interface{}{ // repeated records.
|
||||
map[string]interface{}{ // first record.
|
||||
"v": map[string]interface{}{ // pointless single-key-map wrapper.
|
||||
"f": []interface{}{ // list of record fields.
|
||||
map[string]interface{}{ // only record (repeated ints)
|
||||
"v": []interface{}{ // pointless wrapper.
|
||||
map[string]interface{}{
|
||||
"v": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]interface{}{ // second record.
|
||||
"v": map[string]interface{}{
|
||||
"f": []interface{}{
|
||||
map[string]interface{}{
|
||||
"v": []interface{}{
|
||||
map[string]interface{}{
|
||||
"v": "4",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "5",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "6",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
|
||||
[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
|
||||
[]Value{ // the record is a list of length 1, containing an entry for the repeated integer field.
|
||||
[]Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3.
|
||||
},
|
||||
[]Value{ // second record
|
||||
[]Value{int64(4), int64(5), int64(6)},
|
||||
},
|
||||
},
|
||||
}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeatedRecordContainingRecord(t *testing.T) {
|
||||
schema := []*FieldSchema{
|
||||
{
|
||||
Type: RecordFieldType,
|
||||
Repeated: true,
|
||||
Schema: Schema{
|
||||
{
|
||||
Type: StringFieldType,
|
||||
},
|
||||
{
|
||||
Type: RecordFieldType,
|
||||
Schema: Schema{
|
||||
{Type: IntegerFieldType},
|
||||
{Type: StringFieldType},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
row := &bq.TableRow{F: []*bq.TableCell{
|
||||
{
|
||||
V: []interface{}{ // repeated records.
|
||||
map[string]interface{}{ // first record.
|
||||
"v": map[string]interface{}{ // pointless single-key-map wrapper.
|
||||
"f": []interface{}{ // list of record fields.
|
||||
map[string]interface{}{ // first record field (name)
|
||||
"v": "first repeated record",
|
||||
},
|
||||
map[string]interface{}{ // second record field (nested record).
|
||||
"v": map[string]interface{}{ // pointless single-key-map wrapper.
|
||||
"f": []interface{}{ // nested record fields
|
||||
map[string]interface{}{
|
||||
"v": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "two",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]interface{}{ // second record.
|
||||
"v": map[string]interface{}{
|
||||
"f": []interface{}{
|
||||
map[string]interface{}{
|
||||
"v": "second repeated record",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": map[string]interface{}{
|
||||
"f": []interface{}{
|
||||
map[string]interface{}{
|
||||
"v": "3",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"v": "four",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
got, err := convertRow(row, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("error converting: %v", err)
|
||||
}
|
||||
// TODO: test with flattenresults.
|
||||
want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
|
||||
[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
|
||||
[]Value{ // record contains a string followed by a nested record.
|
||||
"first repeated record",
|
||||
[]Value{
|
||||
int64(1),
|
||||
"two",
|
||||
},
|
||||
},
|
||||
[]Value{ // second record.
|
||||
"second repeated record",
|
||||
[]Value{
|
||||
int64(3),
|
||||
"four",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValuesSaverConvertsToMap(t *testing.T) {
|
||||
testCases := []struct {
|
||||
vs ValuesSaver
|
||||
want *insertionRow
|
||||
}{
|
||||
{
|
||||
vs: ValuesSaver{
|
||||
Schema: []*FieldSchema{
|
||||
{Name: "intField", Type: IntegerFieldType},
|
||||
{Name: "strField", Type: StringFieldType},
|
||||
},
|
||||
InsertID: "iid",
|
||||
Row: []Value{1, "a"},
|
||||
},
|
||||
want: &insertionRow{
|
||||
InsertID: "iid",
|
||||
Row: map[string]Value{"intField": 1, "strField": "a"},
|
||||
},
|
||||
},
|
||||
{
|
||||
vs: ValuesSaver{
|
||||
Schema: []*FieldSchema{
|
||||
{Name: "intField", Type: IntegerFieldType},
|
||||
{
|
||||
Name: "recordField",
|
||||
Type: RecordFieldType,
|
||||
Schema: []*FieldSchema{
|
||||
{Name: "nestedInt", Type: IntegerFieldType, Repeated: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
InsertID: "iid",
|
||||
Row: []Value{1, []Value{[]Value{2, 3}}},
|
||||
},
|
||||
want: &insertionRow{
|
||||
InsertID: "iid",
|
||||
Row: map[string]Value{
|
||||
"intField": 1,
|
||||
"recordField": map[string]Value{
|
||||
"nestedInt": []Value{2, 3},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{ // repeated nested field
|
||||
vs: ValuesSaver{
|
||||
Schema: Schema{
|
||||
{
|
||||
Name: "records",
|
||||
Type: RecordFieldType,
|
||||
Schema: Schema{
|
||||
{Name: "x", Type: IntegerFieldType},
|
||||
{Name: "y", Type: IntegerFieldType},
|
||||
},
|
||||
Repeated: true,
|
||||
},
|
||||
},
|
||||
InsertID: "iid",
|
||||
Row: []Value{ // a row is a []Value
|
||||
[]Value{ // repeated field's value is a []Value
|
||||
[]Value{1, 2}, // first record of the repeated field
|
||||
[]Value{3, 4}, // second record
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &insertionRow{
|
||||
InsertID: "iid",
|
||||
Row: map[string]Value{
|
||||
"records": []Value{
|
||||
map[string]Value{"x": 1, "y": 2},
|
||||
map[string]Value{"x": 3, "y": 4},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
data, insertID, err := tc.vs.Save()
|
||||
if err != nil {
|
||||
t.Errorf("Expected successful save; got: %v", err)
|
||||
}
|
||||
got := &insertionRow{insertID, data}
|
||||
if !testutil.Equal(got, tc.want) {
|
||||
t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructSaver(t *testing.T) {
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "r", Type: IntegerFieldType, Repeated: true},
		{Name: "nested", Type: RecordFieldType, Schema: Schema{
			{Name: "b", Type: BooleanFieldType},
		}},
		{Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{
			{Name: "b", Type: BooleanFieldType},
		}},
	}

	type (
		N struct{ B bool }
		T struct {
			S       string
			R       []int
			Nested  *N
			Rnested []*N
		}
	)

	check := func(msg string, in interface{}, want map[string]Value) {
		ss := StructSaver{
			Schema:   schema,
			InsertID: "iid",
			Struct:   in,
		}
		got, gotIID, err := ss.Save()
		if err != nil {
			t.Fatalf("%s: %v", msg, err)
		}
		if wantIID := "iid"; gotIID != wantIID {
			t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
		}
		if !testutil.Equal(got, want) {
			t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
		}
	}

	in := T{
		S:       "x",
		R:       []int{1, 2},
		Nested:  &N{B: true},
		Rnested: []*N{{true}, {false}},
	}
	want := map[string]Value{
		"s":       "x",
		"r":       []int{1, 2},
		"nested":  map[string]Value{"b": true},
		"rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
	}
	check("all values", in, want)
	check("all values, ptr", &in, want)
	check("empty struct", T{}, map[string]Value{"s": ""})

	// Missing and extra fields are ignored.
	type T2 struct {
		S string
		// missing R, Nested, Rnested
		Extra int
	}
	check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"})

	check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}},
		map[string]Value{
			"s":       "",
			"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
		})
}

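// TestConvertRows checks that convertRows decodes the string-encoded cell
// values as returned by the BigQuery API into typed Values according to
// the schema's field types.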
func TestConvertRows(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
		{Type: IntegerFieldType},
		{Type: FloatFieldType},
		{Type: BooleanFieldType},
	}
	rows := []*bq.TableRow{
		{F: []*bq.TableCell{
			{V: "a"},
			{V: "1"},
			{V: "1.2"},
			{V: "true"},
		}},
		{F: []*bq.TableCell{
			{V: "b"},
			{V: "2"},
			{V: "2.2"},
			{V: "false"},
		}},
	}
	want := [][]Value{
		{"a", int64(1), 1.2, true},
		{"b", int64(2), 2.2, false},
	}
	got, err := convertRows(rows, schema)
	if err != nil {
		t.Fatalf("got %v, want nil", err)
	}
	if !testutil.Equal(got, want) {
		t.Errorf("\ngot %v\nwant %v", got, want)
	}
}

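// TestValueList checks the valueList Loader, in particular that a second
// Load overwrites the slice contents instead of appending to them
// (https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437).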
func TestValueList(t *testing.T) {
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "i", Type: IntegerFieldType},
		{Name: "f", Type: FloatFieldType},
		{Name: "b", Type: BooleanFieldType},
	}
	want := []Value{"x", 7, 3.14, true}
	var got []Value
	vl := (*valueList)(&got)
	if err := vl.Load(want, schema); err != nil {
		t.Fatal(err)
	}

	if !testutil.Equal(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}

	// Load truncates, not appends.
	// https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437
	if err := vl.Load(want, schema); err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}
}

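// TestValueMap checks that the valueMap Loader builds a map keyed by schema
// field name, recursing into nested and repeated records.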
func TestValueMap(t *testing.T) {
	ns := Schema{
		{Name: "x", Type: IntegerFieldType},
		{Name: "y", Type: IntegerFieldType},
	}
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "i", Type: IntegerFieldType},
		{Name: "f", Type: FloatFieldType},
		{Name: "b", Type: BooleanFieldType},
		{Name: "n", Type: RecordFieldType, Schema: ns},
		{Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true},
	}
	in := []Value{"x", 7, 3.14, true,
		[]Value{1, 2},
		[]Value{[]Value{3, 4}, []Value{5, 6}},
	}
	var vm valueMap
	if err := vm.Load(in, schema); err != nil {
		t.Fatal(err)
	}
	want := map[string]Value{
		"s": "x",
		"i": 7,
		"f": 3.14,
		"b": true,
		"n": map[string]Value{"x": 1, "y": 2},
		"rn": []Value{
			map[string]Value{"x": 3, "y": 4},
			map[string]Value{"x": 5, "y": 6},
		},
	}
	if !testutil.Equal(vm, valueMap(want)) {
		t.Errorf("got\n%+v\nwant\n%+v", vm, want)
	}
}

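// schema2 and testValues are shared fixtures for the StructLoader tests
// below; each entry of testValues corresponds positionally to a field of
// schema2.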
var (
	// For testing StructLoader
	schema2 = Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "s2", Type: StringFieldType},
		{Name: "by", Type: BytesFieldType},
		{Name: "I", Type: IntegerFieldType},
		{Name: "F", Type: FloatFieldType},
		{Name: "B", Type: BooleanFieldType},
		{Name: "TS", Type: TimestampFieldType},
		{Name: "D", Type: DateFieldType},
		{Name: "T", Type: TimeFieldType},
		{Name: "DT", Type: DateTimeFieldType},
		{Name: "nested", Type: RecordFieldType, Schema: Schema{
			{Name: "nestS", Type: StringFieldType},
			{Name: "nestI", Type: IntegerFieldType},
		}},
		{Name: "t", Type: StringFieldType},
	}

	testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC)
	testDate      = civil.Date{Year: 2016, Month: 11, Day: 5}
	testTime      = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 8}
	testDateTime  = civil.DateTime{Date: testDate, Time: testTime}

	testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true,
		testTimestamp, testDate, testTime, testDateTime,
		[]Value{"nested", int64(17)}, "z"}
)

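// testStruct1 covers the field-matching cases the struct loader must
// handle: an embedded struct (times), a defined type (String), an
// unexported field (s, which must be skipped), and a bigquery struct tag.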
type testStruct1 struct {
	B bool
	I int
	times
	S      string
	S2     String
	By     []byte
	s      string
	F      float64
	Nested nested
	Tagged string `bigquery:"t"`
}

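// String is a defined string type, used to check that the loader can
// populate fields whose underlying type is loadable.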
type String string

type nested struct {
	NestS string
	NestI int
}

type times struct {
	TS time.Time
	T  civil.Time
	D  civil.Date
	DT civil.DateTime
}

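// TestStructLoader checks loading a full row into a struct: matching is
// case-insensitive on exported fields only, embedded and nested structs
// are filled in, and an existing pointer target is reused rather than
// replaced by a fresh allocation.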
func TestStructLoader(t *testing.T) {
	var ts1 testStruct1
	if err := load(&ts1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	// Note: the schema field named "s" gets matched to the exported struct
	// field "S", not the unexported "s".
	want := &testStruct1{
		B:      true,
		I:      7,
		F:      3.14,
		times:  times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime},
		S:      "x",
		S2:     "y",
		By:     []byte{1, 2, 3},
		Nested: nested{NestS: "nested", NestI: 17},
		Tagged: "z",
	}
	if !testutil.Equal(&ts1, want, cmp.AllowUnexported(testStruct1{})) {
		t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
		d, _, err := pretty.Diff(*want, ts1)
		if err == nil {
			t.Logf("diff:\n%s", d)
		}
	}

	// Test pointers to nested structs.
	type nestedPtr struct{ Nested *nested }
	var np nestedPtr
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
	if !testutil.Equal(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}

	// Existing values should be reused.
	nst := &nested{NestS: "x", NestI: -10}
	np = nestedPtr{Nested: nst}
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}
	if np.Nested != nst {
		t.Error("nested struct pointers not equal")
	}
}

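// repStruct exercises repeated fields loaded into slices and into arrays
// that are shorter or longer than the incoming data.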
type repStruct struct {
	Nums      []int
	ShortNums [2]int // to test truncation
	LongNums  [5]int // to test padding with zeroes
	Nested    []*nested
}

var (
	repSchema = Schema{
		{Name: "nums", Type: IntegerFieldType, Repeated: true},
		{Name: "shortNums", Type: IntegerFieldType, Repeated: true},
		{Name: "longNums", Type: IntegerFieldType, Repeated: true},
		{Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{
			{Name: "nestS", Type: StringFieldType},
			{Name: "nestI", Type: IntegerFieldType},
		}},
	}
	v123      = []Value{int64(1), int64(2), int64(3)}
	repValues = []Value{v123, v123, v123,
		[]Value{
			[]Value{"x", int64(1)},
			[]Value{"y", int64(2)},
		},
	}
)

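// TestStructLoaderRepeated checks repeated fields: a too-long destination
// slice is truncated and keeps its capacity, a too-short one is
// reallocated, and fixed-size arrays are truncated or zero-padded as the
// cap checks below verify.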
func TestStructLoaderRepeated(t *testing.T) {
	var r1 repStruct
	if err := load(&r1, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	want := repStruct{
		Nums:      []int{1, 2, 3},
		ShortNums: [...]int{1, 2}, // extra values discarded
		LongNums:  [...]int{1, 2, 3, 0, 0},
		Nested:    []*nested{{"x", 1}, {"y", 2}},
	}
	if !testutil.Equal(r1, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
	}

	r2 := repStruct{
		Nums:     []int{-1, -2, -3, -4, -5},    // truncated to zero and appended to
		LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed
	}
	if err := load(&r2, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(r2, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
	}
	if got, want := cap(r2.Nums), 5; got != want {
		t.Errorf("cap(r2.Nums) = %d, want %d", got, want)
	}

	// Short slice case.
	r3 := repStruct{Nums: []int{-1}}
	if err := load(&r3, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(r3, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
	}
	if got, want := cap(r3.Nums), 3; got != want {
		t.Errorf("cap(r3.Nums) = %d, want %d", got, want)
	}
}

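// TestStructLoaderOverflow checks that loading a value that does not fit
// into a narrower struct field (int16, float32) reports an error instead
// of silently wrapping or truncating.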
func TestStructLoaderOverflow(t *testing.T) {
	type S struct {
		I int16
		F float32
	}
	schema := Schema{
		{Name: "I", Type: IntegerFieldType},
		{Name: "F", Type: FloatFieldType},
	}
	var s S
	if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil {
		t.Error("int: got nil, want error")
	}
	if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil {
		t.Error("float: got nil, want error")
	}
}

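// TestStructLoaderFieldOverlap checks that struct fields absent from the
// schema, and schema fields absent from the struct, are both ignored.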
func TestStructLoaderFieldOverlap(t *testing.T) {
	// It's OK if the struct has fields that the schema does not, and vice versa.
	type S1 struct {
		I int
		X [][]int // not in the schema; does not even correspond to a valid BigQuery type
		// many schema fields missing
	}
	var s1 S1
	if err := load(&s1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want1 := S1{I: 7}
	if !testutil.Equal(s1, want1) {
		t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
	}

	// It's even valid to have no overlapping fields at all.
	type S2 struct{ Z int }

	var s2 S2
	if err := load(&s2, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := S2{}
	if !testutil.Equal(s2, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
	}
}

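// TestStructLoaderErrors checks that structLoader.set rejects struct
// fields with incompatible types and refuses to be reused with a second,
// different struct type.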
func TestStructLoaderErrors(t *testing.T) {
	check := func(sp interface{}) {
		var sl structLoader
		err := sl.set(sp, schema2)
		if err == nil {
			t.Errorf("%T: got nil, want error", sp)
		}
	}

	type bad1 struct{ F int32 } // wrong type for FLOAT column
	check(&bad1{})

	type bad2 struct{ I uint } // unsupported integer type
	check(&bad2{})

	// Using more than one struct type with the same structLoader.
	type different struct {
		B bool
		I int
		times
		S    string
		s    string
		Nums []int
	}

	var sl structLoader
	if err := sl.set(&testStruct1{}, schema2); err != nil {
		t.Fatal(err)
	}
	err := sl.set(&different{}, schema2)
	if err == nil {
		t.Error("different struct types: got nil, want error")
	}
}

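// load is a test helper that compiles a structLoader for pval's type and
// loads vals into it in one step.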
func load(pval interface{}, schema Schema, vals []Value) error {
	var sl structLoader
	if err := sl.set(pval, schema); err != nil {
		return err
	}
	return sl.Load(vals, nil)
}

func BenchmarkStructLoader_NoCompile(b *testing.B) {
	benchmarkStructLoader(b, false)
}

func BenchmarkStructLoader_Compile(b *testing.B) {
	benchmarkStructLoader(b, true)
}

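// benchmarkStructLoader measures repeated loads into the same struct type.
// When compile is false, the loader's cached type is cleared after every
// load so each iteration pays the schema-compilation cost again; when
// compile is true, the compiled ops are reused across iterations.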
func benchmarkStructLoader(b *testing.B, compile bool) {
	var ts1 testStruct1
	for i := 0; i < b.N; i++ {
		var sl structLoader
		for j := 0; j < 10; j++ {
			// Drive sl directly rather than via the load helper, which builds
			// a fresh structLoader per call and so would ignore the cache.
			if err := sl.set(&ts1, schema2); err != nil {
				b.Fatal(err)
			}
			if err := sl.Load(testValues, nil); err != nil {
				b.Fatal(err)
			}
			if !compile {
				// Clear the cached type so the next set recompiles the schema.
				sl.typ = nil
			}
		}
	}
}